git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
Merge tag 'imx-drm-fixes-2018-08-03' of git://git.pengutronix.de/git/pza/linux into...
author Dave Airlie <airlied@redhat.com>
Fri, 10 Aug 2018 01:37:30 +0000 (11:37 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 10 Aug 2018 01:37:35 +0000 (11:37 +1000)
drm/imx: ipu-v3 plane offset and IPU id fixes

- Fix U/V plane offsets for odd vertical offsets. Due to wrong operator
  grouping, the y offset was not rounded down to the previous chroma row
  for vertically chroma-subsampled planar formats (a standalone sketch of
  the precedence issue follows this list).
- Fix the IPU id number for boards whose device tree has no OF alias for
  their single IPU. This is necessary to support imx-media on i.MX51 and
  i.MX53 SoCs (a fallback sketch follows this list).
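
As a minimal standalone sketch of the first fix (the helper names and the
4:2:0 layout are illustrative, not the driver code): C's * and / have equal
precedence and associate left to right, so "stride * y / 2" halves the
product instead of first rounding an odd y down to the previous chroma row.

    /* Hypothetical offset helpers demonstrating the grouping bug. */
    #include <stdio.h>

    /* Buggy: stride * y / 2 / 2 evaluates as ((stride * y) / 2) / 2,
     * so an odd y is not rounded down before scaling. */
    static unsigned int u_offset_buggy(unsigned int stride, unsigned int height,
                                       unsigned int x, unsigned int y)
    {
        return stride * height + stride * y / 2 / 2 + x / 2;
    }

    /* Fixed: parenthesizing (y / 2) rounds the vertical offset down to
     * an even luma row before computing the chroma plane offset. */
    static unsigned int u_offset_fixed(unsigned int stride, unsigned int height,
                                       unsigned int x, unsigned int y)
    {
        return stride * height + stride * (y / 2) / 2 + x / 2;
    }

    int main(void)
    {
        /* For an odd y the two results differ by half a chroma line. */
        printf("buggy: %u\nfixed: %u\n",
               u_offset_buggy(1920, 1080, 0, 3),
               u_offset_fixed(1920, 1080, 0, 3));
        return 0;
    }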
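For the second fix, a sketch of the alias fallback, assuming the kernel's
of_alias_get_id() helper; the surrounding function is hypothetical, not the
driver's probe path.

    #include <linux/of.h>

    static int example_get_ipu_id(struct device_node *np)
    {
        /* of_alias_get_id() returns a negative errno when the device
         * tree defines no "ipuN" alias, as on i.MX51 and i.MX53 boards
         * with a single IPU; fall back to id 0 instead of propagating
         * the error. */
        int id = of_alias_get_id(np, "ipu");

        return id < 0 ? 0 : id;
    }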

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Philipp Zabel <p.zabel@pengutronix.de>
Link: https://patchwork.freedesktop.org/patch/msgid/1533552680.4204.14.camel@pengutronix.de
2534 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/core-api/kernel-api.rst
Documentation/device-mapper/writecache.txt
Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
Documentation/devicetree/bindings/display/ilitek,ili9341.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/display/msm/dpu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/edt,et-series.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt [deleted file]
Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt [deleted file]
Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt [moved from Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt with 55% similarity]
Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/hideep.txt
Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
Documentation/devicetree/bindings/mips/brcm/soc.txt
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/power/power_domain.txt
Documentation/devicetree/bindings/regulator/tps65090.txt
Documentation/devicetree/bindings/reset/st,sti-softreset.txt
Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
Documentation/devicetree/bindings/sound/qcom,apq8096.txt
Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/bindings/w1/w1-gpio.txt
Documentation/driver-api/dma-buf.rst
Documentation/driver-api/infrastructure.rst
Documentation/fb/fbcon.txt
Documentation/filesystems/Locking
Documentation/filesystems/cifs/AUTHORS
Documentation/filesystems/cifs/CHANGES
Documentation/filesystems/cifs/TODO
Documentation/filesystems/vfs.txt
Documentation/gpu/amdgpu.rst [new file with mode: 0644]
Documentation/gpu/drivers.rst
Documentation/gpu/drm-client.rst [new file with mode: 0644]
Documentation/gpu/drm-kms-helpers.rst
Documentation/gpu/drm-kms.rst
Documentation/gpu/drm-mm.rst
Documentation/gpu/index.rst
Documentation/gpu/kms-properties.csv
Documentation/gpu/msm-crash-dump.rst [new file with mode: 0644]
Documentation/gpu/v3d.rst [new file with mode: 0644]
Documentation/kbuild/kbuild.txt
Documentation/kbuild/kconfig-language.txt
Documentation/kbuild/kconfig.txt
Documentation/locking/ww-mutex-design.txt
Documentation/networking/bonding.txt
Documentation/networking/e100.rst
Documentation/networking/e1000.rst
Documentation/networking/strparser.txt
Documentation/trace/histogram.txt
Documentation/usb/gadget_configfs.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/kernel/osf_sys.c
arch/alpha/lib/Makefile
arch/alpha/lib/dec_and_lock.c [deleted file]
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/entry.h
arch/arc/include/asm/mach_desc.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/kernel/irq.c
arch/arc/kernel/process.c
arch/arc/plat-hsdk/Kconfig
arch/arc/plat-hsdk/platform.c
arch/arm/Kconfig
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/armada-385-synology-ds116.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm-hr2.dtsi
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/da850.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx51-zii-rdu1.dts
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/socfpga_arria10.dtsi
arch/arm/common/Makefile
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/crypto/speck-neon-core.S
arch/arm/firmware/Makefile
arch/arm/kernel/head-nommu.S
arch/arm/kernel/process.c
arch/arm/kernel/signal.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-pxa/irq.c
arch/arm/mach-rpc/ecard.c
arch/arm/mach-socfpga/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm/net/bpf_jit_32.c
arch/arm/xen/enlighten.c
arch/arm64/Makefile
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/marvell/armada-cp110.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-glue.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/simd.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/tlb.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/module.c
arch/arm64/kernel/smp.c
arch/arm64/kvm/fpsimd.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/arm64/mm/proc.S
arch/ia64/include/asm/tlb.h
arch/ia64/kernel/perfmon.c
arch/ia64/mm/init.c
arch/m68k/include/asm/mcf_pgalloc.h
arch/microblaze/Kconfig.debug
arch/microblaze/include/asm/setup.h
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/Makefile
arch/microblaze/kernel/heartbeat.c [deleted file]
arch/microblaze/kernel/platform.c [deleted file]
arch/microblaze/kernel/reset.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/mips/Kconfig
arch/mips/ath79/common.c
arch/mips/ath79/mach-pb44.c
arch/mips/include/asm/io.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/entry.S
arch/mips/kernel/mcount.S
arch/mips/kernel/process.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/signal.c
arch/mips/kernel/traps.c
arch/mips/mm/ioremap.c
arch/mips/pci/pci.c
arch/nds32/Kconfig
arch/nds32/Makefile
arch/nds32/include/asm/cacheflush.h
arch/nds32/include/asm/futex.h
arch/nds32/kernel/setup.c
arch/nds32/mm/cacheflush.c
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/head.S
arch/openrisc/kernel/traps.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/signal.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/unwind.c
arch/powerpc/Makefile
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable-4k.h
arch/powerpc/include/asm/book3s/64/pgtable-64k.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/nmi.h
arch/powerpc/include/asm/nohash/32/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/idle_book3s.S
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/kernel/syscalls.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mmu_context_iommu.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/subpage-prot.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/powermac/time.c
arch/powerpc/xmon/xmon.c
arch/riscv/Kconfig
arch/riscv/include/uapi/asm/elf.h
arch/riscv/kernel/irq.c
arch/riscv/kernel/module.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/include/asm/css_chars.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/entry.S
arch/s390/kernel/signal.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/mm/pgalloc.c
arch/s390/net/bpf_jit_comp.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/crypto/aegis128-aesni-asm.S
arch/x86/crypto/aegis128l-aesni-asm.S
arch/x86/crypto/aegis256-aesni-asm.S
arch/x86/crypto/morus1280-avx2-asm.S
arch/x86/crypto/morus1280-sse2-asm.S
arch/x86/crypto/morus640-sse2-asm.S
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/events/intel/ds.c
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/apm.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/Makefile
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce-severity.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/kernel/e820.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/head64.c
arch/x86/kernel/irqflags.S [new file with mode: 0644]
arch/x86/kernel/kvmclock.c
arch/x86/kernel/quirks.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/Kconfig
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi_64.c
arch/x86/purgatory/Makefile
arch/x86/um/mem_32.c
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/irq.c
arch/x86/xen/smp_pv.c
block/bio.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-softirq.c
block/blk-timeout.c
block/bsg.c
block/sed-opal.c
certs/blacklist.h
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/morus640.c
crypto/sha3_generic.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/uterror.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/acpi/osl.c
drivers/acpi/pptt.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci_mvebu.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/sata_fsl.c
drivers/ata/sata_nv.c
drivers/atm/iphase.c
drivers/atm/zatm.c
drivers/base/Makefile
drivers/base/core.c
drivers/base/dd.c
drivers/base/power/domain.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/bluetooth/hci_nokia.c
drivers/bus/ti-sysc.c
drivers/char/agp/alpha-agp.c
drivers/char/agp/amd64-agp.c
drivers/char/hw_random/core.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/kcs_bmc.c
drivers/char/mem.c
drivers/char/random.c
drivers/clk/Makefile
drivers/clk/clk-aspeed.c
drivers/clk/clk.c
drivers/clk/davinci/da8xx-cfgchip.c
drivers/clk/davinci/psc.h
drivers/clk/meson/clk-audio-divider.c
drivers/clk/meson/gxbb.c
drivers/clk/mvebu/armada-37xx-periph.c
drivers/clk/qcom/gcc-msm8996.c
drivers/clk/qcom/mmcc-msm8996.c
drivers/clk/sunxi-ng/Makefile
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/timer-stm32.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/dax/device.c
drivers/dax/super.c
drivers/dma-buf/dma-buf.c
drivers/dma-buf/dma-fence-array.c
drivers/dma-buf/dma-fence.c
drivers/dma-buf/reservation.c
drivers/dma-buf/sw_sync.c
drivers/dma/k3dma.c
drivers/dma/pl330.c
drivers/dma/ti/omap-dma.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/libstub/tpm.c
drivers/fpga/altera-cvp.c
drivers/gpio/gpio-uniphier.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/ObjectID.h
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/soc15d.h
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
drivers/gpu/drm/amd/amdkfd/cik_int.h
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_events.h
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/TODO
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h [moved from drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h with 81% similarity]
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/basics/Makefile
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
drivers/gpu/drm/amd/display/dc/basics/logger.c [deleted file]
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h
drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_sink.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/Makefile
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_aux.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dm_helpers.h
drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
drivers/gpu/drm/amd/display/dc/dm_services.h
drivers/gpu/drm/amd/display/dc/dm_services_types.h
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c [deleted file]
drivers/gpu/drm/amd/display/dc/gpio/Makefile
drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
drivers/gpu/drm/amd/display/dc/i2caux/Makefile
drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
drivers/gpu/drm/amd/display/dc/i2caux/engine.h
drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/irq/Makefile
drivers/gpu/drm/amd/display/dc/irq/irq_service.c
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/display/include/ddc_service_types.h
drivers/gpu/drm/amd/display/include/dpcd_defs.h
drivers/gpu/drm/amd/display/include/fixed31_32.h
drivers/gpu/drm/amd/display/include/grph_object_defs.h
drivers/gpu/drm/amd/display/include/grph_object_id.h
drivers/gpu/drm/amd/display/include/logger_interface.h
drivers/gpu/drm/amd/display/include/logger_types.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/color/luts_1d.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/modules/stats/stats.c
drivers/gpu/drm/amd/include/amd_pcie.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/dm_pp_interface.h
drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h
drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h [moved from drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h with 75% similarity]
drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
drivers/gpu/drm/amd/powerplay/smumgr/Makefile
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h [new file with mode: 0644]
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/arc/arcpgu_crtc.c
drivers/gpu/drm/arc/arcpgu_sim.c
drivers/gpu/drm/arm/Makefile
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/hdlcd_drv.h
drivers/gpu/drm/arm/malidp_crtc.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_drv.h
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_mw.c [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_mw.h [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/armada/Makefile
drivers/gpu/drm/armada/armada_510.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_crtc.h
drivers/gpu/drm/armada/armada_drm.h
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_fb.c
drivers/gpu/drm/armada/armada_fb.h
drivers/gpu/drm/armada/armada_fbdev.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/armada/armada_hw.h
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/armada/armada_plane.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_plane.h [new file with mode: 0644]
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/analogix-anx78xx.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/cdns-dsi.c
drivers/gpu/drm/bridge/dumb-vga-dac.c
drivers/gpu/drm/bridge/lvds-encoder.c
drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
drivers/gpu/drm/bridge/nxp-ptn3460.c
drivers/gpu/drm/bridge/panel.c
drivers/gpu/drm/bridge/parade-ps8622.c
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/bridge/ti-tfp410.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_fbdev.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_client.c [new file with mode: 0644]
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_debugfs_crc.c
drivers/gpu/drm/drm_dp_cec.c [new file with mode: 0644]
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_dumb_buffers.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/drm_fourcc.c
drivers/gpu/drm/drm_framebuffer.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_framebuffer_helper.c
drivers/gpu/drm/drm_global.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_mipi_dsi.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_mode_config.c
drivers/gpu/drm/drm_mode_object.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_print.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_property.c
drivers/gpu/drm/drm_simple_kms_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/drm_vma_manager.c
drivers/gpu/drm/drm_writeback.c [new file with mode: 0644]
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_core.c [deleted file]
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_g2d.h
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-gsc.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/gma500/accel_2d.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/framebuffer.h
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gma_display.c
drivers/gpu/drm/gma500/gtt.h
drivers/gpu/drm/gma500/intel_bios.h
drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
drivers/gpu/drm/gma500/mdfld_intel_display.c
drivers/gpu/drm/gma500/oaktrail_crtc.c
drivers/gpu/drm/gma500/oaktrail_hdmi.c
drivers/gpu/drm/gma500/oaktrail_lvds.c
drivers/gpu/drm/gma500/psb_intel_modes.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo_ch7017.c
drivers/gpu/drm/i915/dvo_ch7xxx.c
drivers/gpu/drm/i915/dvo_ivch.c
drivers/gpu/drm/i915/dvo_ns2501.c
drivers/gpu/drm/i915/dvo_sil164.c
drivers/gpu/drm/i915/dvo_tfp410.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/edid.c
drivers/gpu/drm/i915/gvt/execlist.h
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/hypercall.h
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/mpt.h
drivers/gpu/drm/i915/gvt/page_track.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_context.h
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_pvinfo.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_selftest.h
drivers/gpu/drm/i915/i915_timeline.h
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/i915_vgpu.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/icl_dsi.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_breadcrumbs.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_display.h
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_dpll_mgr.h
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_vbt.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_frontbuffer.c
drivers/gpu/drm/i915/intel_guc.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_log.c
drivers/gpu/drm/i915/intel_guc_log.h
drivers/gpu/drm/i915/intel_guc_submission.c
drivers/gpu/drm/i915/intel_gvt.c
drivers/gpu/drm/i915/intel_hangcheck.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_huc.c
drivers/gpu/drm/i915/intel_huc.h
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_opregion.h
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pipe_crc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/intel_vbt_defs.h
drivers/gpu/drm/i915/intel_workarounds.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/i915_selftest.c
drivers/gpu/drm/i915/selftests/i915_vma.c
drivers/gpu/drm/i915/selftests/igt_flush_test.c
drivers/gpu/drm/i915/selftests/igt_wedge_me.h [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
drivers/gpu/drm/i915/selftests/intel_guc.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/intel_lrc.c
drivers/gpu/drm/i915/selftests/intel_workarounds.c
drivers/gpu/drm/i915/selftests/mock_context.c
drivers/gpu/drm/i915/selftests/mock_dmabuf.c
drivers/gpu/drm/i915/selftests/mock_engine.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
drivers/gpu/drm/i915/vlv_dsi.c [moved from drivers/gpu/drm/i915/intel_dsi.c with 96% similarity]
drivers/gpu/drm/i915/vlv_dsi_pll.c [moved from drivers/gpu/drm/i915/intel_dsi_pll.c with 84% similarity]
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_ddp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_drv.h
drivers/gpu/drm/mediatek/mtk_drm_fb.c
drivers/gpu/drm/mediatek/mtk_drm_fb.h
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_vclk.c
drivers/gpu/drm/meson/meson_vclk.h
drivers/gpu/drm/meson/meson_venc.c
drivers/gpu/drm/meson/meson_venc.h
drivers/gpu/drm/meson/meson_venc_cvbs.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h [new file with mode: 0644]
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_cfg.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
drivers/gpu/drm/msm/edp/edp_connector.c
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/dac.c
drivers/gpu/drm/nouveau/dispnv04/dfp.c
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/include/nvif/object.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_hwmon.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nvkm/core/engine.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/changf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. [deleted file]
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
drivers/gpu/drm/omapdrm/dss/core.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/display.c
drivers/gpu/drm/omapdrm/dss/dpi.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/dss.h
drivers/gpu/drm/omapdrm/dss/pll.c
drivers/gpu/drm/omapdrm/dss/sdi.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/dss/video-pll.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_debugfs.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fb.h
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_gem.h
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/Makefile
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c [new file with mode: 0644]
drivers/gpu/drm/panel/panel-innolux-p079zca.c
drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
drivers/gpu/drm/panel/panel-lvds.c
drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/panel/panel-sitronix-st7789v.c
drivers/gpu/drm/pl111/Makefile
drivers/gpu/drm/pl111/pl111_display.c
drivers/gpu/drm/pl111/pl111_drm.h
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/pl111/pl111_nomadik.c [new file with mode: 0644]
drivers/gpu/drm/pl111/pl111_nomadik.h [new file with mode: 0644]
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/cdn-dp-reg.c
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.h
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_lvds.c
drivers/gpu/drm/savage/savage_state.c
drivers/gpu/drm/scheduler/Makefile
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/scheduler/sched_fence.c
drivers/gpu/drm/selftests/drm_mm_selftests.h
drivers/gpu/drm/selftests/test-drm_mm.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/sti/sti_cursor.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/stm/drv.c
drivers/gpu/drm/stm/ltdc.c
drivers/gpu/drm/stm/ltdc.h
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/sun4i/Makefile
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun4i_lvds.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/sun4i/sun8i_hdmi_phy_clk.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_mixer.h
drivers/gpu/drm/sun4i/sun8i_tcon_top.c [new file with mode: 0644]
drivers/gpu/drm/sun4i/sun8i_tcon_top.h [new file with mode: 0644]
drivers/gpu/drm/sun4i/sun8i_ui_layer.c
drivers/gpu/drm/sun4i/sun8i_vi_layer.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_external.c
drivers/gpu/drm/tilcdc/tilcdc_panel.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/tinydrm/Kconfig
drivers/gpu/drm/tinydrm/Makefile
drivers/gpu/drm/tinydrm/core/tinydrm-core.c
drivers/gpu/drm/tinydrm/ili9225.c
drivers/gpu/drm/tinydrm/ili9341.c [new file with mode: 0644]
drivers/gpu/drm/tinydrm/mi0283qt.c
drivers/gpu/drm/tinydrm/mipi-dbi.c
drivers/gpu/drm/tinydrm/st7586.c
drivers/gpu/drm/tinydrm/st7735r.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/udl/udl_dmabuf.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/udl/udl_main.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/udl/udl_transfer.c
drivers/gpu/drm/v3d/v3d_bo.c
drivers/gpu/drm/v3d/v3d_drv.c
drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/v3d/v3d_fence.c
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/v3d/v3d_irq.c
drivers/gpu/drm/v3d/v3d_regs.h
drivers/gpu/drm/v3d/v3d_sched.c
drivers/gpu/drm/vc4/Makefile
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_debugfs.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vc4/vc4_fence.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_regs.h
drivers/gpu/drm/vc4/vc4_txp.c [new file with mode: 0644]
drivers/gpu/drm/vc4/vc4_vec.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_fb.c
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/vkms/Makefile [new file with mode: 0644]
drivers/gpu/drm/vkms/vkms_crtc.c [new file with mode: 0644]
drivers/gpu/drm/vkms/vkms_drv.c [new file with mode: 0644]
drivers/gpu/drm/vkms/vkms_drv.h [new file with mode: 0644]
drivers/gpu/drm/vkms/vkms_gem.c [new file with mode: 0644]
drivers/gpu/drm/vkms/vkms_output.c [new file with mode: 0644]
drivers/gpu/drm/vkms/vkms_plane.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/Kconfig
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/device_include/svga3d_caps.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h
drivers/gpu/drm/vmwgfx/device_include/svga_escape.h
drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h
drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
drivers/gpu/drm/vmwgfx/device_include/svga_types.h
drivers/gpu/drm/vmwgfx/device_include/vmware_pack_begin.h
drivers/gpu/drm/vmwgfx/device_include/vmware_pack_end.h
drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c [deleted file]
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
drivers/gpu/drm/vmwgfx/vmwgfx_so.h
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c [moved from drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c with 99% similarity]
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
drivers/gpu/drm/vmwgfx/vmwgfx_va.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front.h
drivers/gpu/drm/xen/xen_drm_front_shbuf.c
drivers/gpu/drm/zte/zx_hdmi.c
drivers/gpu/drm/zte/zx_plane.c
drivers/gpu/drm/zte/zx_tvenc.c
drivers/gpu/drm/zte/zx_vga.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/job.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-cpmem.c
drivers/gpu/ipu-v3/ipu-csi.c
drivers/gpu/ipu-v3/ipu-image-convert.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-steam.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/nct6775.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-cht-wc.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-stu300.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-smbus.c
drivers/iio/accel/mma8452.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/light/tsl2772.c
drivers/iio/pressure/bmp280-core.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/uc.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hfi1/verbs_txreq.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/input/input-mt.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/goldfish_events.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/sc27xx-vibra.c [new file with mode: 0644]
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-base.c
drivers/input/rmi4/Kconfig
drivers/input/rmi4/rmi_2d_sensor.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_bus.h
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f01.c
drivers/input/rmi4/rmi_f03.c
drivers/input/rmi4/rmi_f11.c
drivers/input/rmi4/rmi_f12.c
drivers/input/rmi4/rmi_f30.c
drivers/input/rmi4/rmi_f34.c
drivers/input/rmi4/rmi_f54.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/silead.c
drivers/iommu/Kconfig
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/isdn/mISDN/socket.c
drivers/lightnvm/Kconfig
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-dma-sg.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/media/rc/bpf-lirc.c
drivers/misc/cxl/api.c
drivers/misc/ibmasm/ibmasmfs.c
drivers/misc/mei/interrupt.c
drivers/misc/vmw_balloon.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/denali_dt.c
drivers/mtd/nand/raw/mxc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_macronix.c
drivers/mtd/nand/raw/nand_micron.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/net/bonding/bond_options.c
drivers/net/can/m_can/m_can.c
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/apm/xgene-v2/Kconfig
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/geneve.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/fakelb.c
drivers/net/ieee802154/mcr20a.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/net_failover.c
drivers/net/phy/dp83tc811.c
drivers/net/phy/marvell.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp-bus.c
drivers/net/ppp/pppoe.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/usb/smsc75xx.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/wcn36xx/testmode.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/mediatek/mt7601u/phy.c
drivers/net/wireless/quantenna/qtnfmac/Kconfig
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/base.h
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvdimm/claim.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/loop.c
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/of_private.h
drivers/of/overlay.c
drivers/opp/core.c
drivers/pci/Makefile
drivers/pci/controller/Kconfig
drivers/pci/controller/dwc/Kconfig
drivers/pci/controller/dwc/pcie-designware-host.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/controller/pci-ftpci100.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/controller/pci-v3-semi.c
drivers/pci/controller/pci-versatile.c
drivers/pci/controller/pci-xgene.c
drivers/pci/controller/pcie-mediatek.c
drivers/pci/controller/pcie-rcar.c
drivers/pci/controller/pcie-xilinx-nwl.c
drivers/pci/controller/pcie-xilinx.c
drivers/pci/endpoint/pci-epf-core.c
drivers/pci/hotplug/acpi_pcihp.c
drivers/pci/iov.c
drivers/pci/of.c
drivers/pci/pci-acpi.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/err.c
drivers/perf/xgene_pmu.c
drivers/phy/broadcom/phy-brcm-usb-init.c
drivers/phy/motorola/phy-mapphone-mdm6600.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/bcm/pinctrl-nsp-mux.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/mediatek/pinctrl-mt7622.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/sh-pfc/pfc-r8a77970.c
drivers/platform/x86/dell-laptop.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_qoriq.c
drivers/rtc/interface.c
drivers/rtc/rtc-mrst.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_eer.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/Makefile
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_trace.h [new file with mode: 0644]
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/cxlflash/main.h
drivers/scsi/cxlflash/ocxl_hw.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/ipr.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd_zbc.c
drivers/scsi/sg.c
drivers/scsi/xen-scsifront.c
drivers/soc/imx/gpc.c
drivers/soc/imx/gpcv2.c
drivers/soc/qcom/Kconfig
drivers/soc/renesas/rcar-sysc.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/comedi/drivers/quatech_daqp_cs.c
drivers/staging/ks7010/ks_hostif.c
drivers/staging/media/omap4iss/iss_video.c
drivers/staging/rtl8188eu/Kconfig
drivers/staging/rtl8188eu/core/rtw_recv.c
drivers/staging/rtl8188eu/core/rtw_security.c
drivers/staging/rtl8723bs/core/rtw_ap.c
drivers/staging/rtlwifi/rtl8822be/hw.c
drivers/staging/rtlwifi/wifi.h
drivers/staging/speakup/speakup_soft.c
drivers/staging/typec/Kconfig
drivers/staging/vboxvideo/vbox_mode.c
drivers/target/target_core_pr.c
drivers/target/target_core_user.c
drivers/tee/tee_shm.c
drivers/thunderbolt/domain.c
drivers/tty/n_tty.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/vt/vt.c
drivers/uio/uio.c
drivers/usb/chipidea/Kconfig
drivers/usb/chipidea/Makefile
drivers/usb/chipidea/ci.h
drivers/usb/chipidea/host.c
drivers/usb/chipidea/ulpi.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_audio.c
drivers/usb/gadget/udc/aspeed-vhub/Kconfig
drivers/usb/gadget/udc/aspeed-vhub/ep0.c
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/aspeed-vhub/vhub.h
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci-trace.h
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/yurex.c
drivers/usb/phy/phy-fsl-usb.c
drivers/usb/serial/ch341.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/mos7840.c
drivers/usb/typec/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/net.c
drivers/video/console/Kconfig
drivers/video/console/dummycon.c
drivers/video/fbdev/core/fbcon.c
drivers/xen/Makefile
drivers/xen/events/events_base.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/privcmd-buf.c [new file with mode: 0644]
drivers/xen/privcmd.c
drivers/xen/privcmd.h
drivers/xen/xen-scsiback.c
fs/aio.c
fs/autofs/Makefile
fs/autofs/dev-ioctl.c
fs/autofs/init.c
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/cachefiles/bind.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/ceph/inode.c
fs/cifs/cifs_debug.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/smbdirect.c
fs/cifs/smbdirect.h
fs/cifs/trace.h
fs/cifs/transport.c
fs/eventfd.c
fs/eventpoll.c
fs/exec.c
fs/ext2/ext2.h
fs/ext2/super.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/mmp.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fat/inode.c
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/object.c
fs/fscache/operation.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/internal.h
fs/jbd2/transaction.c
fs/jfs/xattr.c
fs/nfs/delegation.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.h
fs/pipe.c
fs/proc/base.c
fs/proc/generic.c
fs/proc/task_mmu.c
fs/quota/dquot.c
fs/reiserfs/prints.c
fs/select.c
fs/squashfs/cache.c
fs/squashfs/file.c
fs/squashfs/fragment.c
fs/squashfs/squashfs_fs.h
fs/timerfd.c
fs/udf/balloc.c
fs/udf/directory.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/udfdecl.h
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_trans.c
include/acpi/processor.h
include/asm-generic/qspinlock_types.h
include/asm-generic/tlb.h
include/crypto/if_alg.h
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_atomic_helper.h
include/drm/drm_bridge.h
include/drm/drm_client.h [new file with mode: 0644]
include/drm/drm_connector.h
include/drm/drm_crtc.h
include/drm/drm_debugfs_crc.h
include/drm/drm_device.h
include/drm/drm_dp_helper.h
include/drm/drm_drv.h
include/drm/drm_encoder.h
include/drm/drm_fb_cma_helper.h
include/drm/drm_fb_helper.h
include/drm/drm_file.h
include/drm/drm_fourcc.h
include/drm/drm_mm.h
include/drm/drm_mode_config.h
include/drm/drm_modes.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_of.h
include/drm/drm_panel.h
include/drm/drm_pci.h
include/drm/drm_plane.h
include/drm/drm_plane_helper.h
include/drm/drm_prime.h
include/drm/drm_print.h
include/drm/drm_property.h
include/drm/drm_vma_manager.h
include/drm/drm_writeback.h [new file with mode: 0644]
include/drm/gpu_scheduler.h
include/drm/i915_drm.h
include/drm/i915_pciids.h
include/drm/tinydrm/tinydrm.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_set_memory.h [new file with mode: 0644]
include/dt-bindings/clock/imx6ul-clock.h
include/dt-bindings/clock/sun8i-tcon-top.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/ascii85.h [new file with mode: 0644]
include/linux/atmdev.h
include/linux/backing-dev-defs.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/bpf_lirc.h
include/linux/bpfilter.h
include/linux/compat.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/console.h
include/linux/dax.h
include/linux/delayacct.h
include/linux/dma-buf.h
include/linux/dma-contiguous.h
include/linux/dma-fence.h
include/linux/eventfd.h
include/linux/filter.h
include/linux/fs.h
include/linux/fsl/guts.h
include/linux/ftrace.h
include/linux/hid.h
include/linux/if_bridge.h
include/linux/igmp.h
include/linux/iio/buffer-dma.h
include/linux/input/mt.h
include/linux/intel-iommu.h
include/linux/irq.h
include/linux/irqdesc.h
include/linux/kernel.h
include/linux/kthread.h
include/linux/libata.h
include/linux/marvell_phy.h
include/linux/memory.h
include/linux/mlx5/driver.h
include/linux/mlx5/eswitch.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mod_devicetable.h
include/linux/net.h
include/linux/netdevice.h
include/linux/nfs_xdr.h
include/linux/pci.h
include/linux/pm_domain.h
include/linux/poll.h
include/linux/refcount.h
include/linux/ring_buffer.h
include/linux/rmi.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/sched/task.h
include/linux/skbuff.h
include/linux/slub_def.h
include/linux/spinlock.h
include/linux/syscalls.h
include/linux/uio_driver.h
include/linux/ww_mutex.h
include/net/bluetooth/bluetooth.h
include/net/cfg80211.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/iucv/af_iucv.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nf_tables_core.h
include/net/netfilter/nf_tproxy.h
include/net/netns/ipv6.h
include/net/pkt_cls.h
include/net/sctp/sctp.h
include/net/tc_act/tc_csum.h
include/net/tc_act/tc_tunnel_key.h
include/net/tcp.h
include/net/tls.h
include/net/udp.h
include/net/xdp_sock.h
include/rdma/ib_verbs.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/drm.h
include/uapi/drm/drm_fourcc.h
include/uapi/drm/drm_mode.h
include/uapi/drm/vmwgfx_drm.h
include/uapi/linux/aio_abi.h
include/uapi/linux/bpf.h
include/uapi/linux/btf.h
include/uapi/linux/ethtool.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/nbd.h
include/uapi/linux/rseq.h
include/uapi/linux/target_core_user.h
include/uapi/linux/tcp.h
include/uapi/linux/types_32_64.h [deleted file]
include/video/mipi_display.h
include/xen/xen.h
init/Kconfig
ipc/sem.c
kernel/Makefile
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/dma/Kconfig [new file with mode: 0644]
kernel/dma/Makefile [new file with mode: 0644]
kernel/dma/coherent.c [moved from drivers/base/dma-coherent.c with 100% similarity]
kernel/dma/contiguous.c [moved from drivers/base/dma-contiguous.c with 100% similarity]
kernel/dma/debug.c [moved from lib/dma-debug.c with 100% similarity]
kernel/dma/direct.c [moved from lib/dma-direct.c with 100% similarity]
kernel/dma/mapping.c [moved from drivers/base/dma-mapping.c with 99% similarity]
kernel/dma/noncoherent.c [moved from lib/dma-noncoherent.c with 100% similarity]
kernel/dma/swiotlb.c [moved from lib/swiotlb.c with 99% similarity]
kernel/dma/virt.c [moved from lib/dma-virt.c with 98% similarity]
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/fork.c
kernel/irq/debugfs.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/locktorture.c
kernel/locking/mutex.c
kernel/locking/rwsem.c
kernel/locking/test-ww_mutex.c
kernel/memremap.c
kernel/printk/printk.c
kernel/rseq.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/softirq.c
kernel/stop_machine.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/tick-common.c
kernel/time/time.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_output.c
lib/Kconfig
lib/Kconfig.kasan
lib/Makefile
lib/dec_and_lock.c
lib/iov_iter.c
lib/locking-selftest.c
lib/percpu_ida.c
lib/refcount.c
lib/rhashtable.c
lib/scatterlist.c
lib/test_bpf.c
lib/test_printf.c
mm/backing-dev.c
mm/debug.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/kasan.c
mm/memblock.c
mm/memcontrol.c
mm/mempolicy.c
mm/mmap.c
mm/nommu.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/slub.c
mm/vmstat.c
mm/zswap.c
net/8021q/vlan.c
net/9p/client.c
net/Makefile
net/appletalk/ddp.c
net/atm/br2684.c
net/atm/clip.c
net/atm/common.c
net/atm/common.h
net/atm/lec.c
net/atm/mpc.c
net/atm/pppoatm.c
net/atm/pvc.c
net/atm/raw.c
net/atm/svc.c
net/ax25/af_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/hard-interface.c
net/batman-adv/translation-table.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bpf/test_run.c
net/bpfilter/.gitignore [new file with mode: 0644]
net/bpfilter/Kconfig
net/bpfilter/Makefile
net/bpfilter/bpfilter_kern.c
net/bpfilter/bpfilter_umh_blob.S [new file with mode: 0644]
net/caif/caif_dev.c
net/caif/caif_socket.c
net/can/bcm.c
net/can/raw.c
net/core/datagram.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/fib_rules.c
net/core/filter.c
net/core/gen_stats.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ccids/ccid3.c
net/dccp/dccp.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/dns_resolver/dns_key.c
net/ieee802154/6lowpan/core.c
net/ieee802154/socket.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_tproxy_ipv4.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/calipso.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_tproxy_ipv6.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/ipv6/tcp_ipv6.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/ncsi/ncsi-aen.c
net/ncsi/ncsi-manage.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_log.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_set_core.c [new file with mode: 0644]
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_compat.c
net/netfilter/nft_immediate.c
net/netfilter/nft_lookup.c
net/netfilter/nft_set_bitmap.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_TPROXY.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/llcp_commands.c
net/nfc/llcp_sock.c
net/nfc/rawsock.c
net/nsh/nsh.c
net/packet/af_packet.c
net/phonet/socket.c
net/qrtr/qrtr.c
net/rds/connection.c
net/rds/loop.c
net/rds/loop.h
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/sched/act_csum.c
net/sched/act_ife.c
net/sched/act_tunnel_key.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_blackhole.c
net/sched/sch_fq_codel.c
net/sched/sch_hfsc.c
net/sctp/chunk.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_tx.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/xprt.c
net/tipc/discover.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/trace.h
net/x25/af_x25.c
net/xdp/xsk.c
net/xdp/xsk_queue.h
samples/bpf/.gitignore [new file with mode: 0644]
samples/bpf/parse_varlen.c
samples/bpf/test_overhead_user.c
samples/bpf/trace_event_user.c
samples/bpf/xdp2skb_meta.sh
samples/bpf/xdp_fwd_kern.c
samples/bpf/xdpsock_user.c
samples/vfio-mdev/mbochs.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.modbuiltin
scripts/Makefile.modinst
scripts/Makefile.modpost
scripts/Makefile.modsign
scripts/cc-can-link.sh
scripts/checkpatch.pl
scripts/extract-vmlinux
scripts/gcc-x86_64-has-stack-protector.sh
scripts/kconfig/expr.h
scripts/kconfig/preprocess.c
scripts/kconfig/zconf.y
scripts/tags.sh
security/keys/dh.c
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
sound/core/rawmidi.c
sound/core/seq/seq_clientmgr.c
sound/core/timer.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/lx6464es/lx6464es.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/unistd.h
tools/arch/x86/include/asm/cpufeatures.h
tools/bpf/bpftool/common.c
tools/bpf/bpftool/perf.c
tools/bpf/bpftool/prog.c
tools/build/Build.include
tools/build/Makefile
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/objtool/check.c
tools/objtool/elf.c
tools/perf/Documentation/perf-stat.txt
tools/perf/Makefile.config
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/perf_regs.c
tools/perf/bench/numa.c
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/jvmti/jvmti_agent.c
tools/perf/pmu-events/Build
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
tools/perf/scripts/python/sched-migration.py
tools/perf/tests/builtin-test.c
tools/perf/tests/parse-events.c
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/tests/topology.c
tools/perf/ui/gtk/hists.c
tools/perf/util/c++/clang.cpp
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
tools/perf/util/llvm-utils.c
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/sort.h
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/bpf/test_lirc_mode2.sh
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/bpf/test_tunnel.sh
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc [new file with mode: 0644]
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh [changed mode: 0644->0755]
tools/testing/selftests/net/udpgso_bench.sh
tools/testing/selftests/pstore/pstore_post_reboot_tests
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq-arm.h
tools/testing/selftests/rseq/rseq-mips.h [new file with mode: 0644]
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/run_param_test.sh [changed mode: 0644->0755]
tools/testing/selftests/sparc64/Makefile
tools/testing/selftests/sparc64/drivers/Makefile
tools/testing/selftests/static_keys/test_static_keys.sh
tools/testing/selftests/sync/config [new file with mode: 0644]
tools/testing/selftests/sysctl/sysctl.sh
tools/testing/selftests/user/test_user_copy.sh
tools/testing/selftests/vm/compaction_test.c
tools/testing/selftests/vm/mlock2-tests.c
tools/testing/selftests/vm/run_vmtests
tools/testing/selftests/vm/userfaultfd.c
tools/testing/selftests/x86/sigreturn.c
tools/testing/selftests/zram/zram.sh
tools/testing/selftests/zram/zram_lib.sh
tools/usb/ffs-test.c
tools/virtio/linux/scatterlist.h
virt/kvm/Kconfig
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

index efc7aa7a067099f6bacdb860cde13f2d876030a8..533ff5c68970aef7b71e976941e8b305f250f2d5 100644 (file)
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+
+       xhci-hcd.quirks         [USB,KNL]
+                       A hex value specifying a bitmask of supplemental xhci
+                       host controller quirks. The meaning of each bit can be
+                       found in the header drivers/usb/host/xhci.h.
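
A minimal usage sketch for this parameter: the value is a hex bitmask, so enabling a hypothetical quirk carried in bit 6 would look like this on the kernel command line:

	xhci-hcd.quirks=0x40

The actual bit assignments must be taken from drivers/usb/host/xhci.h for the kernel in use.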
index ab2fe0eda1d7c317faefab52363ce96755ac64d5..8f1d3de449b53fedcc78d1aee506e6882f2be90c 100644 (file)
@@ -324,8 +324,7 @@ Global Attributes
 
 ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
 control its functionality at the system level.  They are located in the
-``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all
-CPUs.
+``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs.
 
 Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
 argument is passed to the kernel in the command line.
@@ -379,6 +378,17 @@ argument is passed to the kernel in the command line.
        but it affects the maximum possible value of per-policy P-state limits
        (see `Interpretation of Policy Attributes`_ below for details).
 
+``hwp_dynamic_boost``
+       This attribute is only present if ``intel_pstate`` works in the
+       `active mode with the HWP feature enabled <Active Mode With HWP_>`_ in
+       the processor.  If set (equal to 1), it causes the minimum P-state limit
+       to be increased dynamically for a short time whenever a task previously
+       waiting on I/O is selected to run on a given logical CPU (the purpose
+       of this mechanism is to improve performance).
+
+       This setting has no effect on logical CPUs whose minimum P-state limit
+       is directly set to the highest non-turbo P-state or above it.
+
 .. _status_attr:
 
 ``status``
@@ -410,7 +420,7 @@ argument is passed to the kernel in the command line.
        That is only supported in some configurations, though (for example, if
        the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
        the operation mode of the driver cannot be changed), and if it is not
-       supported in the current configuration, writes to this attribute with
+       supported in the current configuration, writes to this attribute will
        fail with an appropriate error.
 
 Interpretation of Policy Attributes
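
A short usage sketch for the new ``hwp_dynamic_boost`` attribute described above (this assumes ``intel_pstate`` is running in active mode with HWP enabled, since the attribute is absent otherwise):

	# enable dynamic boosting of the minimum P-state for I/O waiters
	echo 1 > /sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost
	# read the current setting back
	cat /sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost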
index 8e44aea366c262068900cddaabd240d8615ac552..76fe2d0f5e7d7db307bfa4ead890ead2d8840bdd 100644 (file)
@@ -284,7 +284,7 @@ Resources Management
 MTRR Handling
 -------------
 
-.. kernel-doc:: arch/x86/kernel/cpu/mtrr/main.c
+.. kernel-doc:: arch/x86/kernel/cpu/mtrr/mtrr.c
    :export:
 
 Security Framework
index 4424fa2c67d79ebbdfc3aea44d0bd93fa01407c3..01532b3008ae56bb9db1dc1db7d9b9709db5965e 100644 (file)
@@ -15,6 +15,8 @@ Constructor parameters:
    size)
 5. the number of optional parameters (the parameters with an argument
    count as two)
+       start_sector n          (default: 0)
+               offset from the start of the cache device in 512-byte sectors
        high_watermark n        (default: 50)
                start writeback when the number of used blocks reaches this
                watermark
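
A sketch of a writecache table using the new start_sector parameter (the device paths and sizes here are hypothetical; "s" selects SSD mode, and the optional-parameter count is 4 because each parameter with an argument counts as two):

	dmsetup create wc --table "0 409600 writecache s /dev/sdb /dev/nvme0n1p1 4096 4 start_sector 0 high_watermark 50"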
index bdadc3da9556d47e52372f0a68846779dccc1d95..6970f30a3770f8027a2509aab25f4fe75785667e 100644 (file)
@@ -66,7 +66,7 @@ Required root node properties:
        - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale
                                    Octa board.
        - "insignal,origen"       - for Exynos4210-based Insignal Origen board.
-       - "insignal,origen4412    - for Exynos4412-based Insignal Origen board.
+       - "insignal,origen4412"   - for Exynos4412-based Insignal Origen board.
 
 
 Optional nodes:
index 284e2b14cfbe0a291416973ce429ee4b9aa05300..26649b4c4dd8de41ab16a6f76f8e73dcac2525a1 100644 (file)
@@ -74,6 +74,12 @@ Required properties for DSI:
                The 3 clocks output from the DSI analog PHY: dsi[01]_byte,
                dsi[01]_ddr2, and dsi[01]_ddr
 
+Required properties for the TXP (writeback) block:
+- compatible:  Should be "brcm,bcm2835-txp"
+- reg:         Physical base address and length of the TXP block's registers
+- interrupts:  The interrupt number
+                 See bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+
 [1] Documentation/devicetree/bindings/media/video-interfaces.txt
 
 Example:
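
A sketch of a device tree node for the new TXP writeback block (the unit address, register length and interrupt number below follow the BCM2835 layout but should be treated as illustrative):

	txp: txp@7e004000 {
		compatible = "brcm,bcm2835-txp";
		reg = <0x7e004000 0x20>;
		interrupts = <1 11>;
	};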
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9341.txt b/Documentation/devicetree/bindings/display/ilitek,ili9341.txt
new file mode 100644 (file)
index 0000000..169b32e
--- /dev/null
@@ -0,0 +1,27 @@
+Ilitek ILI9341 display panels
+
+This binding is for display panels using an Ilitek ILI9341 controller in SPI
+mode.
+
+Required properties:
+- compatible:  "adafruit,yx240qv29", "ilitek,ili9341"
+- dc-gpios:    D/C pin
+- reset-gpios: Reset pin
+
+The node for this driver must be a child node of an SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation:    panel rotation in degrees counterclockwise (0, 90, 180, 270)
+- backlight:   phandle of the backlight device attached to the panel
+
+Example:
+       display@0 {
+               compatible = "adafruit,yx240qv29", "ilitek,ili9341";
+               reg = <0>;
+               spi-max-frequency = <32000000>;
+               dc-gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+               rotation = <270>;
+               backlight = <&backlight>;
+       };
index 383183a89164de7310ca5d2e241d80e14f991fca..8469de510001b25cc173b4ab2f44d1e27e9acc21 100644 (file)
@@ -40,7 +40,7 @@ Required properties (all function blocks):
        "mediatek,<chip>-dpi"        - DPI controller, see mediatek,dpi.txt
        "mediatek,<chip>-disp-mutex" - display mutex
        "mediatek,<chip>-disp-od"    - overdrive
-  the supported chips are mt2701 and mt8173.
+  the supported chips are mt2701, mt2712 and mt8173.
 - reg: Physical base address and length of the function block register space
 - interrupts: The interrupt signal from the function block (required, except for
   merge and split function blocks).
diff --git a/Documentation/devicetree/bindings/display/msm/dpu.txt b/Documentation/devicetree/bindings/display/msm/dpu.txt
new file mode 100644 (file)
index 0000000..ad2e883
--- /dev/null
@@ -0,0 +1,131 @@
+Qualcomm Technologies, Inc. DPU KMS
+
+Description:
+
+Device tree bindings for the MSM Mobile Display Subsystem (MDSS), which
+encapsulates sub-blocks such as the DPU display controller and the DSI and DP
+interfaces. The DPU display controller is found in the SDM845 SoC.
+
+MDSS:
+Required properties:
+- compatible: "qcom,sdm845-mdss"
+- reg: physical base address and length of the controller's registers.
+- reg-names: register region names. The following region is required:
+  * "mdss"
+- power-domains: a power domain consumer specifier according to
+  Documentation/devicetree/bindings/power/power_domain.txt
+- clocks: list of clock specifiers for clocks needed by the device.
+- clock-names: device clock names, must be in same order as clocks property.
+  The following clocks are required:
+  * "iface"
+  * "bus"
+  * "core"
+- interrupts: interrupt signal from MDSS.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+  source, should be 1.
+- iommus: phandle of iommu device node.
+- #address-cells: number of address cells for the MDSS children. Should be 1.
+- #size-cells: Should be 1.
+- ranges: parent bus address space is the same as the child bus address space.
+
+Optional properties:
+- assigned-clocks: list of clock specifiers for clocks needing rate assignment
+- assigned-clock-rates: list of clock frequencies sorted in the same order as
+  the assigned-clocks property.
+
+MDP:
+Required properties:
+- compatible: "qcom,sdm845-dpu"
+- reg: physical base address and length of controller's registers.
+- reg-names: register region names. The following regions are required:
+  * "mdp"
+  * "vbif"
+- clocks: list of clock specifiers for clocks needed by the device.
+- clock-names: device clock names, must be in same order as clocks property.
+  The following clocks are required:
+  * "bus"
+  * "iface"
+  * "core"
+  * "vsync"
+- interrupts: interrupt line from DPU to MDSS.
+- ports: contains the list of output ports from the DPU device. These ports
+  connect to interfaces that are external to the DPU hardware, such as DSI and DP.
+
+  Each output port contains an endpoint that describes how it is connected to an
+  external interface. These are described by the standard properties documented
+  here:
+       Documentation/devicetree/bindings/graph.txt
+       Documentation/devicetree/bindings/media/video-interfaces.txt
+
+       Port 0 -> DPU_INTF1 (DSI1)
+       Port 1 -> DPU_INTF2 (DSI2)
+
+Optional properties:
+- assigned-clocks: list of clock specifiers for clocks needing rate assignment
+- assigned-clock-rates: list of clock frequencies sorted in the same order as
+  the assigned-clocks property.
+
+Example:
+
+       mdss: mdss@ae00000 {
+               compatible = "qcom,sdm845-mdss";
+               reg = <0xae00000 0x1000>;
+               reg-names = "mdss";
+
+               power-domains = <&clock_dispcc 0>;
+
+               clocks = <&gcc GCC_DISP_AHB_CLK>, <&gcc GCC_DISP_AXI_CLK>,
+                        <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
+               clock-names = "iface", "bus", "core";
+
+               assigned-clocks = <&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
+               assigned-clock-rates = <300000000>;
+
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-controller;
+               #interrupt-cells = <1>;
+
+               iommus = <&apps_iommu 0>;
+
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0 0xae00000 0xb2008>;
+
+               mdss_mdp: mdp@ae01000 {
+                       compatible = "qcom,sdm845-dpu";
+                       reg = <0 0x1000 0x8f000>, <0 0xb0000 0x2008>;
+                       reg-names = "mdp", "vbif";
+
+                       clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+                                <&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
+                                <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+                                <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+                       clock-names = "iface", "bus", "core", "vsync";
+
+                       assigned-clocks = <&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+                                         <&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+                       assigned-clock-rates = <0 0 300000000 19200000>;
+
+                       interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               port@0 {
+                                       reg = <0>;
+                                       dpu_intf1_out: endpoint {
+                                               remote-endpoint = <&dsi0_in>;
+                                       };
+                               };
+
+                               port@1 {
+                                       reg = <1>;
+                                       dpu_intf2_out: endpoint {
+                                               remote-endpoint = <&dsi1_in>;
+                                       };
+                               };
+                       };
+               };
+       };
index 518e9cdf0d4bf17fab404b6dc85621888a08050e..d22237a88eae7e439c10e50496ca8a82ecb80601 100644 (file)
@@ -121,6 +121,20 @@ Required properties:
 Optional properties:
 - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
   regulator is wanted.
+- qcom,mdss-mdp-transfer-time-us:      Specifies the DSI transfer time for command mode
+                                       panels in microseconds. The driver uses this number
+                                       to adjust the clock rate according to the expected
+                                       transfer time. Increasing this value slows down MDP
+                                       processing and can result in lower performance.
+                                       Decreasing this value can speed up MDP processing,
+                                       but this can also impact power consumption.
+                                       As a rule, this time should not be higher than the
+                                       time expected when processing at the DSI link rate,
+                                       since that is in any case the maximum transfer time
+                                       that can be achieved.
+                                       If ping-pong split is enabled, this time should not
+                                       be higher than twice the DSI link rate time.
+                                       If the property is not specified, the default value
+                                       is 14000 us.
 
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
 [2] Documentation/devicetree/bindings/graph.txt
@@ -171,6 +185,8 @@ Example:
                qcom,master-dsi;
                qcom,sync-dual-dsi;
 
+               qcom,mdss-mdp-transfer-time-us = <12000>;
+
                pinctrl-names = "default", "sleep";
                pinctrl-0 = <&dsi_active>;
                pinctrl-1 = <&dsi_suspend>;
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
new file mode 100644 (file)
index 0000000..49e4105
--- /dev/null
@@ -0,0 +1,29 @@
+AU Optronics Corporation 7.0" WVGA (800 x 480) TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g070vvn01"
+- backlight: phandle of the backlight device attached to the panel
+- power-supply: single regulator to provide the supply voltage
+
+Required nodes:
+- port: Parallel port mapping to connect this display
+
+This panel needs a single power supply voltage. Its backlight is controlled
+via a PWM signal.
+
+Example:
+--------
+
+Example device tree definition when connected to an i.MX6Q-based board:
+
+       lcd_panel: lcd-panel {
+               compatible = "auo,g070vvn01";
+               backlight = <&backlight_lcd>;
+               power-supply = <&reg_display>;
+
+               port {
+                       lcd_panel_in: endpoint {
+                               remote-endpoint = <&lcd_display_out>;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
new file mode 100644 (file)
index 0000000..55183d3
--- /dev/null
@@ -0,0 +1,28 @@
+BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "boe,hv070wsa-100"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+- enable-gpios: GPIO pin to enable and disable the panel (active high)
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [1]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+
+       panel: panel {
+               compatible = "boe,hv070wsa-100";
+               power-supply = <&vcc_3v3_reg>;
+               enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>;
+               port {
+                       panel_ep: endpoint {
+                               remote-endpoint = <&bridge_out_ep>;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
new file mode 100644 (file)
index 0000000..897085e
--- /dev/null
@@ -0,0 +1,8 @@
+DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
+
+Required properties:
+- compatible: should be "dataimage,scf0700c48ggu18"
+- power-supply: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
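
A sketch of how this panel might be described, following the simple-panel binding (the regulator and endpoint phandles are hypothetical):

	panel {
		compatible = "dataimage,scf0700c48ggu18";
		power-supply = <&reg_lcd_3v3>;

		port {
			panel_in: endpoint {
				remote-endpoint = <&lcdif_out>;
			};
		};
	};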
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.txt
new file mode 100644 (file)
index 0000000..bf06bb0
--- /dev/null
@@ -0,0 +1,13 @@
+DLC Display Co. DLC0700YZG-1 7.0" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "dlc,dlc0700yzg-1"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- reset-gpios: See panel-common.txt
+- enable-gpios: See simple-panel.txt
+- backlight: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
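
A sketch of a node for this panel under the simple-panel binding (the regulator and backlight phandles are hypothetical):

	panel {
		compatible = "dlc,dlc0700yzg-1";
		power-supply = <&reg_lcd>;
		backlight = <&backlight_lvds>;
	};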
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
new file mode 100644 (file)
index 0000000..f56b99e
--- /dev/null
@@ -0,0 +1,39 @@
+Emerging Display Technology Corp. Displays
+==========================================
+
+
+Display bindings for Emerging Display Technology Corp. displays that are
+compatible with the simple-panel binding, which is specified in
+simple-panel.txt
+
+
+5.7" VGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier      | compatible          | description                         |
++=================+=====================+=====================================+
+| ET057090DHU     | edt,et057090dhu     | 5.7" VGA TFT LCD panel              |
++-----------------+---------------------+-------------------------------------+
+
+
+7.0" WVGA TFT Panels
+--------------------
+
++-----------------+---------------------+-------------------------------------+
+| Identifier      | compatible          | description                         |
++=================+=====================+=====================================+
+| ETM0700G0DH6    | edt,etm0700g0dh6    | WVGA TFT Display with capacitive    |
+|                 |                     | Touchscreen                         |
++-----------------+---------------------+-------------------------------------+
+| ETM0700G0BDH6   | edt,etm0700g0bdh6   | Same as ETM0700G0DH6 but with       |
+|                 |                     | inverted pixel clock.               |
++-----------------+---------------------+-------------------------------------+
+| ETM0700G0EDH6   | edt,etm0700g0edh6   | Same display as the ETM0700G0BDH6,  |
+|                 |                     | but with changed hardware for the   |
+|                 |                     | backlight and the touch interface   |
++-----------------+---------------------+-------------------------------------+
+| ET070080DH6     | edt,et070080dh6     | Same timings as the ETM0700G0DH6,   |
+|                 |                     | but with resistive touch.           |
++-----------------+---------------------+-------------------------------------+
+
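
A sketch of a node for one of the panels in the tables above, using the simple-panel binding (the backlight phandle is hypothetical):

	panel {
		compatible = "edt,et057090dhu";
		backlight = <&backlight>;
	};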
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,et070080dh6.txt
deleted file mode 100644 (file)
index 20cb38e..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-Emerging Display Technology Corp. ET070080DH6 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "edt,et070080dh6"
-
-This panel is the same as ETM0700G0DH6 except for the touchscreen.
-ET070080DH6 is the model with resistive touch.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt b/Documentation/devicetree/bindings/display/panel/edt,etm0700g0dh6.txt
deleted file mode 100644 (file)
index ee4b180..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-Emerging Display Technology Corp. ETM0700G0DH6 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "edt,etm0700g0dh6"
-
-This panel is the same as ET070080DH6 except for the touchscreen.
-ETM0700G0DH6 is the model with capacitive multitouch.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.txt
new file mode 100644 (file)
index 0000000..4a041ac
--- /dev/null
@@ -0,0 +1,20 @@
+Ilitek ILI9881c based MIPI-DSI panels
+
+Required properties:
+  - compatible: must be "ilitek,ili9881c" and one of:
+    * "bananapi,lhr050h41"
+  - reg: DSI virtual channel used by that screen
+  - power-supply: phandle to the power regulator
+  - reset-gpios: a GPIO phandle for the reset pin
+
+Optional properties:
+  - backlight: phandle to the backlight used
+
+Example:
+panel@0 {
+       compatible = "bananapi,lhr050h41", "ilitek,ili9881c";
+       reg = <0>;
+       power-supply = <&reg_display>;
+       reset-gpios = <&r_pio 0 5 GPIO_ACTIVE_LOW>; /* PL05 */
+       backlight = <&pwm_bl>;
+};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
new file mode 100644 (file)
index 0000000..7c234cf
--- /dev/null
@@ -0,0 +1,12 @@
+Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,g070y2-l01"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
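+
+Example (an illustrative sketch only; the regulator, GPIO and backlight
+phandles are hypothetical):
+
+panel {
+       compatible = "innolux,g070y2-l01";
+       power-supply = <&vcc_lcd>;                      /* hypothetical */
+       enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;    /* hypothetical */
+       backlight = <&backlight>;                       /* hypothetical */
+};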
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
new file mode 100644 (file)
index 0000000..595d9df
--- /dev/null
@@ -0,0 +1,24 @@
+Innolux P097PFG 9.7" 1536x2048 TFT LCD panel
+
+Required properties:
+- compatible: should be "innolux,p097pfg"
+- reg: DSI virtual channel of the peripheral
+- avdd-supply: phandle of the regulator that provides positive voltage
+- avee-supply: phandle of the regulator that provides negative voltage
+- enable-gpios: panel enable gpio
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+       &mipi_dsi {
+               panel {
+                       compatible = "innolux,p097pfg";
+                       reg = <0>;
+                       avdd-supply = <...>;
+                       avee-supply = <...>;
+                       backlight = <&backlight>;
+                       enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
new file mode 100644 (file)
index 0000000..a9b3526
--- /dev/null
@@ -0,0 +1,20 @@
+Innolux TV123WAM 12.3 inch eDP 2K display panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "innolux,tv123wam"
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+       panel_edp: panel-edp {
+               compatible = "innolux,tv123wam";
+               enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+               power-supply = <&pm8916_l2>;
+               backlight = <&backlight>;
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
new file mode 100644 (file)
index 0000000..164a5fa
--- /dev/null
@@ -0,0 +1,22 @@
+Kingdisplay KD097D04 9.7" 1536x2048 TFT LCD panel
+
+Required properties:
+- compatible: should be "kingdisplay,kd097d04"
+- reg: DSI virtual channel of the peripheral
+- power-supply: phandle of the regulator that provides the supply voltage
+- enable-gpios: panel enable gpio
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+       &mipi_dsi {
+               panel {
+                       compatible = "kingdisplay,kd097d04";
+                       reg = <0>;
+                       power-supply = <...>;
+                       backlight = <&backlight>;
+                       enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+               };
+       };
similarity index 55%
rename from Documentation/devicetree/bindings/display/panel/edt,et057090dhu.txt
rename to Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
index 4903d7b1d947fc1a79079504b913b4636d900d2e..e78292b1a131e3d96aa85a7ffd77134e7a8a73ba 100644 (file)
@@ -1,7 +1,7 @@
-Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+Newhaven Display International 480 x 272 TFT LCD panel
 
 Required properties:
-- compatible: should be "edt,et057090dhu"
+- compatible: should be "newhaven,nhd-4.3-480272ef-atxl"
 
 This binding is compatible with the simple-panel binding, which is specified
 in simple-panel.txt in this directory.
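+
+Example (an illustrative sketch only; the supply and backlight phandles are
+hypothetical):
+
+panel {
+       compatible = "newhaven,nhd-4.3-480272ef-atxl";
+       power-supply = <&reg_3v3>;      /* hypothetical regulator */
+       backlight = <&backlight>;       /* hypothetical backlight node */
+};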
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
new file mode 100644 (file)
index 0000000..eb1fb9f
--- /dev/null
@@ -0,0 +1,25 @@
+Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "rocktech,rk070er9427"
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for LCD panel input.
+
+Example:
+       panel {
+               compatible = "rocktech,rk070er9427";
+               backlight = <&backlight_lcd>;
+
+               port {
+                       lcd_panel_in: endpoint {
+                               remote-endpoint = <&lcd_display_out>;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
new file mode 100644 (file)
index 0000000..0753f69
--- /dev/null
@@ -0,0 +1,12 @@
+Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq035q7db03"
+- power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
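+
+Example (an illustrative sketch only; the regulator, GPIO and backlight
+phandles are hypothetical):
+
+panel {
+       compatible = "sharp,lq035q7db03";
+       power-supply = <&vdd_lcd>;                      /* hypothetical */
+       enable-gpios = <&gpio2 13 GPIO_ACTIVE_HIGH>;    /* hypothetical */
+       backlight = <&backlight>;                       /* hypothetical */
+};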
index 3346c1e2a7a00a8ed0a7cf7685d53cf354397aa8..f8773ecb75252f91ea1e80cc47574b9781888de2 100644 (file)
@@ -103,6 +103,7 @@ Required properties:
   - compatible: value must be one of:
     * allwinner,sun8i-a83t-hdmi-phy
     * allwinner,sun8i-h3-hdmi-phy
+    * allwinner,sun50i-a64-hdmi-phy
   - reg: base address and size of memory-mapped region
   - clocks: phandles to the clocks feeding the HDMI PHY
     * bus: the HDMI PHY interface clock
@@ -111,8 +112,9 @@ Required properties:
   - resets: phandle to the reset controller driving the PHY
   - reset-names: must be "phy"
 
-H3 HDMI PHY requires additional clock:
+H3 and A64 HDMI PHY require additional clocks:
   - pll-0: parent of phy clock
+  - pll-1: second possible phy clock parent (A64 only)
 
 TV Encoder
 ----------
@@ -145,6 +147,7 @@ Required properties:
    * allwinner,sun8i-a33-tcon
    * allwinner,sun8i-a83t-tcon-lcd
    * allwinner,sun8i-a83t-tcon-tv
+   * allwinner,sun8i-r40-tcon-tv
    * allwinner,sun8i-v3s-tcon
    * allwinner,sun9i-a80-tcon-lcd
    * allwinner,sun9i-a80-tcon-tv
@@ -179,7 +182,7 @@ For TCONs with channel 0, there is one more clock required:
 For TCONs with channel 1, there is one more clock required:
    - 'tcon-ch1': The clock driving the TCON channel 1
 
-When TCON support LVDS (all TCONs except TV TCON on A83T and those found
+When the TCON supports LVDS (all TCONs except TV TCONs on A83T, R40 and those found
 in A13, H3, H5 and V3s SoCs), you need one more reset line:
    - 'lvds': The reset line driving the LVDS logic
 
@@ -187,6 +190,62 @@ And on the A23, A31, A31s and A33, you need one more clock line:
    - 'lvds-alt': An alternative clock source, separate from the TCON channel 0
                  clock, that can be used to drive the LVDS clock
 
+TCON TOP
+--------
+
+The TCON TOP's main purpose is to configure the whole display pipeline. It
+determines the relationships between mixers and TCONs, selects the source TCON
+for HDMI, muxes LCD and TV encoder GPIO output, selects the TV encoder clock
+source and contains additional TV TCON and DSI gates.
+
+It allows the display pipeline to be configured in many different ways:
+
+                                / LCD0/LVDS0
+                 / [0] TCON-LCD0
+                 |              \ MIPI DSI
+ mixer0          |
+        \        / [1] TCON-LCD1 - LCD1/LVDS1
+         TCON-TOP
+        /        \ [2] TCON-TV0 [0] - TVE0/RGB
+ mixer1          |                  \
+                 |                   TCON-TOP - HDMI
+                 |                  /
+                 \ [3] TCON-TV1 [1] - TVE1/RGB
+
+Note that both TCON TOP boxes in the graph above refer to the same physical
+unit. Both mixers can be connected to any TCON.
+
+Required properties:
+  - compatible: value must be one of:
+    * allwinner,sun8i-r40-tcon-top
+  - reg: base address and size of the memory-mapped region.
+  - clocks: phandles to the clocks feeding the TCON TOP
+    * bus: TCON TOP interface clock
+    * tcon-tv0: TCON TV0 clock
+    * tve0: TVE0 clock
+    * tcon-tv1: TCON TV1 clock
+    * tve1: TVE1 clock
+    * dsi: MIPI DSI clock
+  - clock-names: clock names mentioned above
+  - resets: phandle to the reset line driving the TCON TOP
+  - #clock-cells: must contain 1
+  - clock-output-names: Names of clocks created for TCON TV0 channel clock,
+    TCON TV1 channel clock and DSI channel clock, in that order.
+
+  - ports: A ports node with endpoint definitions as defined in
+    Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
+    be defined:
+    * port 0 is input for mixer0 mux
+    * port 1 is output for mixer0 mux
+    * port 2 is input for mixer1 mux
+    * port 3 is output for mixer1 mux
+    * port 4 is input for HDMI mux
+    * port 5 is output for HDMI mux
+    All output endpoints for mixer muxes and input endpoints for the HDMI mux
+    should have a reg property with the index of the target TCON, as shown in
+    the graph above (0-3 for mixer muxes and 0-1 for the HDMI mux). Each port
+    should have only one endpoint connected to a remote endpoint.
+
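+Example (an illustrative sketch only; the unit address, the clock and reset
+specifiers and the CLK_*/RST_* macro names are hypothetical, and the ports
+node is omitted for brevity):
+
+tcon_top: tcon-top@1c70000 {
+       compatible = "allwinner,sun8i-r40-tcon-top";
+       reg = <0x01c70000 0x1000>;
+       /* hypothetical CCU clock indices, one per clock-names entry */
+       clocks = <&ccu CLK_BUS_TCON_TOP>, <&ccu CLK_TCON_TV0>,
+                <&ccu CLK_TVE0>, <&ccu CLK_TCON_TV1>,
+                <&ccu CLK_TVE1>, <&ccu CLK_DSI_DPHY>;
+       clock-names = "bus", "tcon-tv0", "tve0",
+                     "tcon-tv1", "tve1", "dsi";
+       resets = <&ccu RST_BUS_TCON_TOP>;       /* hypothetical reset index */
+       #clock-cells = <1>;
+       clock-output-names = "tcon-top-tv0", "tcon-top-tv1", "tcon-top-dsi";
+};
+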
 DRC
 ---
 
@@ -341,6 +400,7 @@ Required properties:
     * allwinner,sun8i-a33-display-engine
     * allwinner,sun8i-a83t-display-engine
     * allwinner,sun8i-h3-display-engine
+    * allwinner,sun8i-r40-display-engine
     * allwinner,sun8i-v3s-display-engine
     * allwinner,sun9i-a80-display-engine
 
index 6fddb4f4f71a45f0fc001b7f904a89f6e50948b3..3055d5c2c04e0ab796215803196c7590a69e02be 100644 (file)
@@ -36,7 +36,7 @@ Optional nodes:
 
  - port/ports: to describe a connection to an external encoder. The
    binding follows Documentation/devicetree/bindings/graph.txt and
-   suppors a single port with a single endpoint.
+   supports a single port with a single endpoint.
 
  - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
    Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
index 20fc72d9e61e5721e56e0aeb0479682f921fd154..45a61b46228712592029e75fe117262ba47d9112 100644 (file)
@@ -1,7 +1,7 @@
 Nintendo Wii (Hollywood) GPIO controller
 
 Required properties:
-- compatible: "nintendo,hollywood-gpio
+- compatible: "nintendo,hollywood-gpio"
 - reg: Physical base address and length of the controller's registers.
 - gpio-controller: Marks the device node as a GPIO controller.
 - #gpio-cells: Should be <2>. The first cell is the pin number and the
diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
new file mode 100644 (file)
index 0000000..f2ec0d4
--- /dev/null
@@ -0,0 +1,23 @@
+Spreadtrum SC27xx PMIC Vibrator
+
+Required properties:
+- compatible: should be "sprd,sc2731-vibrator".
+- reg: address of vibrator control register.
+
+Example:
+
+       sc2731_pmic: pmic@0 {
+               compatible = "sprd,sc2731";
+               reg = <0>;
+               spi-max-frequency = <26000000>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               vibrator@eb4 {
+                       compatible = "sprd,sc2731-vibrator";
+                       reg = <0xeb4>;
+               };
+       };
index 121d9b7c79a24cd05e6452bb8b52a14d3d20f46a..1063c30d53f7d0fd7b642d323d32799ba4fb51fe 100644 (file)
@@ -32,7 +32,7 @@ i2c@00000000 {
                reg = <0x6c>;
                interrupt-parent = <&gpx1>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
-               vdd-supply = <&ldo15_reg>";
+               vdd-supply = <&ldo15_reg>;
                vid-supply = <&ldo18_reg>;
                reset-gpios = <&gpx1 5 0>;
                touchscreen-size-x = <1080>;
index 1099fe0788fae19c27dd1153e6d9d9e4aba10c6f..f246ccbf8838c2c90496572af8aa4e4d17079be1 100644 (file)
@@ -15,7 +15,7 @@ Required properties:
   include "nvidia,tegra30-ictlr".      
 - reg : Specifies base physical address and size of the registers.
   Each controller must be described separately (Tegra20 has 4 of them,
-  whereas Tegra30 and later have 5"  
+  whereas Tegra30 and later have 5).
 - interrupt-controller : Identifies the node as an interrupt controller.
 - #interrupt-cells : Specifies the number of cells needed to encode an
   interrupt source. The value must be 3.
index 136bd612bd8359488447b9ae12b335ffb083ba80..6a36bf66d932d42320cfc6b3c998b16488609d61 100644 (file)
@@ -12,7 +12,7 @@ Required properties:
   specifier, shall be 2
 - interrupts: interrupts references to primary interrupt controller
   (only needed for exti controller with multiple exti under
-  same parent interrupt: st,stm32-exti and st,stm32h7-exti")
+  same parent interrupt: st,stm32-exti and st,stm32h7-exti)
 
 Example:
 
index 356c29789cf54862e1ece93dc40449e221481304..3a66d3c483e1aad12298fcf297767931db09051a 100644 (file)
@@ -152,7 +152,7 @@ Required properties:
 - compatible   : should contain one of:
                  "brcm,bcm7425-timers"
                  "brcm,bcm7429-timers"
-                 "brcm,bcm7435-timers and
+                 "brcm,bcm7435-timers" and
                  "brcm,brcmstb-timers"
 - reg          : the timers register range
 - interrupts   : the interrupt line for this timer block
index df873d1f3b7c598b6c30721d3eec915a20ea8621..f8c33890bc2970e08bf44934835a9b8c464675f1 100644 (file)
@@ -238,7 +238,7 @@ PROPERTIES
                Must include one of the following:
                - "fsl,fman-dtsec" for dTSEC MAC
                - "fsl,fman-xgec" for XGEC MAC
-               - "fsl,fman-memac for mEMAC MAC
+               - "fsl,fman-memac" for mEMAC MAC
 
 - cell-index
                Usage: required
index 9b387f861aed166bda522f6e3d4ebb8856a49218..7dec508987c75c70ac194876df9d4f8019586aab 100644 (file)
@@ -133,7 +133,7 @@ located inside a PM domain with index 0 of a power controller represented by a
 node with the label "power".
In the second example the consumer device is partitioned across two PM domains,
 the first with index 0 and the second with index 1, of a power controller that
-is represented by a node with the label "power.
+is represented by a node with the label "power".
 
 Optional properties:
 - required-opps: This contains phandle to an OPP node in another device's OPP
index ca69f5e3040cfa48299682dd6371f99c90b49ffa..ae326f26359740bce4fe7ac119288447649b6429 100644 (file)
@@ -16,7 +16,7 @@ Required properties:
 Optional properties:
 - ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3.
   If DCDCs are externally controlled then this property should be there.
-- "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
+- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
   If DCDCs are externally controlled and if it is from GPIO then GPIO
   number should be provided. If it is externally controlled and no GPIO
   entry then driver will just configure this rails as external control
index a21658f18fe6d7d593adece10e056e072fa2c5e4..3661e6153a92bf8df66cea43d5f41415cc497786 100644 (file)
@@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset
 controller binding usage.
 
 Required properties:
-- compatible: Should be st,stih407-softreset";
+- compatible: Should be "st,stih407-softreset";
 - #reset-cells: 1, see below
 
 example:
index d330c73de9a2e0103aabc3cf365d02974faee73c..68b7d6207e3d75acd51400da27e5ca292c5026d0 100644 (file)
@@ -39,7 +39,7 @@ Required properties:
 
 Optional property:
 - clock-frequency:     Desired I2C bus clock frequency in Hz.
-                       When missing default to 400000Hz.
+                       When missing default to 100000Hz.
 
 Child nodes should conform to I2C bus binding as described in i2c.txt.
 
index 6a4aadc4ce06b27ff059c64f6c438d0fef863b21..84b28dbe9f15452bbe341f3dbf5e6f5452b72a19 100644 (file)
@@ -30,7 +30,7 @@ Required properties:
 
                          Board connectors:
                          * Headset Mic
-                         * Secondary Mic",
+                         * Secondary Mic
                          * DMIC
                          * Ext Spk
 
index aa54e49fc8a26b397232f5ee340b7472f0b57a1c..c7600a93ab39e58bb62cc02e1f77a2d5132f1b08 100644 (file)
@@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
                        "Digital Mic3"
 
                Audio pins and MicBias on WCD9335 Codec:
-                       "MIC_BIAS1
+                       "MIC_BIAS1"
                        "MIC_BIAS2"
                        "MIC_BIAS3"
                        "MIC_BIAS4"
index 252a05c5d976d56b039bcf04069ec9c6e9595dea..c8c4b00ecb941fe85144fb3efd8c5cfa4ec0e5e4 100644 (file)
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
 the node is not important. The content of the node is defined in dwc3.txt.
 
 Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt     - Type-C PHY
 
 Example device nodes:
 
index 7cad066191eeb8e6c9711cb81fd50283362fcf4d..2afaa633ffc893e4e994d84876033d858ef4d1cd 100644 (file)
@@ -8,6 +8,7 @@ abracon Abracon Corporation
 actions        Actions Semiconductor Co., Ltd.
 active-semi    Active-Semi International Inc
 ad     Avionic Design GmbH
+adafruit       Adafruit Industries, LLC
 adapteva       Adapteva, Inc.
 adaptrum       Adaptrum, Inc.
 adh    AD Holdings Plc.
@@ -85,6 +86,7 @@ cubietech     Cubietech, Ltd.
 cypress        Cypress Semiconductor Corporation
 cznic  CZ.NIC, z.s.p.o.
 dallas Maxim Integrated Products (formerly Dallas Semiconductor)
+dataimage      DataImage, Inc.
 davicom        DAVICOM Semiconductor, Inc.
 delta  Delta Electronics, Inc.
 denx   Denx Software Engineering
@@ -93,6 +95,7 @@ dh    DH electronics GmbH
 digi   Digi International Inc.
 digilent       Diglent, Inc.
 dioo   Dioo Microcircuit Co., Ltd
+dlc    DLC Display Co., Ltd.
 dlg    Dialog Semiconductor
 dlink  D-Link Corporation
 dmo    Data Modul AG
@@ -188,6 +191,7 @@ keymile     Keymile GmbH
 khadas Khadas
 kiebackpeter    Kieback & Peter GmbH
 kinetic Kinetic Technologies
+kingdisplay    King & Display Technology Co., Ltd.
 kingnovel      Kingnovel Technology Co., Ltd.
 koe    Kaohsiung Opto-Electronics Inc.
 kosagi Sutajio Ko-Usagi PTE Ltd.
index 6e09c35d9f1a281a0046ed2c07dfce1f1312f48f..37091902a0210328e76582426eaec0eaa3a7ae3d 100644 (file)
@@ -15,7 +15,7 @@ Optional properties:
 
 Examples:
 
-       onewire@0 {
+       onewire {
                compatible = "w1-gpio";
                gpios = <&gpio 126 0>, <&gpio 105 0>;
        };
index dc384f2f7f34c7fde3b892d62aefebf87daab3d8..b541e97c7ab1aa1226f286d3686a0317f4132d1d 100644 (file)
@@ -130,6 +130,12 @@ Reservation Objects
 DMA Fences
 ----------
 
+.. kernel-doc:: drivers/dma-buf/dma-fence.c
+   :doc: DMA fences overview
+
+DMA Fences Functions Reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 .. kernel-doc:: drivers/dma-buf/dma-fence.c
    :export:
 
index bee1b9a1702f1cc6c89811aff6b8bdbc1eefb0b0..6172f3cc3d0b2109916cfccda2da1836065f8766 100644 (file)
@@ -49,10 +49,10 @@ Device Drivers Base
 Device Drivers DMA Management
 -----------------------------
 
-.. kernel-doc:: drivers/base/dma-coherent.c
+.. kernel-doc:: kernel/dma/coherent.c
    :export:
 
-.. kernel-doc:: drivers/base/dma-mapping.c
+.. kernel-doc:: kernel/dma/mapping.c
    :export:
 
 Device drivers PnP support
index 79c22d096bbc0b83ffbbbc162aec23abaa59e08c..d4d642e1ce9ce4597e9d93759748a6ef8c9302b1 100644 (file)
@@ -155,6 +155,13 @@ C. Boot options
        used by text. By default, this area will be black. The 'color' value
        is an integer number that depends on the framebuffer driver being used.
 
+6. fbcon=nodefer
+
+       If the kernel is compiled with deferred fbcon takeover support, the
+       framebuffer contents, left in place by the firmware/bootloader, will
+       normally be preserved until some text is actually output to the console.
+       This option causes fbcon to bind immediately to the fbdev device.
+
 C. Attaching, Detaching and Unloading
 
 Before going on how to attach, detach and unload the framebuffer console, an
index 2c391338c6757f505eac6dfcbe98a169452ad305..37bf0a9de75cbe79794e653ff161e4a5eb37a97a 100644 (file)
@@ -441,8 +441,6 @@ prototypes:
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -473,7 +471,7 @@ prototypes:
 };
 
 locking rules:
-       All except for ->poll_mask may block.
+       All may block.
 
 ->llseek() locking has moved from llseek to the individual llseek
 implementations.  If your fs is not using generic_file_llseek, you
@@ -505,9 +503,6 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
-->poll_mask can be called with or without the waitqueue lock for the waitqueue
-returned from ->get_poll_head.
-
 --------------------------- dquot_operations -------------------------------
 prototypes:
        int (*write_dquot) (struct dquot *);
index 9f4f87e1624036349533adf9534bfd3c4b08535d..75865da2ce1475c27bea1050b3e80ff7160f6d6f 100644 (file)
@@ -42,9 +42,11 @@ Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
 Scott Lovenberg
 Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
 Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
-Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
 Shirish Pargaonkar (for many ACL patches over the years)
 Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+Paulo Alcantara
+Long Li (some great work on RDMA, SMB Direct)
 
 
 Test case and Bug Report contributors
@@ -58,5 +60,4 @@ mention to the Stanford Checker (SWAT) which pointed out many minor
 bugs in error paths.  Valuable suggestions also have come from Al Viro
 and Dave Miller.
 
-And thanks to the IBM LTC and Power test teams and SuSE testers for
-finding multiple bugs during excellent stress test runs.
+And thanks to the IBM LTC and Power test teams, and to the SuSE, Citrix and RedHat testers, for finding multiple bugs during excellent stress test runs.
index bc0025cdd1c9c0d285c32e8d7656103868126ca8..455e1cc494a9f2e78ee1d45b89bfbe5ee55048eb 100644 (file)
@@ -1,3 +1,6 @@
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for
+more current information.
+
 Version 1.62
 ------------
 Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
index c5adf149b57f7f8f6e2d0b104d5b74f6bc7f5f84..852499aed64b52bb321c0b9656b0b606a4710772 100644 (file)
@@ -9,14 +9,14 @@ is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.02) missing optional features:
    - multichannel (started), integration with RDMA
-   - directory leases (improved metadata caching)
-   - T10 copy offload (copy chunk, and "Duplicate Extents" ioctl
+   - directory leases (improved metadata caching), started (root dir only)
+   - T10 copy offload, i.e. "ODX" (copy chunk, and "Duplicate Extents" ioctl
      currently the only two server side copy mechanisms supported)
 
 b) improved sparse file support
 
 c) Directory entry caching relies on a 1 second timer, rather than
-using Directory Leases
+using Directory Leases; currently only the root file handle is cached longer
 
 d) quota support (needs minor kernel change since quota calls
 to make it to network filesystems or deviceless filesystems)
@@ -42,6 +42,8 @@ mount or a per server basis to client UIDs or nobody if no mapping
 exists. Also better integration with winbind for resolving SID owners
 
 k) Add tools to take advantage of more smb3 specific ioctls and features
+(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
+is in progress)
 
 l) encrypted file support
 
@@ -71,9 +73,8 @@ t) split cifs and smb3 support into separate modules so legacy (and less
 secure) CIFS dialect can be disabled in environments that don't need it
 and simplify the code.
 
-u) Finish up SMB3.1.1 dialect support
-
-v) POSIX Extensions for SMB3.1.1
+v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
+so far).
 
 KNOWN BUGS
 ====================================
@@ -92,8 +93,8 @@ Misc testing to do
 1) check out max path names and max path name components against various server
 types. Try nested symlinks (8 deep). Return max path name in stat -f information
 
-2) Improve xfstest's cifs enablement and adapt xfstests where needed to test
-cifs better
+2) Improve xfstest's cifs/smb3 enablement and adapt xfstests where needed to test
+cifs/smb3 better
 
 3) Additional performance testing and optimization using iozone and similar - 
 there are some easy changes that can be done to parallelize sequential writes,
index 829a7b7857a46904cfb7f02646212504a3a7f259..f608180ad59d71ab2bcc2d2d818699bfaaee1470 100644 (file)
@@ -857,8 +857,6 @@ struct file_operations {
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -903,17 +901,6 @@ otherwise noted.
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
 
-  get_poll_head: Returns the struct wait_queue_head that callers can
-  wait on.  Callers need to check the returned events using ->poll_mask
-  once woken.  Can return NULL to indicate polling is not supported,
-  or any error code using the ERR_PTR convention to indicate that a
-  grave error occured and ->poll_mask shall not be called.
-
-  poll_mask: return the mask of EPOLL* values describing the file descriptor
-  state.  Called either before going to sleep on the waitqueue returned by
-  get_poll_head, or after it has been woken.  If ->get_poll_head and
-  ->poll_mask are implemented ->poll does not need to be implement.
-
   unlocked_ioctl: called by the ioctl(2) system call.
 
   compat_ioctl: called by the ioctl(2) system call when 32 bit system calls
diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst
new file mode 100644 (file)
index 0000000..a740e49
--- /dev/null
@@ -0,0 +1,129 @@
+=========================
+ drm/amdgpu AMDgpu driver
+=========================
+
+The drm/amdgpu driver supports all AMD Radeon GPUs based on the Graphics Core
+Next (GCN) architecture.
+
+Module Parameters
+=================
+
+The amdgpu driver supports the following module parameters:
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+
+Core Driver Infrastructure
+==========================
+
+This section covers core driver infrastructure.
+
+.. _amdgpu_memory_domains:
+
+Memory Domains
+--------------
+
+.. kernel-doc:: include/uapi/drm/amdgpu_drm.h
+   :doc: memory domains
+
+Buffer Objects
+--------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+   :doc: amdgpu_object
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+   :internal:
+
+PRIME Buffer Sharing
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+   :doc: PRIME Buffer Sharing
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+   :internal:
+
+MMU Notifier
+------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+   :doc: MMU Notifier
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+   :internal:
+
+AMDGPU Virtual Memory
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+   :doc: GPUVM
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+   :internal:
+
+Interrupt Handling
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+   :doc: Interrupt Handling
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+   :internal:
+
+GPU Power/Thermal Controls and Monitoring
+=========================================
+
+This section covers hwmon and power/thermal controls.
+
+HWMON Interfaces
+----------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: hwmon
+
+GPU sysfs Power State Interfaces
+--------------------------------
+
+GPU power controls are exposed via sysfs files.
+
+power_dpm_state
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: power_dpm_state
+
+power_dpm_force_performance_level
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: power_dpm_force_performance_level
+
+pp_table
+~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_table
+
+pp_od_clk_voltage
+~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_od_clk_voltage
+
+pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie
+
+pp_power_profile_mode
+~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: pp_power_profile_mode
+
+busy_percent
+~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: busy_percent
index f982558fc25d4c2b9deeb9b7b529752f11e4916d..65be325bf282a77219b3c49a024e41d0192e6095 100644 (file)
@@ -4,6 +4,7 @@ GPU Driver Documentation
 
 .. toctree::
 
+   amdgpu
    i915
    meson
    pl111
diff --git a/Documentation/gpu/drm-client.rst b/Documentation/gpu/drm-client.rst
new file mode 100644 (file)
index 0000000..7e67206
--- /dev/null
@@ -0,0 +1,12 @@
+=================
+Kernel clients
+=================
+
+.. kernel-doc:: drivers/gpu/drm/drm_client.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_client.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_client.c
+   :export:
index e37557b30f620604dfddffa2f00e1d303e147f7d..f9cfcdcdf024fc88cbf8edf82d7c2fc1c4f4c8a7 100644 (file)
@@ -109,6 +109,15 @@ Framebuffer CMA Helper Functions Reference
 
 .. _drm_bridges:
 
+Framebuffer GEM Helper Reference
+================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+   :export:
+
 Bridges
 =======
 
@@ -169,6 +178,15 @@ Display Port Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
    :export:
 
+Display Port CEC Helper Functions Reference
+===========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+   :doc: dp cec helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_cec.c
+   :export:
+
 Display Port Dual Mode Adaptor Helper Functions Reference
 =========================================================
 
@@ -282,13 +300,13 @@ Auxiliary Modeset Helpers
 .. kernel-doc:: drivers/gpu/drm/drm_modeset_helper.c
    :export:
 
-Framebuffer GEM Helper Reference
-================================
+OF/DT Helpers
+=============
 
-.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_of.c
    :doc: overview
 
-.. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_of.c
    :export:
 
 Legacy Plane Helper Reference
index 1dffd1ac4cd44be310fa7b8855c265cc57cd30ae..5dee6b8a4c12511f16bb9e540ec7e1eb854e88cb 100644 (file)
@@ -56,11 +56,12 @@ Overview
 
 The basic object structure KMS presents to userspace is fairly simple.
 Framebuffers (represented by :c:type:`struct drm_framebuffer <drm_framebuffer>`,
-see `Frame Buffer Abstraction`_) feed into planes. One or more (or even no)
-planes feed their pixel data into a CRTC (represented by :c:type:`struct
-drm_crtc <drm_crtc>`, see `CRTC Abstraction`_) for blending. The precise
-blending step is explained in more detail in `Plane Composition Properties`_ and
-related chapters.
+see `Frame Buffer Abstraction`_) feed into planes. Planes are represented by
+:c:type:`struct drm_plane <drm_plane>`, see `Plane Abstraction`_ for more
+details. One or more (or even no) planes feed their pixel data into a CRTC
+(represented by :c:type:`struct drm_crtc <drm_crtc>`, see `CRTC Abstraction`_)
+for blending. The precise blending step is explained in more detail in `Plane
+Composition Properties`_ and related chapters.
 
 For the output routing the first step is encoders (represented by
 :c:type:`struct drm_encoder <drm_encoder>`, see `Encoder Abstraction`_). Those
@@ -373,6 +374,15 @@ Connector Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_connector.c
    :export:
 
+Writeback Connectors
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+  :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_writeback.c
+  :export:
+
 Encoder Abstraction
 ===================
 
@@ -457,7 +467,7 @@ Output discovery and initialization example
         drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
                  DRM_MODE_ENCODER_DAC);
 
-        drm_mode_connector_attach_encoder(&intel_output->base,
+        drm_connector_attach_encoder(&intel_output->base,
                           &intel_output->enc);
 
         /* Set up the DDC bus. */
@@ -517,6 +527,12 @@ Standard Connector Properties
 .. kernel-doc:: drivers/gpu/drm/drm_connector.c
    :doc: standard connector properties
 
+HDMI Specific Connector Properties
+----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+   :doc: HDMI connector properties
+
 Plane Composition Properties
 ----------------------------
 
index b08e9dcd91771c39ab56216c506ece87ce755059..21b6b72a9ba8f302d0c6bb4fef8566d7d044012e 100644 (file)
@@ -395,6 +395,8 @@ VMA Offset Manager
 .. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
    :export:
 
+.. _prime_buffer_sharing:
+
 PRIME Buffer Sharing
 ====================
 
@@ -496,3 +498,21 @@ DRM Sync Objects
 
 .. kernel-doc:: drivers/gpu/drm/drm_syncobj.c
    :export:
+
+GPU Scheduler
+=============
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+   :doc: Overview
+
+Scheduler Function References
+-----------------------------
+
+.. kernel-doc:: include/drm/gpu_scheduler.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
+   :export:
index 00288f34c5a6322835877770fb86c2cd8d380a22..1fcf8e851e1540f1b85b53fba24994f26338a6d9 100644 (file)
@@ -10,6 +10,7 @@ Linux GPU Driver Developer's Guide
    drm-kms
    drm-kms-helpers
    drm-uapi
+   drm-client
    drivers
    vga-switcheroo
    vgaarbiter
index 07ed22ea3bd670f3acd2d016963c6d1c5426997d..bfde04eddd148b8f669c3d2b633e52b08f2dd3e2 100644 (file)
@@ -17,6 +17,7 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
 ,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
 ,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
 ,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
+,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
 i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
 ,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
 ,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
diff --git a/Documentation/gpu/msm-crash-dump.rst b/Documentation/gpu/msm-crash-dump.rst
new file mode 100644 (file)
index 0000000..757cd25
--- /dev/null
@@ -0,0 +1,96 @@
+=====================
+MSM Crash Dump Format
+=====================
+
+Following a GPU hang the MSM driver outputs debugging information via
+/sys/kernel/dri/X/show or via devcoredump (/sys/class/devcoredump/dcdX/data).
+This document describes how the output is formatted.
+
+Each entry is in the form key: value. Section headers do not have a value,
+and all the contents of a section are indented two spaces from the header.
+Each section might have multiple array entries, whose start is designated
+by a (-).
+
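+A hypothetical excerpt, for illustration only (all values are made up):
+
+kernel: 4.18.0
+module: msm
+time: 11.923763
+ringbuffer:
+  - id: 0
+    iova: 0x000000001000
+    last-fence: 21
+    retired-fence: 21
+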
+Mappings
+--------
+
+kernel
+       The kernel version that generated the dump (UTS_RELEASE).
+
+module
+       The module that generated the crashdump.
+
+time
+       The kernel time at crash, formatted as seconds.microseconds.
+
+comm
+       Comm string for the binary that generated the fault.
+
+cmdline
+       Command line for the binary that generated the fault.
+
+revision
+       ID of the GPU that generated the crash formatted as
+       core.major.minor.patchlevel separated by dots.
+
+rbbm-status
+       The current value of RBBM_STATUS which shows what top level GPU
+       components are in use at the time of crash.
+
+ringbuffer
+       Section containing the contents of each ringbuffer. Each ringbuffer is
+       identified with an id number.
+
+       id
+               Ringbuffer ID (0 based index).  Each ringbuffer in the section
+               will have its own unique id.
+       iova
+               GPU address of the ringbuffer.
+
+       last-fence
+               The last fence that was issued on the ringbuffer.
+
+       retired-fence
+               The last fence retired on the ringbuffer.
+
+       rptr
+               The current read pointer (rptr) for the ringbuffer.
+
+       wptr
+               The current write pointer (wptr) for the ringbuffer.
+
+       size
+               Maximum size of the ringbuffer programmed in the hardware.
+
+       data
+               The contents of the ring encoded as ascii85.  Only the used
+               portions of the ring will be printed.
+
+bo
+       List of buffers from the hanging submission if available.
+       Each buffer object will have a unique iova.
+
+       iova
+               GPU address of the buffer object.
+
+       size
+               Allocated size of the buffer object.
+
+       data
+               The contents of the buffer object encoded with ascii85.
+               Trailing zeros at the end of the buffer will be skipped.
+
+registers
+       Set of register values. Each entry is on its own line enclosed
+       by brackets { }.
+
+       offset
+               Byte offset of the register from the start of the
+               GPU memory region.
+
+       value
+               Hexadecimal value of the register.
+
+registers-hlsq
+       (5xx only) Register values from the HLSQ aperture.
+       Same format as the register section.
diff --git a/Documentation/gpu/v3d.rst b/Documentation/gpu/v3d.rst
new file mode 100644 (file)
index 0000000..543f7fb
--- /dev/null
@@ -0,0 +1,28 @@
+=====================================
+ drm/v3d Broadcom V3D Graphics Driver
+=====================================
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_drv.c
+   :doc: Broadcom V3D Graphics Driver
+
+GPU buffer object (BO) management
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_bo.c
+   :doc: V3D GEM BO management support
+
+Address space management
+------------------------
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_mmu.c
+   :doc: Broadcom V3D MMU
+
+GPU Scheduling
+--------------
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_sched.c
+   :doc: Broadcom V3D scheduling
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/v3d/v3d_irq.c
+   :doc: Interrupt management for the V3D engine
index 6c9c69ec3986be379a86f745f30a9eab9b817d96..114c7ce7b58de2c15e5b1c917c96e4926c605191 100644 (file)
@@ -50,6 +50,11 @@ LDFLAGS_MODULE
 --------------------------------------------------
 Additional options used for $(LD) when linking modules.
 
+KBUILD_KCONFIG
+--------------------------------------------------
+Set the top-level Kconfig file to the value of this environment
+variable.  The default name is "Kconfig".
+
 KBUILD_VERBOSE
 --------------------------------------------------
 Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the
 directory name found in the arch/ directory.
 But some architectures such as x86 and sparc have aliases.
 x86: i386 for 32 bit, x86_64 for 64 bit
-sparc: sparc for 32 bit, sparc64 for 64 bit
+sh: sh for 32 bit, sh64 for 64 bit
+sparc: sparc32 for 32 bit, sparc64 for 64 bit
 
 CROSS_COMPILE
 --------------------------------------------------
@@ -148,15 +154,6 @@ stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 the default option --strip-debug will be used.  Otherwise,
 INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
-    $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
 INSTALL_HDR_PATH
 --------------------------------------------------
 INSTALL_HDR_PATH specifies where to install user space headers when
index 3534a84d206caf324423a9422eb985b48c97813b..64e0775a62d4475ec378d033332b5099987954ff 100644 (file)
@@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses
 to use it. It should be placed at the top of the configuration, before any
 other statement.
 
+'#' Kconfig source file comment:
+
+An unquoted '#' character anywhere in a source file line indicates
+the beginning of a source file comment.  The remainder of that line
+is a comment.
+
 
 Kconfig hints
 -------------
index 7233118f3a05481247f4c550542b099e9f655245..68c82914c0f3a1e791cab09d7b6a2b7253541443 100644 (file)
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".
 
 Use "make help" to list all of the possible configuration targets.
 
-The xconfig ('qconf') and menuconfig ('mconf') programs also
-have embedded help text.  Be sure to check it for navigation,
-search, and other general help text.
+The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
+programs also have embedded help text.  Be sure to check that for
+navigation, search, and other general help text.
 
 ======================================================================
 General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
 for you, so you may find that you need to see what NEW kernel
 symbols have been introduced.
 
-To see a list of new config symbols when using "make oldconfig", use
+To see a list of new config symbols, use
 
        cp user/some/old.config .config
        make listnewconfig
 
 and the config program will list any new symbols, one per line.
 
+Alternatively, you can use the brute force method:
+
+       make oldconfig
        scripts/diffconfig .config.old .config | less
 
 ______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
                This lists all config symbols that contain "hotplug",
                e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.
 
-       For search help, enter / followed TAB-TAB-TAB (to highlight
+       For search help, enter / followed by TAB-TAB (to highlight
        <Help>) and Enter.  This will tell you that you can also use
        regular expressions (regexes) in the search string, so if you
        are not interested in MEMORY_HOTPLUG, you could try
@@ -202,6 +205,39 @@ Example:
        make MENUCONFIG_MODE=single_menu menuconfig
 
 
+======================================================================
+nconfig
+--------------------------------------------------
+
+nconfig is an alternate text-based configurator.  It lists function
+keys across the bottom of the terminal (window) that execute commands.
+You can also just use the corresponding numeric key to execute the
+commands unless you are in a data entry window.  E.g., instead of F6
+for Save, you can just press 6.
+
+Use F1 for Global help or F3 for the Short help menu.
+
+Searching in nconfig:
+
+       You can search either in the menu entry "prompt" strings
+       or in the configuration symbols.
+
+       Use / to begin a search through the menu entries.  This does
+       not support regular expressions.  Use <Down> or <Up> for
+       Next hit and Previous hit, respectively.  Use <Esc> to
+       terminate the search mode.
+
+       F8 (SymSearch) searches the configuration symbols for the
+       given string or regular expression (regex).
+
+NCONFIG_MODE
+--------------------------------------------------
+This mode shows all sub-menus in one large tree.
+
+Example:
+       make NCONFIG_MODE=single_menu nconfig
+
+
 ======================================================================
 xconfig
 --------------------------------------------------
@@ -230,8 +266,7 @@ gconfig
 
 Searching in gconfig:
 
-       None (gconfig isn't maintained as well as xconfig or menuconfig);
-       however, gconfig does have a few more viewing choices than
-       xconfig does.
+       There is no search command in gconfig.  However, gconfig does
+       have several different viewing choices, modes, and options.
 
 ###
index 34c3a1b50b9aefc51fe1c4bf6811b445a6be97a8..f0ed7c30e695dc6f6a4ed7a1c2c40a44978a79ed 100644 (file)
@@ -1,4 +1,4 @@
-Wait/Wound Deadlock-Proof Mutex Design
+Wound/Wait Deadlock-Proof Mutex Design
 ======================================
 
 Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
@@ -32,10 +32,26 @@ the oldest task) wins, and the one with the higher reservation id (i.e. the
 younger task) unlocks all of the buffers that it has already locked, and then
 tries again.
 
-In the RDBMS literature this deadlock handling approach is called wait/wound:
-The older tasks waits until it can acquire the contended lock. The younger tasks
-needs to back off and drop all the locks it is currently holding, i.e. the
-younger task is wounded.
+In the RDBMS literature, a reservation ticket is associated with a transaction,
+and the deadlock handling approach is called Wait-Die. The name is based on
+the actions of a locking thread when it encounters an already locked mutex.
+If the transaction holding the lock is younger, the locking transaction waits.
+If the transaction holding the lock is older, the locking transaction backs off
+and dies. Hence Wait-Die.
+There is also another algorithm called Wound-Wait:
+If the transaction holding the lock is younger, the locking transaction
+wounds the transaction holding the lock, requesting it to die.
+If the transaction holding the lock is older, it waits for the other
+transaction. Hence Wound-Wait.
+The two algorithms are both fair in that a transaction will eventually succeed.
+However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
+compared to Wait-Die, but is, on the other hand, associated with more work than
+Wait-Die when recovering from a backoff. Wound-Wait is also a preemptive
+algorithm in that transactions are wounded by other transactions, and that
+requires a reliable way to pick up the wounded condition and preempt the
+running transaction. Note that this is not the same as process preemption. A
+Wound-Wait transaction is considered preempted when it dies (returning
+-EDEADLK) following a wound.
 
 Concepts
 --------
 Acquire context: To ensure eventual forward progress it is important that a task
 trying to acquire locks doesn't grab a new reservation id, but keeps the one it
 acquired when starting the lock acquisition. This ticket is stored in the
 acquire context. Furthermore the acquire context keeps track of debugging state
-to catch w/w mutex interface abuse.
+to catch w/w mutex interface abuse. An acquire context represents a
+transaction.
 
 W/w class: In contrast to normal mutexes the lock class needs to be explicit for
-w/w mutexes, since it is required to initialize the acquire context.
+w/w mutexes, since it is required to initialize the acquire context. The lock
+class also specifies what algorithm to use, Wound-Wait or Wait-Die.
 
 Furthermore there are three different class of w/w lock acquire functions:
 
 * Normal lock acquisition with a context, using ww_mutex_lock.
 
-* Slowpath lock acquisition on the contending lock, used by the wounded task
-  after having dropped all already acquired locks. These functions have the
-  _slow postfix.
+* Slowpath lock acquisition on the contending lock, used by the task that just
+  killed its transaction after having dropped all already acquired locks.
+  These functions have the _slow postfix.
 
   From a simple semantics point-of-view the _slow functions are not strictly
   required, since simply calling the normal ww_mutex_lock functions on the
@@ -90,6 +108,12 @@ provided.
 Usage
 -----
 
+The algorithm (Wait-Die vs Wound-Wait) is chosen by using either
+DEFINE_WW_CLASS() (Wound-Wait) or DEFINE_WD_CLASS() (Wait-Die).
+As a rough rule of thumb, use Wound-Wait iff you expect the number of
+simultaneous competing transactions to be typically small, and you want to
+reduce the number of rollbacks.
+
 Three different ways to acquire locks within the same w/w class. Common
 definitions for methods #1 and #2:
 
@@ -220,7 +244,7 @@ mutexes are a natural fit for such a case for two reasons:
 
 Note that this approach differs in two important ways from the above methods:
 - Since the list of objects is dynamically constructed (and might very well be
-  different when retrying due to hitting the -EDEADLK wound condition) there's
+  different when retrying due to hitting the -EDEADLK die condition) there's
   no need to keep any object on a persistent list when it's not locked. We can
   therefore move the list_head into the object itself.
 - On the other hand the dynamic object list construction also means that the -EALREADY return
@@ -312,12 +336,23 @@ Design:
   We maintain the following invariants for the wait list:
   (1) Waiters with an acquire context are sorted by stamp order; waiters
       without an acquire context are interspersed in FIFO order.
-  (2) Among waiters with contexts, only the first one can have other locks
-      acquired already (ctx->acquired > 0). Note that this waiter may come
-      after other waiters without contexts in the list.
+  (2) For Wait-Die, among waiters with contexts, only the first one can have
+      other locks acquired already (ctx->acquired > 0). Note that this waiter
+      may come after other waiters without contexts in the list.
+
+  The Wound-Wait preemption is implemented with a lazy-preemption scheme:
+  The wounded status of the transaction is checked only when there is
+  contention for a new lock and hence a true chance of deadlock. In that
+  situation, if the transaction is wounded, it backs off, clears the
+  wounded status and retries. A great benefit of implementing preemption in
+  this way is that the wounded transaction can identify a contending lock to
+  wait for before restarting the transaction. Just blindly restarting the
+  transaction would likely make the transaction end up in a situation where
+  it would have to back off again.
 
   In general, not much contention is expected. The locks are typically used to
-  serialize access to resources for devices.
+  serialize access to resources for devices, and optimization focus should
+  therefore be directed towards the uncontended cases.
 
 Lockdep:
   Special care has been taken to warn for as many cases of api abuse
index c13214d073a4866f49025033a86fed03275bca5f..d3e5dd26db12d75bc09d25cacbf0f775003cd527 100644 (file)
@@ -1490,7 +1490,7 @@ To remove an ARP target:
 
 To configure the interval between learning packet transmits:
 # echo 12 > /sys/class/net/bond0/bonding/lp_interval
-       NOTE: the lp_inteval is the number of seconds between instances where
+       NOTE: the lp_interval is the number of seconds between instances where
 the bonding driver sends learning packets to each slaves peer switch.  The
 default interval is 1 second.
 
index d4d8370279254472ed812488ea61d4e3bd64651b..f81111eba9c5dd157aecdd1f1370031d3d1b1f0f 100644 (file)
@@ -1,3 +1,4 @@
+==============================================================
 Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
 ==============================================================
 
@@ -46,123 +47,131 @@ Driver Configuration Parameters
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-Rx Descriptors: Number of receive descriptors. A receive descriptor is a data
+Rx Descriptors:
+   Number of receive descriptors. A receive descriptor is a data
    structure that describes a receive buffer and its attributes to the network
    controller. The data in the descriptor is used by the controller to write
    data from the controller to host memory. In the 3.x.x driver the valid range
    for this parameter is 64-256. The default value is 256. This parameter can be
    changed using the command::
 
-   ethtool -G eth? rx n
+     ethtool -G eth? rx n
 
    Where n is the number of desired Rx descriptors.
 
-Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data
+Tx Descriptors:
+   Number of transmit descriptors. A transmit descriptor is a data
    structure that describes a transmit buffer and its attributes to the network
    controller. The data in the descriptor is used by the controller to read
    data from the host memory to the controller. In the 3.x.x driver the valid
    range for this parameter is 64-256. The default value is 128. This parameter
    can be changed using the command::
 
-   ethtool -G eth? tx n
+     ethtool -G eth? tx n
 
    Where n is the number of desired Tx descriptors.
 
-Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by
+Speed/Duplex:
+   The driver auto-negotiates the link speed and duplex settings by
    default. The ethtool utility can be used as follows to force speed/duplex.::
 
-   ethtool -s eth?  autoneg off speed {10|100} duplex {full|half}
+     ethtool -s eth?  autoneg off speed {10|100} duplex {full|half}
 
    NOTE: setting the speed/duplex to incorrect values will cause the link to
    fail.
 
-Event Log Message Level:  The driver uses the message level flag to log events
+Event Log Message Level:
+   The driver uses the message level flag to log events
    to syslog. The message level can be set at driver load time. It can also be
    set using the command::
 
-   ethtool -s eth? msglvl n
+     ethtool -s eth? msglvl n
 
 
 Additional Configurations
 =========================
 
-  Configuring the Driver on Different Distributions
-  -------------------------------------------------
+Configuring the Driver on Different Distributions
+-------------------------------------------------
 
-  Configuring a network driver to load properly when the system is started is
-  distribution dependent. Typically, the configuration process involves adding
-  an alias line to /etc/modprobe.d/*.conf as well as editing other system
-  startup scripts and/or configuration files.  Many popular Linux
-  distributions ship with tools to make these changes for you. To learn the
-  proper way to configure a network device for your system, refer to your
-  distribution documentation.  If during this process you are asked for the
-  driver or module name, the name for the Linux Base Driver for the Intel
-  PRO/100 Family of Adapters is e100.
+Configuring a network driver to load properly when the system is started
+is distribution dependent.  Typically, the configuration process involves
+adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other
+system startup scripts and/or configuration files.  Many popular Linux
+distributions ship with tools to make these changes for you.  To learn
+the proper way to configure a network device for your system, refer to
+your distribution documentation.  If during this process you are asked
+for the driver or module name, the name for the Linux Base Driver for
+the Intel PRO/100 Family of Adapters is e100.
 
-  As an example, if you install the e100 driver for two PRO/100 adapters
-  (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
+As an example, if you install the e100 driver for two PRO/100 adapters
+(eth0 and eth1), add the following to a configuration file in
+/etc/modprobe.d/::
 
        alias eth0 e100
        alias eth1 e100
 
-  Viewing Link Messages
-  ---------------------
-  In order to see link messages and other Intel driver information on your
-  console, you must set the dmesg level up to six. This can be done by
-  entering the following on the command line before loading the e100 driver::
+Viewing Link Messages
+---------------------
+
+In order to see link messages and other Intel driver information on your
+console, you must set the dmesg level up to six.  This can be done by
+entering the following on the command line before loading the e100
+driver::
 
        dmesg -n 6
 
-  If you wish to see all messages issued by the driver, including debug
-  messages, set the dmesg level to eight.
+If you wish to see all messages issued by the driver, including debug
+messages, set the dmesg level to eight.
 
-  NOTE: This setting is not saved across reboots.
+NOTE: This setting is not saved across reboots.
 
+ethtool
+-------
 
-  ethtool
-  -------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information.  The ethtool
+version 1.6 or later is required for this functionality.
 
-  The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.  The ethtool
-  version 1.6 or later is required for this functionality.
+The latest release of ethtool can be found from
+https://www.kernel.org/pub/software/network/ethtool/
 
-  The latest release of ethtool can be found from
-  https://www.kernel.org/pub/software/network/ethtool/
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is provided through the ethtool* utility.  For instructions on
+enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
+enabled on the system during the next shut down or reboot.  For this
+driver version, in order to enable WoL, the e100 driver must be loaded
+when shutting down or rebooting the system.
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
-  WoL is provided through the ethtool* utility.  For instructions on enabling
-  WoL with ethtool, refer to the ethtool man page.
+NAPI
+----
 
-  WoL will be enabled on the system during the next shut down or reboot. For
-  this driver version, in order to enable WoL, the e100 driver must be
-  loaded when shutting down or rebooting the system.
+NAPI (Rx polling mode) is supported in the e100 driver.
 
-  NAPI
-  ----
+See https://wiki.linuxfoundation.org/networking/napi for more
+information on NAPI.
 
-  NAPI (Rx polling mode) is supported in the e100 driver.
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
 
-  See https://wiki.linuxfoundation.org/networking/napi for more information
-  on NAPI.
+Due to the default ARP behavior on Linux, it is not possible to have one
+system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected.  All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
 
-  Multiple Interfaces on Same Ethernet Broadcast Network
-  ------------------------------------------------------
+If you have multiple interfaces in a server, either turn on ARP
+filtering by
 
-  Due to the default ARP behavior on Linux, it is not possible to have
-  one system on two IP networks in the same Ethernet broadcast domain
-  (non-partitioned switch) behave as expected. All Ethernet interfaces
-  will respond to IP traffic for any IP address assigned to the system.
-  This results in unbalanced receive traffic.
+(1) entering::
 
-  If you have multiple interfaces in a server, either turn on ARP
-  filtering by
+       echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
 
-  (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-      (this only works if your kernel's version is higher than 2.4.5), or
+    (this only works if your kernel's version is higher than 2.4.5), or
 
-  (2) installing the interfaces in separate broadcast domains (either
-      in different switches or in a switch partitioned to VLANs).
+(2) installing the interfaces in separate broadcast domains (either
+    in different switches or in a switch partitioned to VLANs).
 
 
 Support
index 616848940e63f7303633e0be67febc86bee6ac6f..f10dd40869218cb11e1d29bc5e4b6431d30af946 100644 (file)
@@ -1,3 +1,4 @@
+===========================================================
 Linux* Base Driver for Intel(R) Ethernet Network Connection
 ===========================================================
 
@@ -33,7 +34,8 @@ Command Line Parameters
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-NOTES:  For more information about the AutoNeg, Duplex, and Speed
+NOTES:
+       For more information about the AutoNeg, Duplex, and Speed
         parameters, see the "Speed and Duplex Configuration" section in
         this document.
 
@@ -44,22 +46,27 @@ NOTES:  For more information about the AutoNeg, Duplex, and Speed
 
 AutoNeg
 -------
+
 (Supported only on adapters with copper connections)
-Valid Range:   0x01-0x0F, 0x20-0x2F
-Default Value: 0x2F
+
+:Valid Range:   0x01-0x0F, 0x20-0x2F
+:Default Value: 0x2F
 
 This parameter is a bit-mask that specifies the speed and duplex settings
 advertised by the adapter.  When this parameter is used, the Speed and
 Duplex parameters must not be specified.
 
-NOTE:  Refer to the Speed and Duplex section of this readme for more
+NOTE:
+       Refer to the Speed and Duplex section of this readme for more
        information on the AutoNeg parameter.
 
 Duplex
 ------
+
 (Supported only on adapters with copper connections)
-Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
-Default Value: 0
+
+:Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
+:Default Value: 0
 
 This defines the direction in which data is allowed to flow.  Can be
 either one or two-directional.  If both Duplex and the link partner are
@@ -69,18 +76,22 @@ duplex.
 
 FlowControl
 -----------
-Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
-Default Value: Reads flow control settings from the EEPROM
+
+:Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
+:Default Value: Reads flow control settings from the EEPROM
 
This parameter controls the automatic generation (Tx) and response (Rx)
 to Ethernet PAUSE frames.
 
 InterruptThrottleRate
 ---------------------
+
 (not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range:   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
-                                 4=simplified balancing)
-Default Value: 3
+
+:Valid Range:
+   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+   4=simplified balancing)
+:Default Value: 3
 
The driver can limit the number of interrupts per second that the adapter
 will generate for incoming packets. It does this by writing a value to the
@@ -134,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation
 and may improve small packet latency, but is generally not suitable
 for bulk throughput traffic.
 
-NOTE:  InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+NOTE:
+       InterruptThrottleRate takes precedence over the TxAbsIntDelay and
        RxAbsIntDelay parameters.  In other words, minimizing the receive
        and/or transmit absolute delays does not force the controller to
        generate more interrupts than what the Interrupt Throttle Rate
        allows.
 
-CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
+CAUTION:
+          If you are using the Intel(R) PRO/1000 CT Network Connection
           (controller 82547), setting InterruptThrottleRate to a value
           greater than 75,000, may hang (stop transmitting) adapters
           under certain network conditions.  If this occurs a NETDEV
@@ -150,7 +163,8 @@ CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
           hang, ensure that InterruptThrottleRate is set no greater
           than 75,000 and is not set to 0.
 
-NOTE:  When e1000 is loaded with default settings and multiple adapters
+NOTE:
+       When e1000 is loaded with default settings and multiple adapters
        are in use simultaneously, the CPU utilization may increase non-
        linearly.  In order to limit the CPU utilization without impacting
        the overall throughput, we recommend that you load the driver as
@@ -167,9 +181,11 @@ NOTE:  When e1000 is loaded with default settings and multiple adapters
 
 RxDescriptors
 -------------
-Valid Range:   48-256 for 82542 and 82543-based adapters
-               48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
 
 This value specifies the number of receive buffer descriptors allocated
 by the driver.  Increasing this value allows the driver to buffer more
@@ -179,15 +195,17 @@ Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
 on the MTU setting. The maximum MTU size is 16110.
 
-NOTE:  MTU designates the frame size.  It only needs to be set for Jumbo
+NOTE:
+       MTU designates the frame size.  It only needs to be set for Jumbo
        Frames.  Depending on the available system resources, the request
        for a higher number of receive descriptors may be denied.  In this
        case, use a lower number.
 
 RxIntDelay
 ----------
-Valid Range:   0-65535 (0=off)
-Default Value: 0
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 0
 
 This value delays the generation of receive interrupts in units of 1.024
 microseconds.  Receive interrupt reduction can improve CPU efficiency if
@@ -197,7 +215,8 @@ of TCP traffic.  If the system is reporting dropped receives, this value
 may be set too high, causing the driver to run out of available receive
 descriptors.
 
-CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
+CAUTION:
+          When setting RxIntDelay to a value other than 0, adapters may
           hang (stop transmitting) under certain network conditions.  If
           this occurs a NETDEV WATCHDOG message is logged in the system
           event log.  In addition, the controller is automatically reset,
@@ -206,9 +225,11 @@ CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
 
 RxAbsIntDelay
 -------------
+
 (This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range:   0-65535 (0=off)
-Default Value: 128
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 128
 
 This value, in units of 1.024 microseconds, limits the delay in which a
 receive interrupt is generated.  Useful only if RxIntDelay is non-zero,
@@ -219,9 +240,11 @@ conditions.
 
 Speed
 -----
+
 (This parameter is supported only on adapters with copper connections.)
-Valid Settings: 0, 10, 100, 1000
-Default Value:  0 (auto-negotiate at all supported speeds)
+
+:Valid Settings: 0, 10, 100, 1000
+:Default Value:  0 (auto-negotiate at all supported speeds)
 
 Speed forces the line speed to the specified value in megabits per second
 (Mbps).  If this parameter is not specified or is set to 0 and the link
@@ -230,22 +253,26 @@ speed.  Duplex should also be set when Speed is set to either 10 or 100.
 
 TxDescriptors
 -------------
-Valid Range:   48-256 for 82542 and 82543-based adapters
-               48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+  - 48-256 for 82542 and 82543-based adapters
+  - 48-4096 for all other supported adapters
+:Default Value: 256
 
 This value is the number of transmit descriptors allocated by the driver.
 Increasing this value allows the driver to queue more transmits.  Each
 descriptor is 16 bytes.
 
-NOTE:  Depending on the available system resources, the request for a
+NOTE:
+       Depending on the available system resources, the request for a
        higher number of transmit descriptors may be denied.  In this case,
        use a lower number.
 
 TxIntDelay
 ----------
-Valid Range:   0-65535 (0=off)
-Default Value: 8
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 8
 
 This value delays the generation of transmit interrupts in units of
 1.024 microseconds.  Transmit interrupt reduction can improve CPU
@@ -255,9 +282,11 @@ causing the driver to run out of available transmit descriptors.
 
 TxAbsIntDelay
 -------------
+
 (This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range:   0-65535 (0=off)
-Default Value: 32
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 32
 
 This value, in units of 1.024 microseconds, limits the delay in which a
 transmit interrupt is generated.  Useful only if TxIntDelay is non-zero,
@@ -268,18 +297,21 @@ network conditions.
 
 XsumRX
 ------
+
 (This parameter is NOT supported on the 82542-based adapter.)
-Valid Range:   0-1
-Default Value: 1
+
+:Valid Range:   0-1
+:Default Value: 1
 
 A value of '1' indicates that the driver should enable IP checksum
 offload for received packets (both UDP and TCP) to the adapter hardware.
 
 Copybreak
 ---------
-Valid Range:   0-xxxxxxx (0=off)
-Default Value: 256
-Usage: modprobe e1000.ko copybreak=128
+
+:Valid Range:   0-xxxxxxx (0=off)
+:Default Value: 256
+:Usage: modprobe e1000.ko copybreak=128
 
The driver copies all packets at or below this size to a fresh RX
buffer before handing them up the stack.
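
As a rough sketch of the technique (this illustrates the general copybreak
idea, not the e1000 driver's actual receive path; netdev, skb, length and
copybreak stand for the driver's receive state)::

	/* small packet: copy into a fresh skb so the large, DMA-mapped
	 * receive buffer can be recycled immediately */
	if (length <= copybreak) {
		struct sk_buff *nskb;

		nskb = netdev_alloc_skb_ip_align(netdev, length);
		if (nskb) {
			skb_put_data(nskb, skb->data, length);
			skb = nskb;	/* hand the copy up the stack */
		}
	}

Copying keeps small packets in freshly allocated, cache-warm memory while
the driver keeps reusing its mapped receive buffers.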
@@ -291,8 +323,9 @@ it is also available during runtime at
 
 SmartPowerDownEnable
 --------------------
-Valid Range: 0-1
-Default Value:  0 (disabled)
+
+:Valid Range: 0-1
+:Default Value:  0 (disabled)
 
 Allows PHY to turn off in lower power states. The user can turn off
 this parameter in supported chipsets.
@@ -308,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex.
 
 For copper-based boards, the keywords interact as follows:
 
-  The default operation is auto-negotiate.  The board advertises all
+- The default operation is auto-negotiate.  The board advertises all
   supported speed and duplex combinations, and it links at the highest
   common speed and duplex mode IF the link partner is set to auto-negotiate.
 
-  If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
+- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
   is advertised (The 1000BaseT spec requires auto-negotiation.)
 
-  If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
+- If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
   negotiation is disabled, and the AutoNeg parameter is ignored.  Partner
   SHOULD also be forced.
 
@@ -327,13 +360,15 @@ process.
 The parameter may be specified as either a decimal or hexadecimal value as
 determined by the bitmap below.
 
+============== ====== ====== ======= ======= ====== ====== ======= ======
 Bit position   7      6      5       4       3      2      1       0
 Decimal Value  128    64     32      16      8      4      2       1
 Hex value      80     40     20      10      8      4      2       1
 Speed (Mbps)   N/A    N/A    1000    N/A     100    100    10      10
 Duplex                       Full            Full   Half   Full    Half
+============== ====== ====== ======= ======= ====== ====== ======= ======
 
-Some examples of using AutoNeg:
+Some examples of using AutoNeg::
 
   modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half)
   modprobe e1000 AutoNeg=1 (Same as above)
@@ -354,8 +389,9 @@ previously mentioned to force the adapter to the same speed and duplex.
 Additional Configurations
 =========================
 
-  Jumbo Frames
-  ------------
+Jumbo Frames
+------------
+
   Jumbo Frames support is enabled by changing the MTU to a value larger than
   the default of 1500.  Use the ifconfig command to increase the MTU size.
   For example::
@@ -367,11 +403,11 @@ Additional Configurations
 
        MTU=9000
 
-   to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
-   applies to the Red Hat distributions; other distributions may store this
-   setting in a different location.
+  to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
+  applies to the Red Hat distributions; other distributions may store this
+  setting in a different location.
 
-  Notes:
+Notes:
   Degradation in throughput performance may be observed in some Jumbo frames
   environments. If this is observed, increasing the application's socket buffer
   size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
@@ -385,12 +421,14 @@ Additional Configurations
     poor performance or loss of link.
 
   - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
-    support Jumbo Frames. These correspond to the following product names:
+    support Jumbo Frames. These correspond to the following product names::
+
      Intel(R) PRO/1000 Gigabit Server Adapter
      Intel(R) PRO/1000 PM Network Connection
 
-  ethtool
-  -------
+ethtool
+-------
+
   The driver utilizes the ethtool interface for driver configuration and
   diagnostics, as well as displaying statistical information.  The ethtool
   version 1.6 or later is required for this functionality.
@@ -398,8 +436,9 @@ Additional Configurations
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool/
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
+Enabling Wake on LAN* (WoL)
+---------------------------
+
   WoL is configured through the ethtool* utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
index 13081b3decefa834824b544182d0986e83bc50b4..a7d354ddda7baeb59760215cb41222e3b4698a8d 100644 (file)
@@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp)
      Temporarily pause a stream parser. Message parsing is suspended
      and no new messages are delivered to the upper layer.
 
-void strp_pause(struct strparser *strp)
+void strp_unpause(struct strparser *strp)
 
      Unpause a paused stream parser.
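
A minimal sketch of pausing from the receive callback (my_rcv_msg and the
rx_queue_full() helper are hypothetical, not part of the strparser API):

    static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
    {
            /* hand skb to the upper layer; if the consumer is falling
             * behind, suspend message delivery until it catches up */
            if (rx_queue_full())
                    strp_pause(strp);
    }

    /* later, once the consumer has drained its backlog: */
    strp_unpause(strp);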
 
index e73bcf9cb5f31cc756521702bbc15fd142e09c71..7ffea6aa22e3c89d4b6e6c7359d40a55c4241176 100644 (file)
@@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the
 associated event field will be saved in a variable but won't be summed
 as a value:
 
-  # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+  # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
 
 Multiple variables can be assigned at the same time.  The below would
 result in both ts0 and b being created as variables, with both
 common_timestamp and field1 additionally being summed as values:
 
-  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
        event/trigger
 
 Note that variable assignments can appear either preceding or
 following their use.  The command below behaves identically to the
 command above:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
        event/trigger
 
 Any number of variables not bound to a 'vals=' prefix can also be
 assigned by simply separating them with colons.  Below is the same
 thing but without the values being summed in the histogram:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
 
 Variables set as above can be referenced and used in expressions on
 another event.
 
 For example, here's how a latency can be calculated:
 
-  # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
-  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+  # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
 
In the first line above, the event's timestamp is saved into the
 variable ts0.  In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'.  The hist trigger below in turn
 makes use of the wakeup_lat variable to compute a combined latency
 using the same key and variable from yet another event:
 
-  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
 
 2.2.2 Synthetic Events
 ----------------------
@@ -1807,10 +1807,11 @@ the command that defined it with a '!':
 At this point, there isn't yet an actual 'wakeup_latency' event
instantiated in the event subsystem - for this to happen, a 'hist
 trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using the hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.
 
-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:
 
   # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
         /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@ hist trigger specification.
     back to that pid, the timestamp difference is calculated.  If the
     resulting latency, stored in wakeup_lat, exceeds the current
     maximum latency, the values specified in the save() fields are
-    recoreded:
+    recorded:
 
     # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
             if comm=="cyclictest"' >> \
index 635e57493709e16fbecc0235723dc742a608ae0e..b8cb38a98c1989eef926795b79b7399d65700135 100644 (file)
@@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function>
 where <config name>.<number> specify the configuration and <function> is
 a symlink to a function being removed from the configuration, e.g.:
 
-$ rm configfs/c.1/ncm.usb0
+$ rm configs/c.1/ncm.usb0
 
 ...
 ...
index 495b7742ab58086b5c81fff88eeb884769391b49..d10944e619d3d28c43bcca85bc3dd2761cee37f0 100644 (file)
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
 
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
 
 Architectures: x86
 
index 9d5eeff51b5fd32979f64d288375b6489ff25712..9b2bf134964e7a42b8ca332d2869a470dab0877d 100644 (file)
@@ -581,7 +581,7 @@ W:  https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
 M:     David Airlie <airlied@linux.ie>
-T:     git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+T:     git git://anongit.freedesktop.org/drm/drm
 S:     Maintained
 F:     drivers/char/agp/
 F:     include/linux/agp*
@@ -728,6 +728,14 @@ S: Supported
 F:     drivers/crypto/ccp/
 F:     include/linux/ccp.h
 
+AMD DISPLAY CORE
+M:     Harry Wentland <harry.wentland@amd.com>
+M:     Leo Li <sunpeng.li@amd.com>
+L:     amd-gfx@lists.freedesktop.org
+T:     git git://people.freedesktop.org/~agd5f/linux
+S:     Supported
+F:     drivers/gpu/drm/amd/display/
+
 AMD FAM15H PROCESSOR POWER MONITORING DRIVER
 M:     Huang Rui <ray.huang@amd.com>
 L:     linux-hwmon@vger.kernel.org
@@ -777,6 +785,14 @@ F: drivers/gpu/drm/amd/include/vi_structs.h
 F:     drivers/gpu/drm/amd/include/v9_structs.h
 F:     include/uapi/linux/kfd_ioctl.h
 
+AMD POWERPLAY
+M:     Rex Zhu <rex.zhu@amd.com>
+M:     Evan Quan <evan.quan@amd.com>
+L:     amd-gfx@lists.freedesktop.org
+S:     Supported
+F:     drivers/gpu/drm/amd/powerplay/
+T:     git git://people.freedesktop.org/~agd5f/linux
+
 AMD SEATTLE DEVICE TREE SUPPORT
 M:     Brijesh Singh <brijeshkumar.singh@amd.com>
 M:     Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
@@ -2523,7 +2539,7 @@ S:        Supported
 F:     drivers/scsi/esas2r
 
 ATUSB IEEE 802.15.4 RADIO DRIVER
-M:     Stefan Schmidt <stefan@osg.samsung.com>
+M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ieee802154/atusb.c
@@ -2971,9 +2987,13 @@ N:       bcm585*
 N:     bcm586*
 N:     bcm88312
 N:     hr2
-F:     arch/arm64/boot/dts/broadcom/ns2*
+N:     stingray
+F:     arch/arm64/boot/dts/broadcom/northstar2/*
+F:     arch/arm64/boot/dts/broadcom/stingray/*
 F:     drivers/clk/bcm/clk-ns*
+F:     drivers/clk/bcm/clk-sr*
 F:     drivers/pinctrl/bcm/pinctrl-ns*
+F:     include/dt-bindings/clock/bcm-sr*
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
@@ -4360,12 +4380,7 @@ L:       iommu@lists.linux-foundation.org
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 S:     Supported
-F:     lib/dma-debug.c
-F:     lib/dma-direct.c
-F:     lib/dma-noncoherent.c
-F:     lib/dma-virt.c
-F:     drivers/base/dma-mapping.c
-F:     drivers/base/dma-coherent.c
+F:     kernel/dma/
 F:     include/asm-generic/dma-mapping.h
 F:     include/linux/dma-direct.h
 F:     include/linux/dma-mapping.h
@@ -4461,6 +4476,7 @@ F:        Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R:     "Rafael J. Wysocki" <rafael@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:     Supported
 F:     Documentation/kobject.txt
@@ -4631,7 +4647,7 @@ F:        include/uapi/drm/vmwgfx_drm.h
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
 L:     dri-devel@lists.freedesktop.org
-T:     git git://people.freedesktop.org/~airlied/linux
+T:     git git://anongit.freedesktop.org/drm/drm
 B:     https://bugs.freedesktop.org/
 C:     irc://chat.freenode.net/dri-devel
 S:     Maintained
@@ -4884,7 +4900,8 @@ F:        Documentation/gpu/xen-front.rst
 
 DRM TTM SUBSYSTEM
 M:     Christian Koenig <christian.koenig@amd.com>
-M:     Roger He <Hongbo.He@amd.com>
+M:     Huang Rui <ray.huang@amd.com>
+M:     Junwei Zhang <Jerry.Zhang@amd.com>
 T:     git git://people.freedesktop.org/~agd5f/linux
 S:     Maintained
 L:     dri-devel@lists.freedesktop.org
@@ -5674,7 +5691,7 @@ F:        drivers/crypto/caam/
 F:     Documentation/devicetree/bindings/crypto/fsl-sec4.txt
 
 FREESCALE DIU FRAMEBUFFER DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/fsl-diu-fb.*
@@ -5774,7 +5791,7 @@ S:        Maintained
 F:     drivers/net/wan/fsl_ucc_hdlc*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/tty/serial/ucc_uart.c
@@ -5790,7 +5807,6 @@ F:        include/linux/fsl/
 
 FREESCALE SOC FS_ENET DRIVER
 M:     Pantelis Antoniou <pantelis.antoniou@gmail.com>
-M:     Vitaly Bordug <vbordug@ru.mvista.com>
 L:     linuxppc-dev@lists.ozlabs.org
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -5798,7 +5814,7 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 M:     Nicolin Chen <nicoleotsuka@gmail.com>
 M:     Xiubo Li <Xiubo.Lee@gmail.com>
 R:     Fabio Estevam <fabio.estevam@nxp.com>
@@ -6909,7 +6925,7 @@ F:        drivers/clk/clk-versaclock5.c
 
 IEEE 802.15.4 SUBSYSTEM
 M:     Alexander Aring <alex.aring@gmail.com>
-M:     Stefan Schmidt <stefan@osg.samsung.com>
+M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 W:     http://wpan.cakelab.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@ -7096,6 +7112,7 @@ F:        include/uapi/linux/input.h
 F:     include/uapi/linux/input-event-codes.h
 F:     include/linux/input/
 F:     Documentation/devicetree/bindings/input/
+F:     Documentation/devicetree/bindings/serio/
 F:     Documentation/input/
 
 INPUT MULTITOUCH (MT) PROTOCOL
@@ -8629,7 +8646,7 @@ MARVELL MWIFIEX WIRELESS DRIVER
 M:     Amitkumar Karwar <amitkarwar@gmail.com>
 M:     Nishant Sarmukadam <nishants@marvell.com>
 M:     Ganapathi Bhat <gbhat@marvell.com>
-M:     Xinming Hu <huxm@marvell.com>
+M:     Xinming Hu <huxinming820@gmail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/marvell/mwifiex/
@@ -9075,7 +9092,7 @@ S:        Maintained
 F:     drivers/usb/mtu3/
 
 MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
-M:     Peter Senna Tschudin <peter.senna@collabora.com>
+M:     Peter Senna Tschudin <peter.senna@gmail.com>
 M:     Martin Donnelly <martin.donnelly@ge.com>
 M:     Martyn Welch <martyn.welch@collabora.co.uk>
 S:     Maintained
@@ -9756,6 +9773,11 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/NCR_D700.*
 
+NCSI LIBRARY
+M:     Samuel Mendoza-Jonas <sam@mendozajonas.com>
+S:     Maintained
+F:     net/ncsi/
+
 NCT6775 HARDWARE MONITOR DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
@@ -9882,6 +9904,7 @@ M:        Andrew Lunn <andrew@lunn.ch>
 M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/dsa/
 F:     net/dsa/
 F:     include/net/dsa.h
 F:     include/linux/dsa/
@@ -10208,11 +10231,13 @@ F:    sound/soc/codecs/sgtl5000*
 
 NXP TDA998X DRM DRIVER
 M:     Russell King <linux@armlinux.org.uk>
-S:     Supported
+S:     Maintained
 T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
 T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
+F:     include/dt-bindings/display/tda998x.h
+K:     "nxp,tda998x"
 
 NXP TFA9879 DRIVER
 M:     Peter Rosin <peda@axentia.se>
@@ -11476,6 +11501,15 @@ W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/intersil/prism54/
 
+PROC FILESYSTEM
+R:     Alexey Dobriyan <adobriyan@gmail.com>
+L:     linux-kernel@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+S:     Maintained
+F:     fs/proc/
+F:     include/linux/proc_fs.h
+F:     tools/testing/selftests/proc/
+
 PROC SYSCTL
 M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
 M:     Kees Cook <keescook@chromium.org>
@@ -11808,9 +11842,9 @@ F:  Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 F:  drivers/cpufreq/qcom-cpufreq-kryo.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
-M:     Timur Tabi <timur@codeaurora.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM HEXAGON ARCHITECTURE
@@ -11821,7 +11855,7 @@ S:      Supported
 F:     arch/hexagon/
 
 QUALCOMM HIDMA DRIVER
-M:     Sinan Kaya <okaya@codeaurora.org>
+M:     Sinan Kaya <okaya@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-arm-msm@vger.kernel.org
 L:     dmaengine@vger.kernel.org
@@ -13648,7 +13682,7 @@ M:      Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:     Supported
-F:     lib/swiotlb.c
+F:     kernel/dma/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
@@ -15572,9 +15606,17 @@ M:     x86@kernel.org
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
+F:     Documentation/devicetree/bindings/x86/
 F:     Documentation/x86/
 F:     arch/x86/
 
+X86 ENTRY CODE
+M:     Andy Lutomirski <luto@kernel.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
+S:     Maintained
+F:     arch/x86/entry/
+
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
 M:     Borislav Petkov <bp@alien8.de>
@@ -15597,7 +15639,7 @@ F:      drivers/platform/x86/
 F:     drivers/platform/olpc/
 
 X86 VDSO
-M:     Andy Lutomirski <luto@amacapital.net>
+M:     Andy Lutomirski <luto@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:     Maintained
index ca2af1ab91ebadf6ac5c62150b4e72f2a1f1441d..85f3481a56d6cd786cf496bd5f72f23db714130e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
          else if [ -x /bin/bash ]; then echo /bin/bash; \
          else echo sh; fi ; fi)
 
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
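+# (stderr silenced above: getconf on some minimal hosts does not know the
+# LFS_* variables, and its error messages would leak into the build output)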
 
 HOSTCC       = gcc
 HOSTCXX      = g++
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-  CC_CAN_LINK := y
-  export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1717,6 +1712,6 @@ endif     # skip-makefile
 PHONY += FORCE
 FORCE:
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
+# Declare the contents of the PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
 .PHONY: $(PHONY)
index 0c4805a572c8739ff9d657c63961747e3ea08ff3..04a4a138ed131c7256aeb4108453400516b8965a 100644 (file)
@@ -555,11 +555,6 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config HAVE_DEC_LOCK
-       bool
-       depends on SMP
-       default y
-
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
index 6e921754c8fc747be6d6b6b3c28d57d48bddce8d..c210a25dd6daad4a99f40ce729cde4db707de6cf 100644 (file)
@@ -1180,13 +1180,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
                struct rusage32 __user *, ur)
 {
-       unsigned int status = 0;
        struct rusage r;
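+       /* kernel_wait4() takes a __user status pointer and fills it in
+        * itself, so ustatus can be passed straight through instead of
+        * being bounced via a kernel-side variable */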
-       long err = kernel_wait4(pid, &status, options, &r);
+       long err = kernel_wait4(pid, ustatus, options, &r);
        if (err <= 0)
                return err;
-       if (put_user(status, ustatus))
-               return -EFAULT;
        if (!ur)
                return err;
        if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
index 04f9729de57c351c7e142b9aab9d9bca0878a2f3..854d5e79979e4ce929d7998bf1135a12880238f8 100644 (file)
@@ -35,8 +35,6 @@ lib-y =       __divqu.o __remqu.o __divlu.o __remlu.o \
        callback_srm.o srm_puts.o srm_printk.o \
        fls.o
 
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
 # The division routines are built from single source, with different defines.
 AFLAGS___divqu.o = -DDIV
 AFLAGS___remqu.o =       -DREM
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644 (file)
index a117707..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- * 
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
-  asm (".text                                  \n\
-       .global _atomic_dec_and_lock            \n\
-       .ent _atomic_dec_and_lock               \n\
-       .align  4                               \n\
-_atomic_dec_and_lock:                          \n\
-       .prologue 0                             \n\
-1:     ldl_l   $1, 0($16)                      \n\
-       subl    $1, 1, $1                       \n\
-       beq     $1, 2f                          \n\
-       stl_c   $1, 0($16)                      \n\
-       beq     $1, 4f                          \n\
-       mb                                      \n\
-       clr     $0                              \n\
-       ret                                     \n\
-2:     br      $29, 3f                         \n\
-3:     ldgp    $29, 0($29)                     \n\
-       br      $atomic_dec_and_lock_1..ng      \n\
-       .subsection 2                           \n\
-4:     br      1b                              \n\
-       .previous                               \n\
-       .end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
-       /* Slow path */
-       spin_lock(lock);
-       if (atomic_dec_and_test(atomic))
-               return 1;
-       spin_unlock(lock);
-       return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
index e81bcd271be72e7b1e2bbece5b2b7442ebd7b462..9cf59fc60eab80f5f7a74b846b0e3e8efbaac593 100644 (file)
@@ -413,7 +413,7 @@ config ARC_HAS_DIV_REM
 
 config ARC_HAS_ACCL_REGS
        bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
-       default n
+       default y
        help
          Depending on the configuration, CPU can contain accumulator reg-pair
          (also referred to as r58:r59). These can also be used by gcc as GPR so
index d37f49d6a27f40f65d3e34bd3e2df5343a97d1e4..6c1b20dd76ad902655d7317eb44580923d98c690 100644 (file)
@@ -16,7 +16,7 @@ endif
 
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
-cflags-y       += -fno-common -pipe -fno-builtin -D__linux__
+cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
 cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
 
@@ -140,16 +140,3 @@ dtbs: scripts
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
index 09f85154c5a4bf6609f1dabb4595ac00291d0668..a635ea972304e3531b205c23a0e3ef814608e313 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 09fed3ef22b6a0c4ea3bcc508b3817c8814b675b..aa507e423075b16be125d95fbb29b55b5b08683c 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index ea2f6d817d1ae0c241bb63e9b264c09004eeb215..eba07f4686545ed00383756ae53ba404b2b2b25e 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index ab231c040efe55db40d6811c723f0f44fd601e05..098b19fbaa51f0116e7f0328eb3a17feb72f0123 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EXPERT=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
index cf449cbf440dfe32c4169b7e64ec9149dd14b692..0104c404d8970ee44ecb0ced17fe137363e4cf5b 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 1b54c72f4296fc2a03bd1c70f1550dedbcbc23e9..6491be0ddbc9e9cfd457dccc452d5bebf28c1183 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
index 31c2c70b34a172cf89ba35759593f12777574052..99e05cf63fca2c6d953b952386b0cf1649ae7332 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index a578c721d50fb62829a02aeaa3b57e9524332734..0dc4f9b737e7a4f48b41ae7caaedaa2ce89c5b40 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index 37d7395f3272af75a5b5bbcd4c5d4d0dc8d0e6d8..be3c30a15e54c09db51112ca88fd9b32d73a0d34 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index 1e1470e2a7f00f558c160fdda5abce07c9441fd7..3a74b9b217723d2c2c75a91510ef1aadeab7b89a 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index 084a6e42685bfd9aa16c398bd8279ed198ca613e..ea2834b4dc1dad187193549b7b146da413726c37 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
index f36d479904152da8ce167f2874a0b10b68a3975b..80a5a1b4924bcf086ed57c34d7778304288f35a2 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_KPROBES=y
index 1aca2e8fd1ba2fb08b647142ee7eb28a4dc84dfa..2cc87f909747c1818385de9ba99c0bbeda6197b8 100644 (file)
@@ -56,7 +56,6 @@ CONFIG_STMMAC_ETH=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
index ec36d5b6d435bfca7a05148e05cd1f801c5cdecf..29f3988c94249408f7b3828957c26f6849e025c5 100644 (file)
        POP     gp
        RESTORE_R12_TO_R0
 
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ld      r25, [sp, 12]
+#endif
        ld  sp, [sp] /* restore original sp */
        /* orig_r0, ECR, user_r25 skipped automatically */
 .endm
        POP     gp
        RESTORE_R12_TO_R0
 
+#ifdef CONFIG_ARC_CURR_IN_REG
+       ld      r25, [sp, 12]
+#endif
        ld  sp, [sp] /* restore original sp */
        /* orig_r0, ECR, user_r25 skipped automatically */
 .endm
index 51597f344a62aced8c98cb2035c9ec40eff55e6e..302b0db8ea2bd9afc0d55116e24eb1dcee92eea4 100644 (file)
@@ -86,9 +86,6 @@
        POP     r1
        POP     r0
 
-#ifdef CONFIG_ARC_CURR_IN_REG
-       ld      r25, [sp, 12]
-#endif
 .endm
 
 /*--------------------------------------------------------------
index c28e6c347b4900217ad48053c69679bb3da8b607..871f3cb16af9f2ec58c76192ffc098d914588b9d 100644 (file)
@@ -34,9 +34,7 @@ struct machine_desc {
        const char              *name;
        const char              **dt_compat;
        void                    (*init_early)(void);
-#ifdef CONFIG_SMP
        void                    (*init_per_cpu)(unsigned int);
-#endif
        void                    (*init_machine)(void);
        void                    (*init_late)(void);
 
index 109baa06831cecc38cf1d9f11ba447a31c0c4b14..09ddddf71cc5049a570d11a5114ccec945df56ad 100644 (file)
@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define WANT_PAGE_VIRTUAL   1
 
index 8ec5599a0957e3f2314a63450f46588125d94a82..cf4be70d589259df60bfa2198da9f8c7c0a543c7 100644 (file)
@@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)      (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)    ((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)    ((pte_lookalike).val >> 13)
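+/* the offset is packed into the swap PTE above bit 13 (the encode side
+ * shifts it left by 13), so decoding must shift right; the old left
+ * shift produced garbage offsets */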
 
 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
index 538b36afe89e7c9871e2c37d2322d839a9b27a26..62b185057c040157132386aaee6ff56eaaebcf25 100644 (file)
@@ -31,10 +31,10 @@ void __init init_IRQ(void)
        /* a SMP H/w block could do IPI IRQ request here */
        if (plat_smp_ops.init_per_cpu)
                plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
 
        if (machine_desc->init_per_cpu)
                machine_desc->init_per_cpu(smp_processor_id());
-#endif
 }
 
 /*
index 5ac3b547453fd5b4b5393fdc10dfff34a047941f..4674541eba3fd019a51aeb02db27b2bc04569412 100644 (file)
@@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
        struct pt_regs *regs = current_pt_regs();
-       int uval = -EFAULT;
+       u32 uval;
+       int ret;
 
        /*
        * This is only for old cores lacking LLOCK/SCOND, which by definition
@@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
       /* Z indicates to userspace if operation succeeded */
        regs->status32 &= ~STATUS_Z_MASK;
 
-       if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-               return -EFAULT;
+       ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+       if (!ret)
+                goto fail;
 
+again:
        preempt_disable();
 
-       if (__get_user(uval, uaddr))
-               goto done;
+       ret = __get_user(uval, uaddr);
+       if (ret)
+                goto fault;
 
-       if (uval == expected) {
-               if (!__put_user(new, uaddr))
-                       regs->status32 |= STATUS_Z_MASK;
-       }
+       if (uval != expected)
+                goto out;
 
-done:
-       preempt_enable();
+       ret = __put_user(new, uaddr);
+       if (ret)
+                goto fault;
+
+       regs->status32 |= STATUS_Z_MASK;
 
+out:
+       preempt_enable();
        return uval;
+
+fault:
+       preempt_enable();
+
+       if (unlikely(ret != -EFAULT))
+                goto fail;
+
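+       /* the user access above ran with preemption (and hence inline
+        * page-fault handling) disabled, so fault the page in explicitly
+        * and retry the whole sequence */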
+       down_read(&current->mm->mmap_sem);
+       ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+                              FAULT_FLAG_WRITE, NULL);
+       up_read(&current->mm->mmap_sem);
+
+       if (likely(!ret))
+                goto again;
+
+fail:
+       force_sig(SIGSEGV, current);
+       return ret;
 }
 
 #ifdef CONFIG_ISA_ARCV2
index 19ab3cf98f0f34904b8431a6d4cf36642066c513..9356753c2ed83fc8f9ee7dbf55f173401499e2e4 100644 (file)
@@ -7,5 +7,8 @@
 
 menuconfig ARC_SOC_HSDK
        bool "ARC HS Development Kit SOC"
+       depends on ISA_ARCV2
+       select ARC_HAS_ACCL_REGS
        select CLK_HSDK
        select RESET_HSDK
+       select MIGHT_HAVE_PCI
index 2958aedb649ab183edcce1ca858006f67fd8ff21..2588b842407c281df0051b814fefd3cfcd9c31fe 100644 (file)
@@ -42,6 +42,66 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define SDIO_UHS_REG_EXT       (SDIO_BASE + 0x108)
 #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
 
+#define HSDK_GPIO_INTC          (ARC_PERIPHERAL_BASE + 0x3000)
+
+static void __init hsdk_enable_gpio_intc_wire(void)
+{
+       /*
+        * Peripherals on CPU Card are wired to cpu intc via intermediate
+        * DW APB GPIO blocks (mainly for debouncing)
+        *
+        *         ---------------------
+        *        |  snps,archs-intc  |
+        *        ---------------------
+        *                  |
+        *        ----------------------
+        *        | snps,archs-idu-intc |
+        *        ----------------------
+        *         |   |     |   |    |
+        *         | [eth] [USB]    [... other peripherals]
+        *         |
+        * -------------------
+        * | snps,dw-apb-intc |
+        * -------------------
+        *  |      |   |   |
+        * [Bt] [HAPS]   [... other peripherals]
+        *
+        * The current implementation of the "irq-dw-apb-ictl" driver doesn't
+        * work well with stacked INTCs. In particular, problems arise if its
+        * master INTC is not yet instantiated. See discussion here -
+        * https://lkml.org/lkml/2015/3/4/755
+        *
+        * So set up the first GPIO block as a passive pass-through and hide
+        * it from the DT hardware topology - connect the intc directly to
+        * the cpu intc. The GPIO "wire" nevertheless needs to be initialized
+        * here.
+        *
+        * One side benefit is that peripheral interrupt handling avoids one
+        * nested intc ISR hop.
+        *
+        * According to HSDK User's Manual [1], "Table 2 Interrupt Mapping"
+        * we have the following GPIO input lines used as sources of interrupt:
+        * - GPIO[0] - Bluetooth interrupt of RS9113 module
+        * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector)
+        * - GPIO[3] - Audio codec (MAX9880A) interrupt
+        * - GPIO[8-23] - Available on Arduino and PMOD_x headers
+        * For now there's no use of Arduino and PMOD_x headers in Linux
+        * use-case so we only enable lines 0, 2 and 3.
+        *
+        * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf
+        */
+#define GPIO_INTEN              (HSDK_GPIO_INTC + 0x30)
+#define GPIO_INTMASK            (HSDK_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL      (HSDK_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY       (HSDK_GPIO_INTC + 0x3c)
+#define GPIO_INT_CONNECTED_MASK        0x0d
+
+       iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK);
+       iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK);
+       iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL);
+       iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY);
+       iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
+}
+
 static void __init hsdk_init_early(void)
 {
        /*
@@ -62,6 +122,8 @@ static void __init hsdk_init_early(void)
         * minimum possible div-by-2.
         */
        iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+       hsdk_enable_gpio_intc_wire();
 }
 
 static const char *hsdk_compat[] __initconst = {
index 54eeb8d00bc62a9f818aa9a833cbc15e7a1d9324..843edfd000be7210ebef62e529ba074df5ee242f 100644 (file)
@@ -1245,8 +1245,14 @@ config PCI
          VESA. If you have PCI, say Y, otherwise N.
 
 config PCI_DOMAINS
-       bool
+       bool "Support for multiple PCI domains"
        depends on PCI
+       help
+         Enable PCI domains kernel management. Say Y if your machine
+         has a PCI bus hierarchy that requires more than one PCI
+         domain (aka segment) to be correctly managed. Say N otherwise.
+
+         If you don't know what to do here, say N.
 
 config PCI_DOMAINS_GENERIC
        def_bool PCI_DOMAINS
index f9e8667f5886db82027643130ca87b58cbea8f62..73b514dddf65b281b0c3093f40b05496240b5455 100644 (file)
                        AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_dat3.mmc0_dat3 */
                        AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_cmd.mmc0_cmd */
                        AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_clk.mmc0_clk */
-                       AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4)              /* mcasp0_aclkr.mmc0_sdwp */
                >;
        };
 
index ca294914bbb131b9725c43b8e7c768466bf0c775..23ea381d363fd12e6d9ac7f08e8613f1bf12443e 100644 (file)
@@ -39,6 +39,8 @@
                        ti,davinci-ctrl-ram-size = <0x2000>;
                        ti,davinci-rmii-en = /bits/ 8 <1>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
+                       clocks = <&emac_ick>;
+                       clock-names = "ick";
                };
 
                davinci_mdio: ethernet@5c030000 {
@@ -49,6 +51,8 @@
                        bus_freq = <1000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
+                       clocks = <&emac_fck>;
+                       clock-names = "fck";
                };
 
                uart4: serial@4809e000 {
        };
 };
 
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+       status = "disabled";
+};
+
 &iva {
        status = "disabled";
 };
index 440351ad0b80686d06126df39b02d8de104a95d0..d4be3fd0b6f4094643ef98660e0f2dbcb5edca9d 100644 (file)
 
                touchscreen-size-x = <480>;
                touchscreen-size-y = <272>;
+
+               wakeup-source;
        };
 
        tlv320aic3106: tlv320aic3106@1b {
index 6782ce481ac967ded05bbfc124355aa894790d6f..d8769956cbfcff7b4a38e72959ba8b88f397a1c0 100644 (file)
                                              3700 5
                                              3900 6
                                              4000 7>;
-                       cooling-cells = <2>;
+                       #cooling-cells = <2>;
                };
 
                gpio-leds {
index 18edc9bc79273b794ce263ce7d9674f43732c58e..929459c42760592c00d4ea0d585be4da2ad7e301 100644 (file)
 
                        thermal: thermal@e8078 {
                                compatible = "marvell,armada380-thermal";
-                               reg = <0xe4078 0x4>, <0xe4074 0x4>;
+                               reg = <0xe4078 0x4>, <0xe4070 0x8>;
                                status = "okay";
                        };
 
index 9fe4f5a6379e3b60d79a6ed8a0327f680434861e..2c4df2d2d4a6e1165fe27565a19d681c47a32cfa 100644 (file)
                        reg = <0x18008000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x1800b000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <0>;
 
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 97 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 98 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 99 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <1>;
 
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 103 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 104 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 105 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
index 3f9cedd8011f0c22fb05b6a50d1705fc5ceab05d..3084a7c957339f0edc2fef97d203b08635c96790 100644 (file)
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 95 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
 
                        reg = <0x3b000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 182 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 183 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 184 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 185 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 189 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 190 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 191 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index dcc55aa84583cdd18f7ef6ecd780eb947be1ef1f..09ba8504632284532e3b17c6d1531e2d732fadc4 100644 (file)
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        dma-coherent;
                        status = "disabled";
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 128 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 129 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 130 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 134 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 135 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 136 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <2>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 140 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 141 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 142 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index 9a076c409f4ed35fcf5fbe79807ede6e7e8466d5..ef995e50ee12bfd8b3d90d9e07062a41e04f4ff3 100644 (file)
        i2c0: i2c@18009000 {
                compatible = "brcm,iproc-i2c";
                reg = <0x18009000 0x50>;
-               interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clock-frequency = <100000>;
index f6f1597b03df931a1dea057921a43cea9f929a31..0f4f817a9e229c58f973f935a6f5906b1e0f8979 100644 (file)
                        gpio-controller;
                        #gpio-cells = <2>;
                        reg = <0x226000 0x1000>;
-                       interrupts = <42 IRQ_TYPE_EDGE_BOTH
-                               43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-                               45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-                               47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-                               49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+                       interrupts = <42 43 44 45 46 47 48 49 50>;
                        ti,ngpio = <144>;
                        ti,davinci-gpio-unbanked = <0>;
                        status = "disabled";
index 9dcd14edc20287f80c73a3b95d21e303ffa6d39c..e03495a799ce8d034feab58e263177794e706013 100644 (file)
                                dr_mode = "otg";
                                snps,dis_u3_susphy_quirk;
                                snps,dis_u2_susphy_quirk;
-                               snps,dis_metastability_quirk;
                        };
                };
 
                                dr_mode = "otg";
                                snps,dis_u3_susphy_quirk;
                                snps,dis_u2_susphy_quirk;
+                               snps,dis_metastability_quirk;
                        };
                };
 
index df9eca94d812290afe03affd59f76663ea1ab0ee..8a878687197b35a8e056ba55c4aaec56293123e1 100644 (file)
 
        pinctrl_ts: tsgrp {
                fsl,pins = <
-                       MX51_PAD_CSI1_D8__GPIO3_12              0x85
+                       MX51_PAD_CSI1_D8__GPIO3_12              0x04
                        MX51_PAD_CSI1_D9__GPIO3_13              0x85
                >;
        };
index 70483ce72ba6cf648809acb7f24be3af11817674..77f8f030dd0772aba631f57b704a7e60a9bd0532 100644 (file)
@@ -90,7 +90,7 @@
                                        clocks = <&clks IMX6Q_CLK_ECSPI5>,
                                                 <&clks IMX6Q_CLK_ECSPI5>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
+                                       dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
index 19a075aee19eabfb5ac2b127db45199bca2f0d6a..f14df0baf2ab42867e30f3b93520b75652561afa 100644 (file)
                        dsa,member = <0 0>;
                        eeprom-length = <512>;
                        interrupt-parent = <&gpio6>;
-                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
 
index d8b94f47498b67051ade669f23d2796a0b1e7433..4e4a55aad5c9ca9aa6fff90deb0ae1c5e99c3a13 100644 (file)
                        ranges = <0x81000000 0 0          0x08f80000 0 0x00010000 /* downstream I/O */
                                  0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */
                        num-lanes = <1>;
-                       interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
index bdf73cbcec3a5c1df0bb55344049aafa3a3cc404..e7c3c563ff8f5d05bb022ed1c3be513f2e97ff66 100644 (file)
 
                dais = <&mcbsp2_port>, <&mcbsp3_port>;
        };
-};
-
-&dss {
-       status = "okay";
-};
 
-&gpio6 {
        pwm8: dmtimer-pwm-8 {
                pinctrl-names = "default";
                pinctrl-0 = <&vibrator_direction_pin>;
                pwm-names = "enable", "direction";
                direction-duty-cycle-ns = <10000000>;
        };
+};
 
+&dss {
+       status = "okay";
 };
 
 &dsi1 {
index 486d4e7433ed32d2662fabcf9b25fe54eab0f187..b38f8c24055800c45e1e81aef451f08ac9e27be5 100644 (file)
                nand0: nand@ff900000 {
                        #address-cells = <0x1>;
                        #size-cells = <0x1>;
-                       compatible = "denali,denali-nand-dt";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xff900000 0x100000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
                        interrupts = <0x0 0x90 0x4>;
                        dma-mask = <0xffffffff>;
-                       clocks = <&nand_clk>;
+                       clocks = <&nand_x_clk>;
                        status = "disabled";
                };
 
index bead79e4b2aa2b624b8f7d21cef4751d6536b724..791ca15c799eba98850cbc3d4b96be7a509c422f 100644 (file)
                        #size-cells = <0>;
                        reg = <0xffda5000 0x100>;
                        interrupts = <0 102 4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        /*32bit_access;*/
                        tx-dma-channel = <&pdma 16>;
                        rx-dma-channel = <&pdma 17>;
                nand: nand@ffb90000 {
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xffb90000 0x72000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
index 1e9f7af8f70ff6ba23d9403f930f09dd6e0dda7e..3157be413297e5d22ad3174e2082b5199fc3083c 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE)               += dmabounce.o
 obj-$(CONFIG_SHARP_LOCOMO)     += locomo.o
 obj-$(CONFIG_SHARP_PARAM)      += sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)      += scoop.o
-obj-$(CONFIG_SMP)              += secure_cntvoff.o
+obj-$(CONFIG_CPU_V7)           += secure_cntvoff.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_MCPM)             += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o     = -pg
index 054591dc9a0020dcdaa907f6b3cded43408075d0..4cd2f4a2bff4e20beb76fd524348aae58fbc3590 100644 (file)
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index f70507ab91eeb1b59a0857cb9e6f55ff2825fe9f..200ebda47e0c3bee90eadd948b6f8f522fcfbedc 100644 (file)
@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
 CONFIG_USB_FUNCTIONFS=m
 CONFIG_USB_MASS_STORAGE=m
 CONFIG_USB_G_SERIAL=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index 7e1c543162c3ab16f11f6be6ccec5a16abae31d0..8f6be19825456496ef471b3b03a78d32354d9736 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_CGROUPS=y
@@ -10,20 +9,10 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMDLINE_PARTITION=y
-CONFIG_ARCH_MULTI_V7=y
-# CONFIG_ARCH_MULTI_V5 is not set
-# CONFIG_ARCH_MULTI_V4 is not set
 CONFIG_ARCH_VIRT=y
 CONFIG_ARCH_ALPINE=y
 CONFIG_ARCH_ARTPEC=y
 CONFIG_MACH_ARTPEC6=y
-CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370=y
-CONFIG_MACH_ARMADA_375=y
-CONFIG_MACH_ARMADA_38X=y
-CONFIG_MACH_ARMADA_39X=y
-CONFIG_MACH_ARMADA_XP=y
-CONFIG_MACH_DOVE=y
 CONFIG_ARCH_AT91=y
 CONFIG_SOC_SAMA5D2=y
 CONFIG_SOC_SAMA5D3=y
@@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y
 CONFIG_ARCH_BCM_CYGNUS=y
 CONFIG_ARCH_BCM_HR2=y
 CONFIG_ARCH_BCM_NSP=y
-CONFIG_ARCH_BCM_21664=y
-CONFIG_ARCH_BCM_281XX=y
 CONFIG_ARCH_BCM_5301X=y
+CONFIG_ARCH_BCM_281XX=y
+CONFIG_ARCH_BCM_21664=y
 CONFIG_ARCH_BCM2835=y
 CONFIG_ARCH_BCM_63XX=y
 CONFIG_ARCH_BRCMSTB=y
@@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y
 CONFIG_MACH_BERLIN_BG2CD=y
 CONFIG_MACH_BERLIN_BG2Q=y
 CONFIG_ARCH_DIGICOLOR=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_HIGHBANK=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_HI3xxx=y
-CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_HIP01=y
 CONFIG_ARCH_HIP04=y
-CONFIG_ARCH_KEYSTONE=y
-CONFIG_ARCH_MESON=y
+CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_MXC=y
 CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
@@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y
 CONFIG_SOC_IMX6SX=y
 CONFIG_SOC_IMX6UL=y
 CONFIG_SOC_IMX7D=y
-CONFIG_SOC_VF610=y
 CONFIG_SOC_LS1021A=y
+CONFIG_SOC_VF610=y
+CONFIG_ARCH_KEYSTONE=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
+CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
+CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MEDIATEK=y
 CONFIG_ARCH_MSM8X60=y
 CONFIG_ARCH_MSM8960=y
 CONFIG_ARCH_MSM8974=y
 CONFIG_ARCH_ROCKCHIP=y
-CONFIG_ARCH_SOCFPGA=y
-CONFIG_PLAT_SPEAR=y
-CONFIG_ARCH_SPEAR13XX=y
-CONFIG_MACH_SPEAR1310=y
-CONFIG_MACH_SPEAR1340=y
-CONFIG_ARCH_STI=y
-CONFIG_ARCH_STM32=y
-CONFIG_ARCH_EXYNOS=y
-CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_EMEV2=y
 CONFIG_ARCH_R7S72100=y
@@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y
 CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+CONFIG_ARCH_STI=y
+CONFIG_ARCH_STM32=y
 CONFIG_ARCH_SUNXI=y
-CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_2x_SOC=y
-CONFIG_ARCH_TEGRA_3x_SOC=y
-CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_ARCH_UNIPHIER=y
 CONFIG_ARCH_U8500=y
-CONFIG_MACH_HREFV60=y
-CONFIG_MACH_SNOWBALL=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_TC2_PM=y
 CONFIG_ARCH_WM8850=y
 CONFIG_ARCH_ZYNQ=y
-CONFIG_TRUSTED_FOUNDATIONS=y
-CONFIG_PCI=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_DRA7XX=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_KEYSTONE=y
-CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_KEYSTONE=y
 CONFIG_PCI_ENDPOINT=y
 CONFIG_PCI_ENDPOINT_CONFIGFS=y
 CONFIG_PCI_EPF_TEST=m
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
-CONFIG_HIGHPTE=y
-CONFIG_CMA=y
 CONFIG_SECCOMP=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
 CONFIG_ARM_ZYNQ_CPUIDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_KERNEL_MODE_NEON=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_DSA=m
-CONFIG_NET_SWITCHDEV=y
 CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_DEV=y
 CONFIG_CAN_AT91=m
 CONFIG_CAN_FLEXCAN=m
-CONFIG_CAN_RCAR=m
+CONFIG_CAN_SUN4I=y
 CONFIG_CAN_XILINXCAN=y
+CONFIG_CAN_RCAR=m
 CONFIG_CAN_MCP251X=y
-CONFIG_NET_DSA_BCM_SF2=m
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_CAN_SUN4I=y
 CONFIG_BT=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_BCM=y
@@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y
 CONFIG_RFKILL_GPIO=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
 CONFIG_SIMPLE_PM_BUS=y
-CONFIG_SUNXI_RSB=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m
 CONFIG_EEPROM_AT24=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
@@ -251,14 +221,20 @@ CONFIG_SATA_MV=y
 CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_VIRTIO_NET=y
-CONFIG_HIX5HD2_GMAC=y
+CONFIG_B53_SPI_DRIVER=m
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
+CONFIG_NET_DSA_BCM_SF2=m
 CONFIG_SUN4I_EMAC=y
-CONFIG_MACB=y
 CONFIG_BCMGENET=m
 CONFIG_BGMAC_BCMA=y
 CONFIG_SYSTEMPORT=m
+CONFIG_MACB=y
 CONFIG_NET_CALXEDA_XGMAC=y
 CONFIG_GIANFAR=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_E1000E=y
 CONFIG_IGB=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
@@ -268,19 +244,17 @@ CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_PLATFORM=y
 CONFIG_DWMAC_DWC_QOS_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_FIXED_PHY=y
+CONFIG_REALTEK_PHY=y
 CONFIG_ROCKCHIP_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_RTL8152=m
 CONFIG_USB_LAN78XX=m
@@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 CONFIG_BRCMFMAC=m
-CONFIG_RT2X00=m
-CONFIG_RT2800USB=m
 CONFIG_MWIFIEX=m
 CONFIG_MWIFIEX_SDIO=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_QT1070=m
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_KEYBOARD_TEGRA=y
-CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_KEYBOARD_ST_KEYSCAN=y
+CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
-CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=m
 CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
 CONFIG_SERIAL_ATMEL_TTYAT=y
-CONFIG_SERIAL_BCM63XX=y
-CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_MESON=y
 CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
@@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=20
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_SH_SCI_DMA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
 CONFIG_SERIAL_VT8500_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_OMAP=y
 CONFIG_SERIAL_OMAP_CONSOLE=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_FSL_LPUART=y
@@ -365,12 +337,10 @@ CONFIG_SERIAL_ST_ASC_CONSOLE=y
 CONFIG_SERIAL_STM32=y
 CONFIG_SERIAL_STM32_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_HVC_DRIVER=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_DAVINCI=y
-CONFIG_I2C_MESON=y
-CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=m
 CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
@@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y
 CONFIG_I2C_AT91=m
 CONFIG_I2C_BCM2835=y
 CONFIG_I2C_CADENCE=y
+CONFIG_I2C_DAVINCI=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_DIGICOLOR=m
 CONFIG_I2C_EMEV2=m
 CONFIG_I2C_GPIO=m
-CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_RIIC=y
 CONFIG_I2C_RK3X=y
@@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_AS3722=y
 CONFIG_PINCTRL_PALMAS=y
-CONFIG_PINCTRL_BCM2835=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_PINCTRL_IPQ8064=y
@@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_DAVINCI=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_EM=y
 CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_UNIPHIER=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
-CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_TPS6586X=y
 CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_ST=y
+CONFIG_POWER_RESET_KEYSTONE=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_BATTERY_ACT8945A=y
 CONFIG_BATTERY_CPCAP=m
 CONFIG_BATTERY_SBS=y
+CONFIG_AXP20X_POWER=m
 CONFIG_BATTERY_MAX17040=m
 CONFIG_BATTERY_MAX17042=m
 CONFIG_CHARGER_CPCAP=m
@@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m
 CONFIG_CHARGER_MAX8997=m
 CONFIG_CHARGER_MAX8998=m
 CONFIG_CHARGER_TPS65090=y
-CONFIG_AXP20X_POWER=m
-CONFIG_POWER_RESET_AS3722=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_RMOBILE=y
-CONFIG_POWER_RESET_ST=y
-CONFIG_POWER_AVS=y
-CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
@@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_SENSORS_PWM_FAN=m
 CONFIG_SENSORS_INA2XX=m
 CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=m
-CONFIG_BRCMSTB_THERMAL=m
 CONFIG_IMX_THERMAL=y
 CONFIG_ROCKCHIP_THERMAL=y
 CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
-CONFIG_DAVINCI_WATCHDOG=m
-CONFIG_EXYNOS_THERMAL=m
+CONFIG_BCM2835_THERMAL=m
+CONFIG_BRCMSTB_THERMAL=m
 CONFIG_ST_THERMAL_MEMMAP=y
 CONFIG_WATCHDOG=y
 CONFIG_DA9063_WATCHDOG=m
@@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_ORION_WATCHDOG=y
 CONFIG_RN5T618_WATCHDOG=y
-CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_IMX2_WDT=y
+CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
-CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_RENESAS_WDT=m
-CONFIG_BCM2835_WDT=y
 CONFIG_BCM47XX_WDT=y
-CONFIG_BCM7038_WDT=m
+CONFIG_BCM2835_WDT=y
 CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCMA_HOST_SOC=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
 CONFIG_MFD_ACT8945A=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
@@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_MFD_ATMEL_HLCDC=m
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AC100=y
-CONFIG_MFD_AXP20X=y
 CONFIG_MFD_AXP20X_I2C=y
 CONFIG_MFD_AXP20X_RSB=y
 CONFIG_MFD_CROS_EC=m
@@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m
 CONFIG_MFD_MAX8907=y
 CONFIG_MFD_MAX8997=y
 CONFIG_MFD_MAX8998=y
-CONFIG_MFD_RK808=y
 CONFIG_MFD_CPCAP=y
 CONFIG_MFD_PM8XXX=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
 CONFIG_MFD_RN5T618=y
 CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_STMPE=y
@@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
-CONFIG_REGULATOR_ACT8945A=y
-CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ACT8945A=y
 CONFIG_REGULATOR_ANATOP=y
+CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
@@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_CPCAP=y
 CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_GPIO=y
-CONFIG_MFD_SYSCON=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_LP872X=y
 CONFIG_REGULATOR_MAX14577=m
 CONFIG_REGULATOR_MAX8907=y
@@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_PWM=y
 CONFIG_REGULATOR_QCOM_RPM=y
-CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=m
+CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_RN5T618=y
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
@@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SOC_CAMERA=m
 CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_VIDEO_RCAR_VIN=m
-CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
 CONFIG_VIDEO_S5P_FIMC=m
 CONFIG_VIDEO_S5P_MIPI_CSIS=m
 CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
 CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
@@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m
 CONFIG_VIDEO_RENESAS_JPU=m
 CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIVID=m
 CONFIG_CEC_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
 # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
 CONFIG_VIDEO_ADV7180=m
 CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
-CONFIG_DRM_I2C_ADV7511=m
-CONFIG_DRM_I2C_ADV7511_AUDIO=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_DUMB_VGA_DAC=m
-CONFIG_DRM_NXP_PTN3460=m
-CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_FIMD=y
@@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y
 CONFIG_DRM_SUN4I=m
 CONFIG_DRM_FSL_DCU=m
 CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_SII9234=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
 CONFIG_DRM_STI=m
-CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4=m
 CONFIG_DRM_ETNAVIV=m
 CONFIG_DRM_MXSFB=m
 CONFIG_FB_ARMCLCD=y
@@ -659,8 +627,6 @@ CONFIG_FB_EFI=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_AS3711=y
@@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_HDA_TEGRA=m
 CONFIG_SND_HDA_INPUT_BEEP=y
 CONFIG_SND_HDA_PATCH_LOADER=y
@@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m
 CONFIG_SND_SOC_ODROID=m
 CONFIG_SND_SOC_SH4_FSI=m
 CONFIG_SND_SOC_RCAR=m
-CONFIG_SND_SIMPLE_SCU_CARD=m
+CONFIG_SND_SOC_STI=m
 CONFIG_SND_SUN4I_CODEC=m
 CONFIG_SND_SOC_TEGRA=m
 CONFIG_SND_SOC_TEGRA20_I2S=m
@@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m
 CONFIG_SND_SOC_TEGRA_WM9712=m
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=m
 CONFIG_SND_SOC_TEGRA_ALC5632=m
-CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_TEGRA_MAX98090=m
 CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_SGTL5000=m
 CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_STI=m
 CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_SCU_CARD=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
-CONFIG_USB_XHCI_RCAR=m
 CONFIG_USB_XHCI_TEGRA=m
 CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=m
-CONFIG_USB_EHCI_EXYNOS=y
-CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_EXYNOS=m
 CONFIG_USB_R8A66597_HCD=m
 CONFIG_USB_RENESAS_USBHS=m
@@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_TUSB_OMAP_DMA=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_DWC2=y
-CONFIG_USB_HSIC_USB3503=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
 CONFIG_AB8500_USB=y
-CONFIG_KEYSTONE_USB_PHY=y
+CONFIG_KEYSTONE_USB_PHY=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_AM335X_PHY_USB=m
 CONFIG_TWL6030_USB=m
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
-CONFIG_USB_MSM_OTG=m
 CONFIG_USB_MXS_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_FSL_USB2=y
@@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_DOVE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_PXAV3=y
 CONFIG_MMC_SDHCI_SPEAR=y
-CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_S3C_DMA=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
+CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_ATMELMCI=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_MVSDIO=y
 CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
 CONFIG_MMC_SH_MMCIF=y
@@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_PALMAS=y
-CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
 CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_EM3027=y
+CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_DA9063=m
 CONFIG_RTC_DRV_EFI=m
 CONFIG_RTC_DRV_DIGICOLOR=m
-CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_S3C=m
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_AT91RM9200=m
 CONFIG_RTC_DRV_AT91SAM9=m
 CONFIG_RTC_DRV_VT8500=y
-CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_SUNXI=y
 CONFIG_RTC_DRV_MV=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_CPCAP=m
 CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
 CONFIG_AT_HDMAC=y
 CONFIG_AT_XDMAC=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_SUN6I=y
 CONFIG_FSL_EDMA=y
+CONFIG_IMX_DMA=y
+CONFIG_IMX_SDMA=y
 CONFIG_MV_XOR=y
+CONFIG_MXS_DMA=y
+CONFIG_PL330_DMA=y
+CONFIG_SIRF_DMA=y
+CONFIG_STE_DMA40=y
+CONFIG_ST_FDMA=m
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_DW_DMAC=y
 CONFIG_SH_DMAE=y
 CONFIG_RCAR_DMAC=y
 CONFIG_RENESAS_USB_DMAC=m
-CONFIG_STE_DMA40=y
-CONFIG_SIRF_DMA=y
-CONFIG_TI_EDMA=y
-CONFIG_PL330_DMA=y
-CONFIG_IMX_SDMA=y
-CONFIG_IMX_DMA=y
-CONFIG_MXS_DMA=y
-CONFIG_DMA_BCM2835=y
-CONFIG_DMA_OMAP=y
-CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_DMA=y
-CONFIG_DMA_SUN6I=y
-CONFIG_ST_FDMA=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
 CONFIG_STAGING=y
-CONFIG_SENSORS_ISL29018=y
-CONFIG_SENSORS_ISL29028=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
-CONFIG_BCMA=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-CONFIG_QCOM_GSBI=y
-CONFIG_QCOM_PM=y
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD_RPM=y
-CONFIG_QCOM_SMP2P=y
-CONFIG_QCOM_SMSM=y
-CONFIG_QCOM_WCNSS_CTRL=m
-CONFIG_ROCKCHIP_PM_DOMAINS=y
-CONFIG_COMMON_CLK_QCOM=y
-CONFIG_QCOM_CLK_RPM=y
-CONFIG_CHROME_PLATFORMS=y
 CONFIG_STAGING_BOARD=y
-CONFIG_CROS_EC_CHARDEV=m
 CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_COMMON_CLK_RK808=m
 CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
 CONFIG_APQ_MMCC_8084=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
 CONFIG_MSM_MMCC_8974=y
-CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_BCM2835_MBOX=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_REMOTEPROC=m
 CONFIG_ST_REMOTEPROC=m
 CONFIG_RPMSG_VIRTIO=m
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMD_RPM=m
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_PM_DEVFREQ=y
 CONFIG_ARM_TEGRA_DEVFREQ=m
-CONFIG_MEMORY=y
-CONFIG_EXTCON=y
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_IIO_SW_TRIGGER=y
@@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m
 CONFIG_XILINX_XADC=y
 CONFIG_MPU3050_I2C=y
 CONFIG_CM36651=m
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
 CONFIG_AK8975=y
-CONFIG_RASPBERRYPI_POWER=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=m
 CONFIG_PWM_ATMEL_HLCDC_PWM=m
 CONFIG_PWM_ATMEL_TCB=m
+CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
 CONFIG_PWM_FSL_FTM=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_RCAR=m
 CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_ROCKCHIP=m
 CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_STI=y
 CONFIG_PWM_SUN4I=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
+CONFIG_KEYSTONE_IRQ=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
 CONFIG_PHY_HIX5HD2_SATA=y
-CONFIG_E1000E=y
-CONFIG_PWM_STI=y
-CONFIG_PWM_BCM2835=y
-CONFIG_PWM_BRCMSTB=m
-CONFIG_PHY_DM816X_USB=m
-CONFIG_OMAP_USB2=y
-CONFIG_TI_PIPE3=y
-CONFIG_TWL4030_USB=m
+CONFIG_PHY_BERLIN_SATA=y
 CONFIG_PHY_BERLIN_USB=y
 CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_ROCKCHIP_DP=m
 CONFIG_PHY_ROCKCHIP_USB=y
-CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_MIPHY28LP=y
-CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_STM32_USBPHYC=y
-CONFIG_PHY_SUN4I_USB=y
-CONFIG_PHY_SUN9I_USB=y
-CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_TEGRA_XUSB=y
-CONFIG_PHY_BRCM_SATA=y
-CONFIG_NVMEM=y
+CONFIG_PHY_DM816X_USB=m
+CONFIG_OMAP_USB2=y
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_SUNXI_SID=y
 CONFIG_NVMEM_VF610_OCOTP=y
-CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
-CONFIG_EFI_VARS=m
-CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=y
@@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_UBIFS_FS=y
-CONFIG_TMPFS=y
 CONFIG_SQUASHFS=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
@@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_KEYSTONE_IRQ=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_ST=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_DEV_MARVELL_CESA=m
 CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
 CONFIG_CRYPTO_DEV_S5P=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=m
+CONFIG_CRYPTO_DEV_ATMEL_TDES=m
+CONFIG_CRYPTO_DEV_ATMEL_SHA=m
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_ROCKCHIP=m
 CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM=m
 CONFIG_CRYPTO_SHA1_ARM_NEON=m
 CONFIG_CRYPTO_SHA1_ARM_CE=m
 CONFIG_CRYPTO_SHA2_ARM_CE=m
-CONFIG_CRYPTO_SHA256_ARM=m
 CONFIG_CRYPTO_SHA512_ARM=m
 CONFIG_CRYPTO_AES_ARM=m
 CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRYPTO_AES_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
-CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m
 CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_DEV_ATMEL_AES=m
-CONFIG_CRYPTO_DEV_ATMEL_TDES=m
-CONFIG_CRYPTO_DEV_ATMEL_SHA=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_MMIO=y
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
index 3c1e203e53b9ccd752731f228b595a5678557782..57caa742016ed59bc8d3755fd6b9526f0c05f860 100644 (file)
         * Allocate stack space to store 128 bytes worth of tweaks.  For
         * performance, this space is aligned to a 16-byte boundary so that we
         * can use the load/store instructions that declare 16-byte alignment.
+        * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
         */
-       sub             sp, #128
-       bic             sp, #0xf
+       sub             r12, sp, #128
+       bic             r12, #0xf
+       mov             sp, r12
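+       // r12 (ip) is the AAPCS intra-call scratch register, safe to clobber here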
 
 .if \n == 64
        // Load first tweak
index a71f16536b6c178c09334efc3047f4ddcc8da91b..6e41336b0bc4fc71ebaf5f5f4bae7e5e9e1b0395 100644 (file)
@@ -1 +1,4 @@
 obj-$(CONFIG_TRUSTED_FOUNDATIONS)      += trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT                := n
index dd546d65a3830d819a48fc1463d55d1cc2110c18..7a9b86978ee1e2b917d1104138d3aa468a88bb53 100644 (file)
@@ -177,7 +177,7 @@ M_CLASS(streq       r3, [r12, #PMSAv8_MAIR1])
        bic     r0, r0, #CR_I
 #endif
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
-       isb
+       instr_sync
 #elif defined (CONFIG_CPU_V7M)
 #ifdef CONFIG_ARM_MPU
        ldreq   r3, [r12, MPU_CTRL]
index 225d1c58d2de98d5c4a92de4905052203f1e25b6..d9c2991331111617cc3fce04f38fe17fd1a78f6d 100644 (file)
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
 
 static int __init gate_vma_init(void)
 {
+       vma_init(&gate_vma, NULL);
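+       /* vma_init() sets ->vm_mm and a dummy ->vm_ops before the VMA is used */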
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        return 0;
 }
index f09e9d66d605f4159990ad044cfd29486d621d20..dec130e7078c9adc10dae920c2c706143c7e126e 100644 (file)
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /*
         * Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                        } else {
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(regs);
+                               rseq_handle_notify_resume(NULL, regs);
                        }
                }
                local_irq_disable();
index c46a728df44ead2a0b986a1f218e3c61e9820c34..25aac6ee2ab18cdd0189c27bac759efc02f2d1c5 100644 (file)
@@ -20,6 +20,7 @@ config ARCH_BCM_IPROC
        select GPIOLIB
        select ARM_AMBA
        select PINCTRL
+       select PCI_DOMAINS if PCI
        help
          This enables support for systems based on Broadcom IPROC architected SoCs.
          The IPROC complex contains one or more ARM CPUs along with common
index e22fb40e34bc55be6dd807de63fb9cd009107916..6d5beb11bd965a805107328d2522144f4b857f9a 100644 (file)
@@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
                            GPIO_ACTIVE_LOW),
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
-                           GPIO_ACTIVE_LOW),
+                           GPIO_ACTIVE_HIGH),
        },
 };
 
index 69df3620eca5ce1720f88ab86cf5a36df891d7e7..1c73694c871ad8289b572056d5c3727f3ee22eb2 100644 (file)
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround on the secondary core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (the kernel will detect a misconfiguration and
+ * complain if this is not done).
+ *
+ * In General Purpose (GP) devices, ACR bits can only be set by ROM
+ * code in the "secure world" using the smc call, and there is no
+ * option to update the "firmware" on such devices. This also works
+ * for High Security (HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+       u32 acr, acr_mask;
+
+       asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
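+       /* CP15 c1, c0, 1 is the Auxiliary Control Register (ACTLR) */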
+
+       /*
+        * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+        */
+       acr_mask = BIT(0);
+
+       /* If it is already set, skip the expensive smc call */
+       if ((acr & acr_mask) == acr_mask)
+               return;
+
+       acr |= acr_mask;
+       omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+       pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+                __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
        /*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
                set_cntfreq();
                /* Configure ACR to disable streaming WA for 801819 */
                omap5_erratum_workaround_801819();
+               /* Enable ACR to allow for the ICIALLU workaround */
+               omap5_secondary_harden_predictor();
        }
 
        /*
index 9c10248fadccc2d03ef3b3bcbddbe0b43347f158..4e8c2116808ecf3d36d36653184dc89d2941885e 100644 (file)
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
        int i;
 
-       for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+       for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);
 
                saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
        int i;
 
-       for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+       for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);
 
                __raw_writel(saved_icmr[i], base + ICMR);
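
Both loops above swap a truncating division for a round-up. A minimal sketch
(the IRQ count of 56 is illustrative, not taken from the driver) of why this
matters when the number of IRQs is not a multiple of 32:

	/* DIV_ROUND_UP as defined in include/linux/kernel.h */
	#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

	int banks_old = 56 / 32;              /* == 1: the bank for IRQs 32..55 is skipped */
	int banks_new = DIV_ROUND_UP(56, 32); /* == 2: every register bank saved/restored  */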
index 39aef4876ed41346b81ef0d79d4657e8b93fd208..8db62cc54a6acec02fd8a8da6c844de77b8c4b75 100644 (file)
@@ -237,8 +237,8 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
        memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+       vma_init(&vma, mm);
        vma.vm_flags = VM_EXEC;
-       vma.vm_mm = mm;
 
        flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
        flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
index d0f62eacf59da510388dd206d2673eb1b6aa84e3..4adb901dd5ebdd99f2a2b747c1bc237e123a87de 100644 (file)
@@ -10,6 +10,7 @@ menuconfig ARCH_SOCFPGA
        select HAVE_ARM_SCU
        select HAVE_ARM_TWD if SMP
        select MFD_SYSCON
+       select PCI_DOMAINS if PCI
 
 if ARCH_SOCFPGA
 config SOCFPGA_SUSPEND
index be0fa7e39c2621ea4a9a83744e513f4d62d246c4..ba0e786c952e70a1989ccb93043bb08e4d721d28 100644 (file)
@@ -1151,6 +1151,11 @@ int arm_dma_supported(struct device *dev, u64 mask)
        return __dma_supported(dev, mask, false);
 }
 
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+{
+       return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
+}
+
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
 
 static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
@@ -2296,7 +2301,7 @@ void arm_iommu_detach_device(struct device *dev)
        iommu_detach_device(mapping->domain, dev);
        kref_put(&mapping->kref, release_iommu_mapping);
        to_dma_iommu_mapping(dev) = NULL;
-       set_dma_ops(dev, NULL);
+       set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
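+       /* fall back to the default coherent/non-coherent ops rather than NULL */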
 
        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
@@ -2357,11 +2362,6 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
-static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
-{
-       return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
-}
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
 {
index c186474422f3fb25cb809a6d0bff48f476ef8595..0cc8e04295a40dc1d16f308396afdfb7540aa48c 100644 (file)
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
        return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+       kernel_set_to_readonly = 1;
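+       /* from now on set_kernel_text_rw()/set_kernel_text_ro() below take effect */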
        stop_machine(__mark_rodata_ro, NULL, NULL);
        debug_checkwx();
 }
 
 void set_kernel_text_rw(void)
 {
+       if (!kernel_set_to_readonly)
+               return;
+
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                                current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+       if (!kernel_set_to_readonly)
+               return;
+
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
 }
index 6e8b7161303936908b3b2b7adfced5d17de379ce..f6a62ae44a65b61e162203ad261a7fbb5d4b34cf 100644 (file)
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                /* there are 2 passes here */
                bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-       set_memory_ro((unsigned long)header, header->pages);
+       bpf_jit_binary_lock_ro(header);
        prog->bpf_func = (void *)ctx.target;
        prog->jited = 1;
        prog->jited_len = image_size;
index 8073625371f5d22defae1efe6322a717201e8010..07060e5b58641cc008f41aa927c2e6043ab6afbb 100644 (file)
@@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
 static __read_mostly unsigned int xen_events_irq;
 
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@ void __init xen_early_init(void)
        xen_setup_features();
 
        if (xen_feature(XENFEAT_dom0))
-               xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
-       else
-               xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+               xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
 
        if (!console_set_on_cmdline && !xen_initial_domain())
                add_preferred_console("hvc", 0, NULL);
index 45272266dafb64a1fda433e7f557bf11b89e908e..e7101b19d5902775bf0a2f951a866f1abcf614b1 100644 (file)
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux        :=-p --no-undefined -X
+LDFLAGS_vmlinux        :=--no-undefined -X
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS                :=-9
 
@@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
 AS             += -EB
-LD             += -EB
-LDFLAGS                += -maarch64linuxb
+# We must use the linux target here, since distributions don't tend to package
+# the ELF linker scripts with binutils, and this results in a build failure.
+LDFLAGS                += -EB -maarch64linuxb
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 AS             += -EL
-LD             += -EL
-LDFLAGS                += -maarch64linux
+LDFLAGS                += -EL -maarch64linux # See comment above
 UTS_MACHINE    := aarch64
 endif
 
index e6b059378dc04784927a9b996f24213685bf406a..67dac595dc72ebdeffcd5b6bffd50d115cce8cbc 100644 (file)
                        interrupts = <0 99 4>;
                        resets = <&rst SPIM0_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
                        interrupts = <0 100 4>;
                        resets = <&rst SPIM1_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
index 4b3331fbfe39d7b81d9466fb718975b6265c8e5e..dff9b15eb3c0b63a70c65070c465305c35985dee 100644 (file)
 
 &ethmac {
        status = "okay";
-       phy-mode = "rgmii";
        pinctrl-0 = <&eth_rgmii_y_pins>;
        pinctrl-names = "default";
+       phy-handle = <&eth_phy0>;
+       phy-mode = "rgmii";
+
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eth_phy0: ethernet-phy@0 {
+                       /* Realtek RTL8211F (0x001cc916) */
+                       reg = <0>;
+                       eee-broken-1000t;
+               };
+       };
 };
 
 &uart_A {
index fee87737a201f1121fe7a3ad3cd70c1d20415a0d..67d7115e4effbde75173aa4a4c07ae890b3183c5 100644 (file)
 
                        sd_emmc_b: sd@5000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x5000 0x0 0x2000>;
+                               reg = <0x0 0x5000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_B>,
 
                        sd_emmc_c: mmc@7000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x7000 0x0 0x2000>;
+                               reg = <0x0 0x7000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_C>,
index 3c31e21cbed7fcdde5bbdf030fcd6c194be5033d..b8dc4dbb391b669fc13eb13b1a24f01d24ab252f 100644 (file)
                        no-map;
                };
 
+               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+               secmon_reserved_alt: secmon@5000000 {
+                       reg = <0x0 0x05000000 0x0 0x300000>;
+                       no-map;
+               };
+
                linux,cma {
                        compatible = "shared-dma-pool";
                        reusable;
 
                        sd_emmc_a: mmc@70000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x70000 0x0 0x2000>;
+                               reg = <0x0 0x70000 0x0 0x800>;
                                interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_b: mmc@72000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x72000 0x0 0x2000>;
+                               reg = <0x0 0x72000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_c: mmc@74000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x74000 0x0 0x2000>;
+                               reg = <0x0 0x74000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index eb327664a4d8c38c196b7cec2dbbe5e5ac2c147f..6aaafff674f97f56625c2da8ea6a5b7dd10eb2d8 100644 (file)
@@ -6,7 +6,7 @@
 
 &apb {
        mali: gpu@c0000 {
-               compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+               compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
                reg = <0x0 0xc0000 0x0 0x40000>;
                interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
index 3e3eb31748a35a7790a9dc90e56971f004660298..f63bceb88caafa249d84de963c3daa034fb842b7 100644 (file)
 
        bus-width = <4>;
        cap-sd-highspeed;
-       sd-uhs-sdr12;
-       sd-uhs-sdr25;
-       sd-uhs-sdr50;
        max-frequency = <100000000>;
        disable-wp;
 
index 0cfd701809dec578ac31f5f68a7fcfbc21822619..a1b31013ab6e3494d810619fadf81752a67b94f4 100644 (file)
 &usb0 {
        status = "okay";
 };
+
+&usb2_phy0 {
+       /*
+        * HDMI_5V is also used as supply for the USB VBUS.
+        */
+       phy-supply = <&hdmi_5v>;
+};
index 27538eea547b19a0fe8c14a97de4aa303ba63978..c87a80e9bcc6a80bc0f8a59c43a32d6485facafe 100644 (file)
 / {
        compatible = "amlogic,meson-gxl";
 
-       reserved-memory {
-               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
-               secmon_reserved_alt: secmon@5000000 {
-                       reg = <0x0 0x05000000 0x0 0x300000>;
-                       no-map;
-               };
-       };
-
        soc {
                usb0: usb@c9000000 {
                        status = "disabled";
index 4a2a6af8e752dbbe3a17fa02861fb3603d7c44cb..4057197048dcbbacaee733c6067cc677fd1ad54d 100644 (file)
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <4>;
 
                        reg = <0x66080000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x660b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index eb6f08cdbd796c3d764393f9e2e70db2129b0e28..77efa28c4dd53db718b22e64569385f6d92c2feb 100644 (file)
        enet-phy-lane-swap;
 };
 
+&sdio0 {
+       mmc-ddr-1_8v;
+};
+
 &uart2 {
        status = "okay";
 };
index 5084b037320fd9cb65133ca929517062a245af3b..55ba495ef56e1f54b518483bc9e5369fcb03b441 100644 (file)
@@ -42,3 +42,7 @@
 &gphy0 {
        enet-phy-lane-swap;
 };
+
+&sdio0 {
+       mmc-ddr-1_8v;
+};
index 99aaff0b6d72b6bc971863411b80caa3dd165048..b203152ad67ca18b4421bb035b2d13d32d7f9be5 100644 (file)
                        reg = <0x000b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x000e0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index c6999624ed8abdcf4a7f8cea12635eb0606e6432..68c5a6c819aef2c3fbe8b59aac695cfb0c3a3a77 100644 (file)
        vmmc-supply = <&wlan_en>;
        ti,non-removable;
        non-removable;
+       cap-power-off-card;
+       keep-power-in-suspend;
        #address-cells = <0x1>;
        #size-cells = <0x0>;
        status = "ok";
index edb4ee0b8896b2c9a5572e1160e273eac42e062d..7f12624f6c8e8c6af0a3900f3e7d703a969c6c0e 100644 (file)
                dwmmc_2: dwmmc2@f723f000 {
                        bus-width = <0x4>;
                        non-removable;
+                       cap-power-off-card;
+                       keep-power-in-suspend;
                        vmmc-supply = <&reg_vdd_3v3>;
                        mmc-pwrseq = <&wl1835_pwrseq>;
 
index 7dabe25f6774827fd08ec78b3f3793e5b5658177..1c6ff8197a88b1f890fed5b592b9358986942145 100644 (file)
 
                CP110_LABEL(icu): interrupt-controller@1e0000 {
                        compatible = "marvell,cp110-icu";
-                       reg = <0x1e0000 0x10>;
+                       reg = <0x1e0000 0x440>;
                        #interrupt-cells = <3>;
                        interrupt-controller;
                        msi-parent = <&gicp>;
index 0f829db33efe2dfa2735a7cdf570de77c49a6356..4d5ef01f43a331c456eddf1a324f1e1d450bcea5 100644 (file)
@@ -75,7 +75,7 @@
 
                serial@75b1000 {
                        label = "LS-UART0";
-                       status = "okay";
+                       status = "disabled";
                        pinctrl-names = "default", "sleep";
                        pinctrl-0 = <&blsp2_uart2_4pins_default>;
                        pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
index 650f356f69ca748f0fbef0c52f4026e43f511e46..c2625d15a8c08f535e6f00f5c8228212c4d7ec5f 100644 (file)
 
                                port@0 {
                                        reg = <0>;
-                                       etf_out: endpoint {
+                                       etf_in: endpoint {
                                                slave-mode;
                                                remote-endpoint = <&funnel0_out>;
                                        };
                                };
                                port@1 {
                                        reg = <0>;
-                                       etf_in: endpoint {
+                                       etf_out: endpoint {
                                                remote-endpoint = <&replicator_in>;
                                        };
                                };
index 9b4dc41703e38036283aa2a4eededd3322e7a428..ae3b5adf32dfe4a31125880e3a8fa49877c83923 100644 (file)
@@ -54,7 +54,7 @@
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD11";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index fe6608ea327772e3ad0125c020f8d0102dda35bb..7919233c9ce27e3c86dc8dffce13a97e00112c64 100644 (file)
@@ -54,7 +54,7 @@
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD20";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index 3cfa8ca267384615694e693ed0371df694fea1f4..f9a186f6af8a9206de939bbdf3f6013988b8b994 100644 (file)
@@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_R8A7796=y
@@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
 CONFIG_ARCH_SPRD=y
-CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_THUNDER2=y
 CONFIG_ARCH_UNIPHIER=y
@@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZX=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_HISI=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCIE_KIRIN=y
-CONFIG_PCIE_ARMADA_8K=y
-CONFIG_PCIE_HISI_STB=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=y
-CONFIG_PCIE_ROCKCHIP_HOST=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCI_HOST_THUNDER_PEM=y
 CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
 CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_NUMA=y
@@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y
 CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
 CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
 CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
 CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_ARM_TEGRA186_CPUFREQ=y
-CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -236,11 +232,6 @@ CONFIG_SMSC911X=y
 CONFIG_SNI_AVE=y
 CONFIG_SNI_NETSEC=y
 CONFIG_STMMAC_ETH=m
-CONFIG_DWMAC_IPQ806X=m
-CONFIG_DWMAC_MESON=m
-CONFIG_DWMAC_ROCKCHIP=m
-CONFIG_DWMAC_SUNXI=m
-CONFIG_DWMAC_SUN8I=m
 CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_AT803X_PHY=m
 CONFIG_MARVELL_PHY=m
@@ -269,8 +260,8 @@ CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_INPUT_MISC=y
@@ -296,17 +287,13 @@ CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=11
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=y
-CONFIG_I2C_HID=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_MUX_PCA954x=y
@@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
 CONFIG_SPI_ARMADA_3700=y
-CONFIG_SPI_MESON_SPICC=m
-CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_BCM2835=m
 CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
-CONFIG_SPI_QUP=y
 CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_QUP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
-CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_MSM8994=y
 CONFIG_PINCTRL_MSM8996=y
-CONFIG_PINCTRL_MT7622=y
 CONFIG_PINCTRL_QDF2XXX=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_MT7622=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_MB86S7X=y
 CONFIG_GPIO_PL061=y
@@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m
 CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_GEN3_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_BRCMSTB_THERMAL=m
 CONFIG_EXYNOS_THERMAL=y
-CONFIG_RCAR_GEN3_THERMAL=y
-CONFIG_QCOM_TSENS=y
-CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_TSENS=y
 CONFIG_UNIPHIER_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
@@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI6421V530=y
 CONFIG_REGULATOR_HI655X=y
@@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_S2MPS11=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=m
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
 CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_RC_SUPPORT=y
-CONFIG_RC_CORE=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_DECODERS=y
-CONFIG_IR_MESON=m
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 # CONFIG_DVB_NET is not set
 CONFIG_V4L_MEM2MEM_DRIVERS=y
@@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y
 CONFIG_ROCKCHIP_DW_MIPI_DSI=y
 CONFIG_ROCKCHIP_INNO_HDMI=y
 CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_LVDS=y
-CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_RCAR_LVDS=m
 CONFIG_DRM_TEGRA=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
@@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_GENERIC=m
 CONFIG_BACKLIGHT_PWM=m
 CONFIG_BACKLIGHT_LP855X=m
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_AK4613=m
 CONFIG_SND_SIMPLE_CARD=m
 CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_I2C_HID=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
@@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_ACPI=y
-CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_CADENCE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
@@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_PWM=y
 CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_EDAC=y
 CONFIG_EDAC_GHES=y
 CONFIG_RTC_CLASS=y
@@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_DS3232=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_ARMADA38X=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_DMADEVICES=y
 CONFIG_DMA_BCM2835=m
 CONFIG_K3_DMA=y
@@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_ARM_MHU=y
 CONFIG_PLATFORM_MHU=y
 CONFIG_BCM2835_MBOX=y
-CONFIG_HI6220_MBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_SMMU=y
@@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
 CONFIG_MEMORY=y
-CONFIG_TEGRA_MC=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
 CONFIG_ROCKCHIP_SARADC=m
@@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_PWM_TEGRA=m
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_HISTB_COMBPHY=y
 CONFIG_PHY_HISI_INNO_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB3=m
-CONFIG_PHY_HI6220_USB=y
-CONFIG_PHY_QCOM_USB_HS=y
-CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_MVEBU_CP110_COMPHY=y
 CONFIG_PHY_QCOM_QMP=m
-CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_ROCKCHIP_TYPEC=y
-CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_QCOM_L2_PMU=y
 CONFIG_QCOM_L3_PMU=y
-CONFIG_MESON_EFUSE=m
 CONFIG_QCOM_QFPROM=y
 CONFIG_ROCKCHIP_EFUSE=y
 CONFIG_UNIPHIER_EFUSE=y
+CONFIG_MESON_EFUSE=m
 CONFIG_TEE=y
 CONFIG_OPTEE=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_ACPI=y
 CONFIG_ACPI_APEI=y
 CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
 CONFIG_ACPI_APEI_MEMORY_FAILURE=y
 CONFIG_ACPI_APEI_EINJ=y
 CONFIG_EXT2_FS=y
@@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
@@ -691,20 +672,15 @@ CONFIG_SECURITY=y
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA256_ARM64=m
-CONFIG_CRYPTO_SHA512_ARM64=m
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
 CONFIG_CRYPTO_CRC32_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64=m
-CONFIG_CRYPTO_AES_ARM64_CE=m
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
 CONFIG_CRYPTO_CHACHA20_NEON=m
 CONFIG_CRYPTO_AES_ARM64_BS=m
-CONFIG_CRYPTO_SHA512_ARM64_CE=m
-CONFIG_CRYPTO_SHA3_ARM64=m
-CONFIG_CRYPTO_SM3_ARM64_CE=m
index 253188fb8cb0cea0e35d0f4ed77b5e2c6332d507..e3e50950a863675b72a3c1e0d605d81cf5f258f2 100644 (file)
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
                kernel_neon_begin();
                aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
                kernel_neon_end();
+               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        if (walk.nbytes) {
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
index a91933b1e2e62ba235ef05ddf8f9d34dbb6bcf49..4b650ec1d7dd1aa8d4418b6b896f81de4a2187ab 100644 (file)
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
                                 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
 
 #define ALTINSTR_ENTRY(feature,cb)                                           \
        " .word 661b - .\n"                             /* label           */ \
index fda9a8ca48bef71b0d4a76be1a45295af1211dd6..fe8777b12f8667c2c0b23952057fc13041276442 100644 (file)
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED           (1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST              (1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE      (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED     (1 << 4) /* SVE enabled for EL0 */
 
 #define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
 
index 9f82d6b53851e4b6bedbb28f6d0e7480acd622a6..1bdeca8918a684814f84ca3841b88a3123749cbb 100644 (file)
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
         * or update_mmu_cache() have the necessary barriers.
         */
-       if (pte_valid_not_user(pte)) {
+       if (pte_valid_not_user(pte))
                dsb(ishst);
-               isb();
-       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        WRITE_ONCE(*pmdp, pmd);
        dsb(ishst);
-       isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        WRITE_ONCE(*pudp, pud);
        dsb(ishst);
-       isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
index fa8b3fe932e6f568841017215524bc0b894cbf28..6495cc51246fc873bef97f99a2b0139f806516c1 100644 (file)
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
        /*
-        * The raw_cpu_read() is racy if called with preemption enabled.
-        * This is not a bug: kernel_neon_busy is only set when
-        * preemption is disabled, so we cannot migrate to another CPU
-        * while it is set, nor can we migrate to a CPU where it is set.
-        * So, if we find it clear on some CPU then we're guaranteed to
-        * find it clear on any CPU we could migrate to.
-        *
-        * If we are in between kernel_neon_begin()...kernel_neon_end(),
-        * the flag will be set, but preemption is also disabled, so we
-        * can't migrate to another CPU and spuriously see it become
-        * false.
+        * kernel_neon_busy is only set while preemption is disabled,
+        * and is clear whenever preemption is enabled. Since
+        * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+        * cannot change under our feet -- if it's set we cannot be
+        * migrated, and if it's clear we cannot be migrated to a CPU
+        * where it is set.
         */
        return !in_irq() && !irqs_disabled() && !in_nmi() &&
-               !raw_cpu_read(kernel_neon_busy);
+               !this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
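may_use_simd() is the gate a caller checks before entering a kernel-mode NEON section; it can legitimately return false (hard/soft IRQ, NMI, or nested NEON use), so every caller needs a scalar fallback. A minimal usage sketch of the pattern this predicate supports:

	if (may_use_simd()) {
		kernel_neon_begin();	/* claims the NEON unit, disables preemption */
		/* ... SIMD-accelerated path ... */
		kernel_neon_end();
	} else {
		/* ... scalar fallback, safe in any context ... */
	}
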
index 6171178075dcab62def613141732a0b7601b1c43..a8f84812c6e8925c9429451dc3119bfbd5620e8c 100644 (file)
@@ -728,6 +728,17 @@ asm(
        asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
 
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {                      \
+       u64 __scs_val = read_sysreg(sysreg);                            \
+       u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);            \
+       if (__scs_new != __scs_val)                                     \
+               write_sysreg(__scs_new, sysreg);                        \
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
        u32 val;
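The KVM hunk later in this diff exercises the new helper both ways: sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN) to set a bit and sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0) to clear it. For comparison, an open-coded equivalent looks like the sketch below; the macro's extra value is that it skips write_sysreg() entirely when the value is unchanged, avoiding a needless system-register write.

	u64 val = read_sysreg(cpacr_el1);

	val &= ~(u64)CPACR_EL1_ZEN_EL0EN;	/* clear mask */
	val |= CPACR_EL1_ZEN_EL0EN;		/* set mask   */
	write_sysreg(val, cpacr_el1);		/* written unconditionally */
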
index ffdaea7954bb620daf19aba8b855d4c04b1a33c1..d87f2d646caaaa2dfea2fb7dbec6a6ab45f40877 100644 (file)
@@ -37,7 +37,9 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+       struct vm_area_struct vma;
+
+       vma_init(&vma, tlb->mm);
 
        /*
         * The ASID allocator will either invalidate the ASID or mark
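This hunk, the hugetlbpage.c hunks further down, and the ia64 hunks all replace a designated-initialiser on-stack VMA with vma_init(). The helper itself is defined in a header outside this diff; judging by the call sites, it presumably looks roughly like this sketch:

	static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
	{
		static const struct vm_operations_struct dummy_vm_ops = {};

		vma->vm_mm = mm;
		vma->vm_ops = &dummy_vm_ops;	/* never NULL, so vm_ops can be
						 * dereferenced without a check */
		INIT_LIST_HEAD(&vma->anon_vma_chain);
	}
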
index 5c4bce4ac381a4ab87107e4aa47a9b7beef7d891..36fb069fd049c7053f38b75b9916bba7cb630643 100644 (file)
@@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt,
        }
 }
 
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+/*
+ * We provide our own, private D-cache cleaning function so that we don't
+ * accidentally call into the cache.S code, which is patched by us at
+ * runtime.
+ */
+static void clean_dcache_range_nopatch(u64 start, u64 end)
+{
+       u64 cur, d_size, ctr_el0;
+
+       ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+       d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
+                                                          CTR_DMINLINE_SHIFT);
+       cur = start & ~(d_size - 1);
+       do {
+               /*
+                * We must clean+invalidate to the PoC in order to avoid
+                * Cortex-A53 errata 826319, 827319, 824069 and 819472
+                * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
+                */
+               asm volatile("dc civac, %0" : : "r" (cur) : "memory");
+       } while (cur += d_size, cur < end);
+}
+
+static void __apply_alternatives(void *alt_region, bool is_module)
 {
        struct alt_instr *alt;
        struct alt_region *region = alt_region;
@@ -145,7 +168,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
                pr_info_once("patching kernel code\n");
 
                origptr = ALT_ORIG_PTR(alt);
-               updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+               updptr = is_module ? origptr : lm_alias(origptr);
                nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
                if (alt->cpufeature < ARM64_CB_PATCH)
@@ -155,8 +178,20 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 
                alt_cb(alt, origptr, updptr, nr_inst);
 
-               flush_icache_range((uintptr_t)origptr,
-                                  (uintptr_t)(origptr + nr_inst));
+               if (!is_module) {
+                       clean_dcache_range_nopatch((u64)origptr,
+                                                  (u64)(origptr + nr_inst));
+               }
+       }
+
+       /*
+        * The core module code takes care of cache maintenance in
+        * flush_module_icache().
+        */
+       if (!is_module) {
+               dsb(ish);
+               __flush_icache_all();
+               isb();
        }
 }
 
@@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused)
                isb();
        } else {
                BUG_ON(alternatives_applied);
-               __apply_alternatives(&region, true);
+               __apply_alternatives(&region, false);
                /* Barriers provided by the cache flushing */
                WRITE_ONCE(alternatives_applied, 1);
        }
@@ -192,12 +227,14 @@ void __init apply_alternatives_all(void)
        stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
-void apply_alternatives(void *start, size_t length)
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
 {
        struct alt_region region = {
                .begin  = start,
                .end    = start + length,
        };
 
-       __apply_alternatives(&region, false);
+       __apply_alternatives(&region, true);
 }
+#endif
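The renamed flag also flips how the patch address is computed: core-kernel text is written through its linear-map alias, since the kernel text mapping may be read-only, while module text is still writable when module_finalize() runs and is patched in place (with icache maintenance left to the generic flush_module_icache() path, per the comment above). The alias helper is approximately this sketch:

	/* Writable linear-map alias of a kernel-image symbol address. */
	#define lm_alias(x)	__va(__pa_symbol(x))
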
index d2856b129097899d37ba3790056fc28eefc8409e..c6d80743f4eded6fda5e9fb1dadce9a62e69888c 100644 (file)
@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
        __kpti_forced = enabled ? 1 : -1;
        return 0;
 }
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void update_cpu_capabilities(u16 scope_mask)
 {
-       __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
        __update_cpu_capabilities(arm64_errata, scope_mask,
                                  "enabling workaround for");
+       __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
 }
 
 static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 
 static void __init enable_cpu_capabilities(u16 scope_mask)
 {
-       __enable_cpu_capabilities(arm64_features, scope_mask);
        __enable_cpu_capabilities(arm64_errata, scope_mask);
+       __enable_cpu_capabilities(arm64_features, scope_mask);
 }
 
 /*
index 155fd91e78f4a62180e7577355ca4a6b0eb283f4..f0f27aeefb73623a0983c1f3eec2054d306021dc 100644 (file)
@@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr,
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
-               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
-                       apply_alternatives((void *)s->sh_addr, s->sh_size);
-               }
+               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
+                       apply_alternatives_module((void *)s->sh_addr, s->sh_size);
 #ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
index f3e2e3aec0b0632793abc2ce06dbaa2addd97eb5..2faa9863d2e569e704191bd1939dac2eb111cb5b 100644 (file)
@@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
index dc6ecfa5a2d2564c90a5ce92003a0e3b8490cbce..aac7808ce2162a9d2bdcdcc938b649655663912e 100644 (file)
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
 
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
        BUG_ON(!current->mm);
 
-       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+                             KVM_ARM64_HOST_SVE_IN_USE |
+                             KVM_ARM64_HOST_SVE_ENABLED);
        vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
        if (test_thread_flag(TIF_SVE))
                vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+               vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-       local_bh_disable();
+       unsigned long flags;
 
-       update_thread_flag(TIF_SVE,
-                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+       local_irq_save(flags);
 
        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
                /* Clean guest FP state to memory and invalidate cpu view */
                fpsimd_save();
                fpsimd_flush_cpu_state();
-       } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               /* Ensure user trap controls are correctly restored */
-               fpsimd_bind_task_to_cpu();
+       } else if (system_supports_sve()) {
+               /*
+                * The FPSIMD/SVE state in the CPU has not been touched, and we
+                * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+                * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+                * for EL0.  To avoid spurious traps, restore the trap state
+                * seen by kvm_arch_vcpu_load_fp():
+                */
+               if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+               else
+                       sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
        }
 
-       local_bh_enable();
+       update_thread_flag(TIF_SVE,
+                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+       local_irq_restore(flags);
 }
index 49e217ac7e1ec2087c440c60ec71126f0e48ec32..61e93f0b548228f57a08f25a14291a1e46437115 100644 (file)
@@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
-               if (!coherent)
-                       __dma_flush_area(page_to_virt(page), iosize);
-
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
-               if (!addr) {
+               if (addr) {
+                       memset(addr, 0, size);
+                       if (!coherent)
+                               __dma_flush_area(page_to_virt(page), iosize);
+               } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
index ecc6818191df961eac49e6ca0c7d8b8d38d0c855..1854e49aa18a7600657c5b70741f56f35f3f4de5 100644 (file)
@@ -108,11 +108,13 @@ static pte_t get_clear_flush(struct mm_struct *mm,
                             unsigned long pgsize,
                             unsigned long ncontig)
 {
-       struct vm_area_struct vma = { .vm_mm = mm };
+       struct vm_area_struct vma;
        pte_t orig_pte = huge_ptep_get(ptep);
        bool valid = pte_valid(orig_pte);
        unsigned long i, saddr = addr;
 
+       vma_init(&vma, mm);
+
        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
                pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 
@@ -145,9 +147,10 @@ static void clear_flush(struct mm_struct *mm,
                             unsigned long pgsize,
                             unsigned long ncontig)
 {
-       struct vm_area_struct vma = { .vm_mm = mm };
+       struct vm_area_struct vma;
        unsigned long i, saddr = addr;
 
+       vma_init(&vma, mm);
        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
                pte_clear(mm, addr, ptep);
 
index 325cfb3b858aa698a96b23433230503063375b17..9abf8a1e7b250c49b0064f6abc67d41eabdbc52c 100644 (file)
@@ -611,11 +611,13 @@ void __init mem_init(void)
        BUILD_BUG_ON(TASK_SIZE_32                       > TASK_SIZE_64);
 #endif
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /*
         * Make sure we chose the upper bound of sizeof(struct page)
-        * correctly.
+        * correctly when sizing the VMEMMAP array.
         */
        BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
 
        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
index 5f9a73a4452c2b87dd9a922933b12c85ab008377..03646e6a2ef4f240412d1eb62a1cbc27d04705b0 100644 (file)
@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
 
        .macro __idmap_kpti_put_pgtable_ent_ng, type
        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
-       str     \type, [cur_\()\type\()p]       // Update the entry and ensure it
-       dc      civac, cur_\()\type\()p         // is visible to all CPUs.
+       str     \type, [cur_\()\type\()p]       // Update the entry and ensure
+       dmb     sy                              // that it is visible to all
+       dc      civac, cur_\()\type\()p         // CPUs.
        .endm
 
 /*
index 44f0ac0df30823d49807d91c4234c6556d0a885b..db89e7306081853277ecea3cb502e10c2d80e4d5 100644 (file)
@@ -120,7 +120,7 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
                 */
                struct vm_area_struct vma;
 
-               vma.vm_mm = tlb->mm;
+               vma_init(&vma, tlb->mm);
                /* flush the address range from the tlb: */
                flush_tlb_range(&vma, start, end);
                /* now flush the virt. page-table area mapping the address range: */
index 3b38c717008ac1993e5b54aa28a8fa6342ab7350..46bff16618362308ef016a2125cbdc6419c7124f 100644 (file)
@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        DPRINT(("smpl_buf @%p\n", smpl_buf));
 
        /* allocate vma */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (!vma) {
                DPRINT(("Cannot allocate vma\n"));
                goto error_kmem;
        }
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        /*
         * partially initialize the vma for the sampling buffer
         */
-       vma->vm_mm           = mm;
        vma->vm_file         = get_file(filp);
        vma->vm_flags        = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
        vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
@@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        return 0;
 
 error:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 error_kmem:
        pfm_rvfree(smpl_buf, size);
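Here the open-coded VMA construction (kmem_cache_zalloc() plus manual vm_mm and anon_vma_chain setup) becomes vm_area_alloc()/vm_area_free(). Assuming the pair builds on the vma_init() helper sketched earlier, it plausibly reads:

	struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma)
			vma_init(vma, mm);
		return vma;
	}

	void vm_area_free(struct vm_area_struct *vma)
	{
		kmem_cache_free(vm_area_cachep, vma);
	}
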
 
index 18278b448530d3ac9302754cf170e261401fd008..e6c6dfd98de29e021b498683b0a460a03ecb6ff9 100644 (file)
@@ -114,10 +114,8 @@ ia64_init_addr_space (void)
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(current->mm);
        if (vma) {
-               INIT_LIST_HEAD(&vma->anon_vma_chain);
-               vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -125,7 +123,7 @@ ia64_init_addr_space (void)
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
-                       kmem_cache_free(vm_area_cachep, vma);
+                       vm_area_free(vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
@@ -133,10 +131,8 @@ ia64_init_addr_space (void)
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+               vma = vm_area_alloc(current->mm);
                if (vma) {
-                       INIT_LIST_HEAD(&vma->anon_vma_chain);
-                       vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -144,7 +140,7 @@ ia64_init_addr_space (void)
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
-                               kmem_cache_free(vm_area_cachep, vma);
+                               vm_area_free(vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
@@ -277,7 +273,7 @@ static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
-       gate_vma.vm_mm = NULL;
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
index 8b707c249026032ac8bef80c6f1666c105d9b50d..12fe700632f458ea632a18bb9cdccd6660efd241 100644 (file)
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
                                  unsigned long address)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
 
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
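The bug being fixed: pte pages are allocated with pgtable_page_ctor() (which initialises the split page-table lock) but were freed without the matching destructor. A generic sketch of the required pairing, with hypothetical helper names:

	static struct page *pte_page_alloc(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {	/* sets up the ptlock */
			__free_page(page);
			return NULL;
		}
		return page;
	}

	static void pte_page_free(struct page *page)
	{
		pgtable_page_dtor(page);	/* tears the ptlock down */
		__free_page(page);
	}
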
 
index 331a3bb66297baa39404fbefa273663ebd1871fe..93a737c8d1a6448d5bb4dcb2d71c8d8b5241e0d7 100644 (file)
@@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
-config HEART_BEAT
-       bool "Heart beat function for kernel"
-       default n
-       help
-         This option turns on/off heart beat kernel functionality.
-         First GPIO node is taken.
-
 endmenu
index d5384f6f36f777d4487ea00f2908b39fe5a26403..ce9b7b7861569501c0339491338da38a2cdb0050 100644 (file)
@@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 
 extern char *klimit;
 
-void microblaze_heartbeat(void);
-void microblaze_setup_heartbeat(void);
-
 #   ifdef CONFIG_MMU
 extern void mmu_reset(void);
 #   endif /* CONFIG_MMU */
 
-extern void of_platform_reset_gpio_probe(void);
-
 void time_init(void);
 void init_IRQ(void);
 void machine_early_init(const char *cmdline, unsigned int ram,
index 9774e1d9507baebbd6efe9bcf67bb88bcd214d82..a62d09420a47b725cf67e12b99784a2259e24d2f 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         399
+#define __NR_syscalls         401
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index eb156f914793b29b558c9b48853af8d833f3d3d2..7a9f16a7641374855d4d8f9a7189792031f51185 100644 (file)
 #define __NR_pkey_alloc                396
 #define __NR_pkey_free         397
 #define __NR_statx             398
+#define __NR_io_pgetevents     399
+#define __NR_rseq              400
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 7e99cf6984a1eb5f51597dbd8857f6f370d28328..dd71637437f4f6b1ff307d385b8a1ff293959075 100644 (file)
@@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_timer.o = -pg
 CFLAGS_REMOVE_intc.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_heartbeat.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_process.o = -pg
 endif
@@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds
 
 obj-y += dma.o exceptions.o \
        hw_exception_handler.o irq.o \
-       platform.o process.o prom.o ptrace.o \
+       process.o prom.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
 
-obj-$(CONFIG_HEART_BEAT)       += heartbeat.o
 obj-$(CONFIG_MODULES)          += microblaze_ksyms.o module.o
 obj-$(CONFIG_MMU)              += misc.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
deleted file mode 100644 (file)
index 2022130..0000000
--- a/arch/microblaze/kernel/heartbeat.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-
-static unsigned int base_addr;
-
-void microblaze_heartbeat(void)
-{
-       static unsigned int cnt, period, dist;
-
-       if (base_addr) {
-               if (cnt == 0 || cnt == dist)
-                       out_be32(base_addr, 1);
-               else if (cnt == 7 || cnt == dist + 7)
-                       out_be32(base_addr, 0);
-
-               if (++cnt > period) {
-                       cnt = 0;
-                       /*
-                        * The hyperbolic function below modifies the heartbeat
-                        * period length in dependency of the current (5min)
-                        * load. It goes through the points f(0)=126, f(1)=86,
-                        * f(5)=51, f(inf)->30.
-                        */
-                       period = ((672 << FSHIFT) / (5 * avenrun[0] +
-                                               (7 << FSHIFT))) + 30;
-                       dist = period / 4;
-               }
-       }
-}
-
-void microblaze_setup_heartbeat(void)
-{
-       struct device_node *gpio = NULL;
-       int *prop;
-       int j;
-       const char * const gpio_list[] = {
-               "xlnx,xps-gpio-1.00.a",
-               NULL
-       };
-
-       for (j = 0; gpio_list[j] != NULL; j++) {
-               gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
-               if (gpio)
-                       break;
-       }
-
-       if (gpio) {
-               base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
-               base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
-               pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
-
-               /* GPIO is configured as output */
-               prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
-               if (prop)
-                       out_be32(base_addr + 4, 0);
-       }
-}
diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c
deleted file mode 100644 (file)
index 2540d60..0000000
--- a/arch/microblaze/kernel/platform.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2008 Michal Simek <monstr@monstr.eu>
- *
- * based on virtex.c file
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/setup.h>
-
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
-       { .compatible = "simple-bus", },
-       { .compatible = "xlnx,compound", },
-       {}
-};
-
-static int __init microblaze_device_probe(void)
-{
-       of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-       of_platform_reset_gpio_probe();
-       return 0;
-}
-device_initcall(microblaze_device_probe);
index bab4c8330ef4f3f165ad2992d9660776fb0e3c41..fcbe1daf631662f8d45a580126f58d2090d90023 100644 (file)
@@ -18,7 +18,7 @@
 static int handle; /* reset pin handle */
 static unsigned int reset_val;
 
-void of_platform_reset_gpio_probe(void)
+static int of_platform_reset_gpio_probe(void)
 {
        int ret;
        handle = of_get_named_gpio(of_find_node_by_path("/"),
@@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void)
        if (!gpio_is_valid(handle)) {
                pr_info("Skipping unavailable RESET gpio %d (%s)\n",
                                handle, "reset");
-               return;
+               return -ENODEV;
        }
 
        ret = gpio_request(handle, "reset");
        if (ret < 0) {
                pr_info("GPIO pin is already allocated\n");
-               return;
+               return ret;
        }
 
        /* get current setup value */
@@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void)
 
        pr_info("RESET: Registered gpio device: %d, current val: %d\n",
                                                        handle, reset_val);
-       return;
+       return 0;
 err:
        gpio_free(handle);
-       return;
+       return ret;
 }
+device_initcall(of_platform_reset_gpio_probe);
 
 
 static void gpio_system_reset(void)
index 56bcf313121fb6bd31be14dc5d9f676a132cb85c..6ab6505937921e247231f8a09fbf5ab8b463d1a5 100644 (file)
@@ -400,3 +400,5 @@ ENTRY(sys_call_table)
        .long sys_pkey_alloc
        .long sys_pkey_free
        .long sys_statx
+       .long sys_io_pgetevents
+       .long sys_rseq
index 7de941cbbd940fb7ca72840e3bf6422993c6eb94..a6683484b3a12690c517ccf5803ac64f03f16c81 100644 (file)
@@ -156,9 +156,6 @@ static inline void timer_ack(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = &clockevent_xilinx_timer;
-#ifdef CONFIG_HEART_BEAT
-       microblaze_heartbeat();
-#endif
        timer_ack();
        evt->event_handler(evt);
        return IRQ_HANDLED;
@@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer)
                return ret;
        }
 
-#ifdef CONFIG_HEART_BEAT
-       microblaze_setup_heartbeat();
-#endif
-
        ret = xilinx_clocksource_init();
        if (ret)
                return ret;
index 3f9deec70b92383130b847ef3d9585db5134675e..08c10c518f8323fea7838d92fe03b748cbb2966e 100644 (file)
@@ -65,6 +65,7 @@ config MIPS
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
index 10a405d593df3b5c64fa84ce9ae27eaa7ba222df..c782b10ddf50d6a09713edc21f23356399ce1a4b 100644 (file)
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
 
 void ath79_ddr_wb_flush(u32 reg)
 {
-       void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+       void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
 
        /* Flush the DDR write buffer. */
        __raw_writel(0x1, flush_reg);
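The reg argument is a word index into the DDR controller's 32-bit register file, so the byte offset is reg * 4. Presumably an earlier conversion of ath79_ddr_wb_flush_base to void __iomem * dropped the scaling a typed u32 pointer had provided implicitly; a sketch of the difference:

	u32 __iomem *words = (u32 __iomem *)ath79_ddr_wb_flush_base;

	/* These two expressions name the same register: */
	void __iomem *a = words + reg;				/* implicit * 4 */
	void __iomem *b = ath79_ddr_wb_flush_base + reg * 4;	/* explicit * 4 */
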
index 6b2c6f3baefa556018dffea409500b1c7846ed77..75fb96ca61db7ef6652722640a8d452cbc125d55 100644 (file)
@@ -34,7 +34,7 @@
 #define PB44_KEYS_DEBOUNCE_INTERVAL    (3 * PB44_KEYS_POLL_INTERVAL)
 
 static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
-       .dev_id = "i2c-gpio",
+       .dev_id = "i2c-gpio.0",
        .table = {
                GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
                                NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
index a7d0b836f2f7dd9c8bf7897759aed6b9f59ade39..cea8ad864b3f6f416cb45687bfbcb5bd882933a7 100644 (file)
@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port)                     \
        __val = *__addr;                                                \
        slow;                                                           \
                                                                        \
+       /* prevent prefetching of coherent DMA data prematurely */      \
+       rmb();                                                          \
        return pfx##ioswab##bwlq(__addr, __val);                        \
 }
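The new rmb() orders the MMIO read against any later loads from coherent DMA memory, and because it sits inside the read accessor itself, every readX()/inX() caller gets the guarantee for free. A hypothetical driver-side view of the hazard being closed (DESC_STATUS, DESC_DONE and process_packet() are illustrative names, not from this diff):

	u32 status = readl(dev->regs + DESC_STATUS);	/* MMIO: DMA complete? */

	if (status & DESC_DONE) {
		/*
		 * Without a read barrier after the MMIO load, the CPU could
		 * already have speculatively fetched stale buffer contents
		 * before the device finished writing them.
		 */
		process_packet(dev->rx_buf);
	}
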
 
index bb05e9916a5fa7f969d915b742329ae67cd51b57..f25dd1d83fb74700b33e4bf2387ebf89ac200f64 100644 (file)
 #define __NR_pkey_alloc                        (__NR_Linux + 364)
 #define __NR_pkey_free                 (__NR_Linux + 365)
 #define __NR_statx                     (__NR_Linux + 366)
+#define __NR_rseq                      (__NR_Linux + 367)
+#define __NR_io_pgetevents             (__NR_Linux + 368)
 
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            366
+#define __NR_Linux_syscalls            368
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                366
+#define __NR_O32_Linux_syscalls                368
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_pkey_alloc                        (__NR_Linux + 324)
 #define __NR_pkey_free                 (__NR_Linux + 325)
 #define __NR_statx                     (__NR_Linux + 326)
+#define __NR_rseq                      (__NR_Linux + 327)
+#define __NR_io_pgetevents             (__NR_Linux + 328)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            326
+#define __NR_Linux_syscalls            328
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         326
+#define __NR_64_Linux_syscalls         328
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_pkey_alloc                        (__NR_Linux + 328)
 #define __NR_pkey_free                 (__NR_Linux + 329)
 #define __NR_statx                     (__NR_Linux + 330)
+#define __NR_rseq                      (__NR_Linux + 331)
+#define __NR_io_pgetevents             (__NR_Linux + 332)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            330
+#define __NR_Linux_syscalls            332
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                330
+#define __NR_N32_Linux_syscalls                332
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 38a302919e6b5ae8aa07303065bd561529fe828e..d7de8adcfcc8767a826e7823d3bf189326da0e33 100644 (file)
@@ -79,6 +79,10 @@ FEXPORT(ret_from_fork)
        jal     schedule_tail           # a0 = struct task_struct *prev
 
 FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched and
                                        # signals dont change between
                                        # sampling and return
@@ -141,6 +145,10 @@ work_notifysig:                            # deal with pending signals and
        j       resume_userspace_check
 
 FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched doesn't
                                        # change between and return
        LONG_L  a2, TI_FLAGS($28)       # current->work
index f2ee7e1e3342e498be961f8995fc91b1de1f2744..cff52b283e03843519201ca8fe8754e0899c0c3c 100644 (file)
@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
 EXPORT_SYMBOL(_mcount)
        PTR_LA  t1, ftrace_stub
        PTR_L   t2, ftrace_trace_function /* Prepare t2 for (1) */
-       bne     t1, t2, static_trace
+       beq     t1, t2, fgraph_trace
         nop
 
+       MCOUNT_SAVE_REGS
+
+       move    a0, ra          /* arg1: self return address */
+       jalr    t2              /* (1) call *ftrace_trace_function */
+        move   a1, AT          /* arg2: parent's return address */
+
+       MCOUNT_RESTORE_REGS
+
+fgraph_trace:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       PTR_LA  t1, ftrace_stub
        PTR_L   t3, ftrace_graph_return
        bne     t1, t3, ftrace_graph_caller
         nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
        bne     t1, t3, ftrace_graph_caller
         nop
 #endif
-       b       ftrace_stub
-#ifdef CONFIG_32BIT
-        addiu sp, sp, 8
-#else
-        nop
-#endif
 
-static_trace:
-       MCOUNT_SAVE_REGS
-
-       move    a0, ra          /* arg1: self return address */
-       jalr    t2              /* (1) call *ftrace_trace_function */
-        move   a1, AT          /* arg2: parent's return address */
-
-       MCOUNT_RESTORE_REGS
 #ifdef CONFIG_32BIT
        addiu sp, sp, 8
 #endif
+
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
index 8d85046adcc8dd858cb5b392b68dc22da19185c4..9670e70139fd971d00bf7a06c46b06cec7ff1e35 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
        return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-       struct pt_regs *regs;
+       nmi_cpu_backtrace(get_irq_regs());
+       cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-       regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+       call_single_data_t *csd;
+       int cpu;
 
-       if (regs)
-               show_regs(regs);
+       for_each_cpu(cpu, mask) {
+               /*
+                * If we previously sent an IPI to the target CPU & it hasn't
+                * cleared its bit in the busy cpumask then it didn't handle
+                * our previous IPI & it's not safe for us to reuse the
+                * call_single_data_t.
+                */
+               if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+                       pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+                               cpu);
+                       continue;
+               }
 
-       dump_stack();
+               csd = &per_cpu(backtrace_csd, cpu);
+               csd->func = handle_backtrace;
+               smp_call_function_single_async(cpu, csd);
+       }
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-       long this_cpu = get_cpu();
-
-       if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-               dump_stack();
-
-       smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-       put_cpu();
+       nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
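
The per-CPU call_single_data_t plus the backtrace_csd_busy mask above exist because smp_call_function_single_async() returns without waiting: the caller still owns the csd until the callback has run, so it must not be reused while an earlier IPI is in flight. A stripped-down sketch of that ownership protocol (names here are illustrative):

    static DEFINE_PER_CPU(call_single_data_t, my_csd);
    static struct cpumask my_csd_busy;

    static void my_ipi_handler(void *info)
    {
            /* ... per-CPU work ... */
            cpumask_clear_cpu(smp_processor_id(), &my_csd_busy); /* release */
    }

    static void kick_cpu(int cpu)
    {
            call_single_data_t *csd;

            /* Bit still set: the previous IPI was never handled, so the
             * csd is still owned by that call and must not be touched. */
            if (cpumask_test_and_set_cpu(cpu, &my_csd_busy))
                    return;
            csd = &per_cpu(my_csd, cpu);
            csd->func = my_ipi_handler;
            smp_call_function_single_async(cpu, csd);
    }

This is also why, in the patch above, the handler clears its busy bit only after nmi_cpu_backtrace() has finished.
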
index a9a7d78803cde30097a02c76aa49bef9f812be7e..91d3c8c46097cd960fd541cdf7c76d7e0d3636e3 100644 (file)
@@ -590,3 +590,5 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
index 65d5aeeb9bdb51ac846d5acc213f3a1af9b97533..358d9599983d17840cd909ea7851197e7b38b838 100644 (file)
@@ -439,4 +439,6 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 5325 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
        .size   sys_call_table,.-sys_call_table
index cbf190ef9e8a5e2a0e499cfaf721908abf4213ec..c65eaacc1abfcf4c15a40721056cf6c3503927ee 100644 (file)
@@ -434,4 +434,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free
        PTR     sys_statx                       /* 6330 */
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sysn32_call_table,.-sysn32_call_table
index 9ebe3e2403b1d7b84d66732cd261364208f6020d..73913f072e3916f36c23bda86870f83002a725c0 100644 (file)
@@ -583,4 +583,6 @@ EXPORT(sys32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sys32_call_table,.-sys32_call_table
index 9e224469c78887e9c2eb55779bfc8d4646ca2f09..0a9cfe7a0372940fceb71931cff1eddec2e10e37 100644 (file)
@@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                regs->regs[0] = 0;              /* Don't deal with this again.  */
        }
 
+       rseq_signal_deliver(ksig, regs);
+
        if (sig_uses_siginfo(&ksig->ka, abi))
                ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
                                          ksig, regs, oldset);
@@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index d67fa74622ee287200bf6b6664c3292ad72131d5..8d505a21396e33626061adb82c20be60f5b217bf 100644 (file)
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
        __show_regs((struct pt_regs *)regs);
+       dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
index 1986e09fb457c55ba16e3cd19f56f65e2737cb54..1601d90b087b8f933853ac87118aa09749f70f03 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
        return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+                              void *arg)
+{
+       unsigned long i;
+
+       for (i = 0; i < nr_pages; i++) {
+               if (pfn_valid(start_pfn + i) &&
+                   !PageReserved(pfn_to_page(start_pfn + i)))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+       unsigned long offset, pfn, last_pfn;
        struct vm_struct * area;
-       unsigned long offset;
        phys_addr_t last_addr;
        void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
                return (void __iomem *) CKSEG1ADDR(phys_addr);
 
        /*
-        * Don't allow anybody to remap normal RAM that we're using..
+        * Don't allow anybody to remap RAM that may be allocated by the page
+        * allocator, since that could lead to races & data clobbering.
         */
-       if (phys_addr < virt_to_phys(high_memory)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = __va(phys_addr);
-               t_end = t_addr + (size - 1);
-
-               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                       if(!PageReserved(page))
-                               return NULL;
+       pfn = PFN_DOWN(phys_addr);
+       last_pfn = PFN_DOWN(last_addr);
+       if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+                                 __ioremap_check_ram) == 1) {
+               WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+                         &phys_addr, &last_addr);
+               return NULL;
        }
 
        /*
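
The new linux/ioport.h include is what provides walk_system_ram_range(): it invokes the given function once per System RAM chunk overlapping the pfn range and stops as soon as that function returns nonzero, propagating the value. Returning 1 from __ioremap_check_ram() therefore means "the range contains page-allocator RAM". A minimal usage sketch of that contract (illustrative, not this file's code):

    static int is_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
                         void *arg)
    {
            return 1;       /* first hit ends the walk with 1 */
    }

    static bool range_has_system_ram(unsigned long pfn, unsigned long nr)
    {
            return walk_system_ram_range(pfn, nr, NULL, is_ram_cb) == 1;
    }
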
index 9632436d74d7a74b3d584ab6e87a1fc7e55827cc..c2e94cf5ecdab7c7f3263bd65e76c30cf8eb32fc 100644 (file)
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
        phys_addr_t size = resource_size(rsrc);
 
        *start = fixup_bigphys_addr(rsrc->start, size);
-       *end = rsrc->start + size;
+       *end = rsrc->start + size - 1;
 }
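
The one-liner above fixes a classic inclusive-bound off-by-one: a BAR of size 0x100 starting at 0x1000 occupies bytes 0x1000 through 0x10ff, so the user-visible end must be start + size - 1 = 0x10ff. The old code reported 0x1100, the first byte past the resource.
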
index 6aed974276d8f2cf9f337e73b04cd8479e99bab8..34f7222c5efe0405af96d09f1c691405109ab810 100644 (file)
@@ -12,17 +12,17 @@ config NDS32
        select CLONE_BACKWARDS
        select COMMON_CLK
        select DMA_NONCOHERENT_OPS
-       select GENERIC_ASHLDI3
-       select GENERIC_ASHRDI3
-       select GENERIC_LSHRDI3
-       select GENERIC_CMPDI2
-       select GENERIC_MULDI3
-       select GENERIC_UCMPDI2
        select GENERIC_ATOMIC64
        select GENERIC_CPU_DEVICES
        select GENERIC_CLOCKEVENTS
        select GENERIC_IRQ_CHIP
        select GENERIC_IRQ_SHOW
+       select GENERIC_LIB_ASHLDI3
+       select GENERIC_LIB_ASHRDI3
+       select GENERIC_LIB_CMPDI2
+       select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_MULDI3
+       select GENERIC_LIB_UCMPDI2
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
index 513bb2e9baf9fa84615a8d03ce9ec57fb7f55849..031c676821ff8797a879c6af56aeffd9ea4c4ed3 100644 (file)
@@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS   += $(call cc-option, -EL)
 KBUILD_AFLAGS   += $(call cc-option, -EL)
 LDFLAGS         += $(call cc-option, -EL)
+CHECKFLAGS      += -D__NDS32_EL__
 else
 KBUILD_CFLAGS   += $(call cc-option, -EB)
 KBUILD_AFLAGS   += $(call cc-option, -EB)
 LDFLAGS         += $(call cc-option, -EB)
+CHECKFLAGS      += -D__NDS32_EB__
 endif
 
 boot := arch/nds32/boot
index 10b48f0d8e857fe9ae3ec6d35bd4a6004c2b4aba..8b26198d51bb78b60a28748248e1e50f89be52ab 100644 (file)
@@ -8,6 +8,8 @@
 
 #define PG_dcache_dirty PG_arch_1
 
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma,
 void flush_kernel_dcache_page(struct page *page);
 void flush_kernel_vmap_range(void *addr, int size);
 void invalidate_kernel_vmap_range(void *addr, int size);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_dcache_mmap_lock(mapping)   xa_lock_irq(&(mapping)->i_pages)
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
 #include <asm-generic/cacheflush.h>
+#undef flush_icache_range
+#undef flush_icache_page
+#undef flush_icache_user_range
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+                            unsigned long addr, int len);
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
index eab5e84bd9919eaa7503eef129c3cd8072b9a61c..cb6cb91cfdf81622dc170286d83803e2d4e7ad73 100644 (file)
@@ -16,7 +16,7 @@
        "       .popsection\n"                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "4:     move    %0, " err_reg "\n"                      \
-       "       j       3b\n"                                   \
+       "       b       3b\n"                                   \
        "       .popsection"
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
index 2f5b2ccebe47166a9863468960f145e9601a9bf4..63a1a5ef5219f47bcd9797e543298056294da22f 100644 (file)
@@ -278,7 +278,8 @@ static void __init setup_memory(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-       early_init_devtree( __dtb_start);
+       early_init_devtree(__atags_pointer ? \
+               phys_to_virt(__atags_pointer) : __dtb_start);
 
        setup_cpuinfo();
 
index ce8fd34497bf045beafa845d2df1500b00281a4c..254703653b6f5db430af4f4ae01cca63aa67049a 100644 (file)
 
 extern struct cache_info L1_cache_info[2];
 
-#ifndef CONFIG_CPU_CACHE_ALIASING
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+       unsigned long line_size, flags;
+       line_size = L1_cache_info[DCACHE].line_size;
+       start = start & ~(line_size - 1);
+       end = (end + line_size - 1) & ~(line_size - 1);
+       local_irq_save(flags);
+       cpu_cache_wbinval_range(start, end, 1);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+       unsigned long flags;
+       unsigned long kaddr;
+       local_irq_save(flags);
+       kaddr = (unsigned long)kmap_atomic(page);
+       cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+       kunmap_atomic((void *)kaddr);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+                            unsigned long addr, int len)
+{
+       unsigned long kaddr;
+       kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
+       flush_icache_range(kaddr, kaddr + len);
+       kunmap_atomic((void *)kaddr);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t * pte)
 {
@@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
-
-               if (!PageHighMem(page)) {
-                       cpu_cache_wbinval_page((unsigned long)
-                                              page_address(page),
-                                              vma->vm_flags & VM_EXEC);
-               } else {
-                       unsigned long kaddr = (unsigned long)kmap_atomic(page);
-                       cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
-                       kunmap_atomic((void *)kaddr);
-               }
+               unsigned long kaddr;
+               local_irq_save(flags);
+               kaddr = (unsigned long)kmap_atomic(page);
+               cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+               kunmap_atomic((void *)kaddr);
+               local_irq_restore(flags);
        }
 }
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
 extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
 
 static inline unsigned long aliasing(unsigned long addr, unsigned long page)
@@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-       unsigned long line_size, flags;
-       line_size = L1_cache_info[DCACHE].line_size;
-       start = start & ~(line_size - 1);
-       end = (end + line_size - 1) & ~(line_size - 1);
-       local_irq_save(flags);
-       cpu_cache_wbinval_range(start, end, 1);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-       unsigned long flags;
-       local_irq_save(flags);
-       cpu_cache_wbinval_page((unsigned long)page_address(page),
-                              vma->vm_flags & VM_EXEC);
-       local_irq_restore(flags);
-}
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-                     pte_t * pte)
-{
-       struct page *page;
-       unsigned long flags;
-       unsigned long pfn = pte_pfn(*pte);
-
-       if (!pfn_valid(pfn))
-               return;
-
-       if (vma->vm_mm == current->active_mm) {
-               local_irq_save(flags);
-               __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
-               __nds32__tlbop_rwr(*pte);
-               __nds32__isb();
-               local_irq_restore(flags);
-       }
-
-       page = pfn_to_page(pfn);
-       if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
-           (vma->vm_flags & VM_EXEC)) {
-               local_irq_save(flags);
-               cpu_dcache_wbinval_page((unsigned long)page_address(page));
-               local_irq_restore(flags);
-       }
-}
 #endif
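
The rounding in the relocated flush_icache_range() widens the span to whole cache lines so that every line touched by any byte of [start, end) is written back and invalidated. A self-contained illustration of the same arithmetic (the 32-byte line size is an assumption for the example):

    #include <stdio.h>

    int main(void)
    {
            unsigned long line_size = 32;   /* assumed L1 line size */
            unsigned long start = 0x1005, end = 0x1021;

            start &= ~(line_size - 1);                      /* 0x1000 */
            end = (end + line_size - 1) & ~(line_size - 1); /* 0x1040 */
            printf("flush %#lx..%#lx\n", start, end);
            return 0;
    }
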
index 3e1a46615120a566adbfff65b5aab8e860bf3809..8999b922651210f6c20c83e7aa72b3bccf6c3d58 100644 (file)
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
        __free_page(pte);
 }
 
+#define __pte_free_tlb(tlb, pte, addr) \
+do {                                   \
+       pgtable_page_dtor(pte);         \
+       tlb_remove_page((tlb), (pte));  \
+} while (0)
 
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define check_pgt_cache()          do { } while (0)
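
Adding pgtable_page_dtor() here matters because the allocation side runs pgtable_page_ctor(), which sets up the split page-table lock and page-table accounting; freeing the page without the destructor leaks that state. For reference, the usual allocation-side pattern looks like this (a sketch of the common arch implementation, not necessarily this file's exact code):

    pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
    {
            struct page *pte;

            pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
            if (!pte)
                    return NULL;
            if (!pgtable_page_ctor(pte)) { /* paired with pgtable_page_dtor() */
                    __free_page(pte);
                    return NULL;
            }
            return pte;
    }
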
index 690d55272ba688a2adc88bca00e66cc61903c711..0c826ad6e994cce359474229acf08ff0d0330b78 100644 (file)
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
        l.addi  r3,r1,0                    // pt_regs
        /* r4 set be EXCEPTION_HANDLE */   // effective address of fault
 
-       /*
-        * __PHX__: TODO
-        *
-        * all this can be written much simpler. look at
-        * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-        */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
        l.lwz   r6,PT_PC(r3)               // address of an offending insn
        l.lwz   r6,0(r6)                   // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-       l.lwz   r6,PT_SR(r3)               // SR
+       l.mfspr r6,r0,SPR_SR               // SR
        l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
        l.sfne  r6,r0                      // exception happened in delay slot
        l.bnf   7f
index fb02b2a1d6f2d875372b125cf837feb119d0164e..9fc6b60140f007bea1442f60727a22aee24776c9 100644 (file)
  *      r4  - EEAR     exception EA
  *      r10 - current  pointing to current_thread_info struct
  *      r12 - syscall  0, since we didn't come from syscall
- *      r13 - temp     it actually contains new SR, not needed anymore
- *      r31 - handler  address of the handler we'll jump to
+ *      r30 - handler  address of the handler we'll jump to
  *
  *      handler has to save remaining registers to the exception
  *      ksp frame *before* tainting them!
        /* r1 is KSP, r30 is __pa(KSP) */                       ;\
        tophys  (r30,r1)                                        ;\
        l.sw    PT_GPR12(r30),r12                               ;\
+       /* r4 used as tmp before EA */                          ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
        l.sw    PT_PC(r30),r12                                  ;\
        l.mfspr r12,r0,SPR_ESR_BASE                             ;\
        /* r12 == 1 if we come from syscall */                  ;\
        CLEAR_GPR(r12)                                          ;\
        /* ----- turn on MMU ----- */                           ;\
-       l.ori   r30,r0,(EXCEPTION_SR)                           ;\
+       /* Carry DSX into exception SR */                       ;\
+       l.mfspr r30,r0,SPR_SR                                   ;\
+       l.andi  r30,r30,SPR_SR_DSX                              ;\
+       l.ori   r30,r30,(EXCEPTION_SR)                          ;\
        l.mtspr r0,r30,SPR_ESR_BASE                             ;\
        /* r30: EA address of handler */                        ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
index fac246e6f37a278e4cd7c001c2cd53a8df88dc4e..d8981cbb852a5f1fc1ea80667df3ed451579d13c 100644 (file)
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
                return 0;
        }
 #else
-       return regs->sr & SPR_SR_DSX;
+       return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }
 
index c480770fabcd6287571dacb9d40ccc224f8e13b1..17526bebcbd277765c791b30e04e0096052d78cb 100644 (file)
@@ -244,11 +244,11 @@ config PARISC_PAGE_SIZE_4KB
 
 config PARISC_PAGE_SIZE_16KB
        bool "16KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 config PARISC_PAGE_SIZE_64KB
        bool "64KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 endchoice
 
@@ -347,7 +347,7 @@ config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
        depends on SMP
-       default "32"
+       default "4"
 
 endmenu
 
index 714284ea6cc214f1011c6e0593f5ad2b0c962ddc..5ce030266e7d03bbfd7da5885471b1a874eefcd7 100644 (file)
@@ -65,10 +65,6 @@ endif
 # kernel.
 cflags-y       += -mdisable-fpregs
 
-# Without this, "ld -r" results in .text sections that are too big
-# (> 0x40000) for branches to reach stubs.
-cflags-y       += -ffunction-sections
-
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
 ifdef CONFIG_MLONGCALLS
index eeb5c88586631e8935b96e0edfe410bbbc2ecffc..715c96ba2ec81c2907ead07ffd21fbf79a0fb0cb 100644 (file)
@@ -21,14 +21,6 @@ typedef struct {
        unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
-#ifndef __KERNEL__
-struct sigaction {
-       __sighandler_t sa_handler;
-       unsigned long sa_flags;
-       sigset_t sa_mask;               /* mask last for extensibility */
-};
-#endif
-
 #include <asm/sigcontext.h>
 
 #endif /* !__ASSEMBLY */
index 4872e77aa96b784d5a1e19bd7f9c4996b8cd0992..dc77c5a51db774a7c691568c010ce0a4500e7286 100644 (file)
 #define __NR_preadv2           (__NR_Linux + 347)
 #define __NR_pwritev2          (__NR_Linux + 348)
 #define __NR_statx             (__NR_Linux + 349)
+#define __NR_io_pgetevents     (__NR_Linux + 350)
 
-#define __NR_Linux_syscalls    (__NR_statx + 1)
+#define __NR_Linux_syscalls    (__NR_io_pgetevents + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e0e1c9775c320b46d85da0f2e6ce22bc2275b9fb..5eb979d04b905420e28f63dd526e6ca13aaa9842 100644 (file)
@@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver)
 {
        /* FIXME: we need this because apparently the sti
         * driver can be registered twice */
-       if(driver->drv.name) {
-               printk(KERN_WARNING 
-                      "BUG: skipping previously registered driver %s\n",
-                      driver->name);
+       if (driver->drv.name) {
+               pr_warn("BUG: skipping previously registered driver %s\n",
+                       driver->name);
                return 1;
        }
 
        if (!driver->probe) {
-               printk(KERN_WARNING 
-                      "BUG: driver %s has no probe routine\n",
-                      driver->name);
+               pr_warn("BUG: driver %s has no probe routine\n", driver->name);
                return 1;
        }
 
@@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
 
        dev = create_parisc_device(mod_path);
        if (dev->id.hw_type != HPHW_FAULTY) {
-               printk(KERN_ERR "Two devices have hardware path [%s].  "
-                               "IODC data for second device: "
-                               "%02x%02x%02x%02x%02x%02x\n"
-                               "Rearranging GSC cards sometimes helps\n",
-                       parisc_pathname(dev), iodc_data[0], iodc_data[1],
-                       iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+               pr_err("Two devices have hardware path [%s].  IODC data for second device: %7phN\n"
+                      "Rearranging GSC cards sometimes helps\n",
+                       parisc_pathname(dev), iodc_data);
                return NULL;
        }
 
@@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
         * the keyboard controller
         */
        if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
-               printk("Unable to claim HPA %lx for device %s\n",
-                               hpa, name);
+               pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
 
        return dev;
 }
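
The %7phN used in the alloc_pa_dev() hunk above is the kernel's buffer-as-hex format: %*ph prints a small buffer as space-separated hex bytes, and the N suffix drops the separators, so %7phN emits the first seven bytes of iodc_data as one run of hex digits. For instance (illustrative values):

    u8 iodc[7] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd };

    pr_err("IODC data: %7phN\n", iodc); /* "IODC data: 0123456789abcd" */

Note that the replaced code printed iodc_data[0], [1] and [3]..[6], silently skipping byte 2; the new format prints bytes 0..6 contiguously.
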
@@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+       pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
                ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
index 6308749359e4b7d6ee348062d584f7b747f1a115..fe3f2a49d2b1063a93daa0a9d4077d2978c5bdaf 100644 (file)
        ENTRY_COMP(preadv2)
        ENTRY_COMP(pwritev2)
        ENTRY_SAME(statx)
+       ENTRY_COMP(io_pgetevents)       /* 350 */
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 143f90e2f9f3c631616d4af52f0fe3fa08f44af9..2ef83d78eec42bd3ad55a3c2e0f976e081417266 100644 (file)
@@ -25,7 +25,7 @@
 
 /* #define DEBUG 1 */
 #ifdef DEBUG
-#define dbg(x...) printk(x)
+#define dbg(x...) pr_debug(x)
 #else
 #define dbg(x...)
 #endif
@@ -182,7 +182,7 @@ int __init unwind_init(void)
        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];
 
-       printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 
+       dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));
 
index bd06a3ccda312a0a645cd0dbff887924f691d2ce..fb96206de3175d65f86a63d9a7db0a815300a791 100644 (file)
@@ -243,7 +243,9 @@ endif
 cpu-as-$(CONFIG_4xx)           += -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)       += $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)          += -Wa,-me200
+cpu-as-$(CONFIG_E500)          += -Wa,-me500
 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC)    += $(call as-option,-Wa$(comma)-me500mc)
 
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
index 6a6673907e45eeb934e66023e8630fe21d8fd31d..82e44b1a00ae91219f482afa654f2d0440c5aa78 100644 (file)
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)  (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -137,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
index af5f2baac80f991951ac77dc3b3eaeb1e72aee46..a069dfcac9a94a94efe66a162cbbff88f1596934 100644 (file)
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 #define is_hugepd(hpd)         (hugepd_ok(hpd))
 
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache.
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX                                                      \
+       (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+       switch (index) {
+       case H_16M_CACHE_INDEX:
+               return HTLB_16M_INDEX;
+       case H_16G_CACHE_INDEX:
+               return HTLB_16G_INDEX;
+       default:
+               BUG();
+       }
+       /* should not reach */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
index fb4b3ba52339e9233207ce7345e2f9d920835f97..d7ee249d6890cb30fcf10ebd608665d57ba2f781 100644 (file)
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
        return 0;
 }
+
 #define is_hugepd(pdep)                        0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+       BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
index 63cee159022b51400fbc52dd21ebd31f55f3db67..42aafba7a30834db7643213a3aec583a3cdd1b6a 100644 (file)
@@ -287,6 +287,11 @@ enum pgtable_index {
        PMD_INDEX,
        PUD_INDEX,
        PGD_INDEX,
+       /*
+        * Below are used with 4k page size and hugetlb
+        */
+       HTLB_16M_INDEX,
+       HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
index 896efa55999694cdad22f92615d073468d7cfe3b..79d570cbf3325c0e2c67f648badf6edd3dbcd730 100644 (file)
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
                unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa);
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa);
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
index 0f571e0ebca19ccdc8b89540324ccc71849b75e5..bd9ba8defd7258ab6e853be0c39d7290f9f02393 100644 (file)
@@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
index 1707781d2f208096517859d94f30c6533e0a3771..8825953c225b2e48e9e0cd7938d2185b5e821977 100644 (file)
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)      (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -139,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
        tlb_flush_pgtable(tlb, address);
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
index 0e693f322cb2e03a353e3803517820f4c324498b..e2d62d033708c4494a5e95d941b8d34cad3ec3e0 100644 (file)
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
        }
 }
 
+#define get_hugepd_cache_index(x)      (x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
index cfcf6a874cfab3a094d4c931bdc7cade28184665..01b5171ea189994ab394685f8f4645a3fd86594c 100644 (file)
@@ -393,3 +393,4 @@ SYSCALL(pkey_alloc)
 SYSCALL(pkey_free)
 SYSCALL(pkey_mprotect)
 SYSCALL(rseq)
+COMPAT_SYS(io_pgetevents)
index 1e9708632dce30e1093d48dbed2db8d0d90a4e89..c19379f0a32e2b0fe59a9634140582a8afbc291e 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            388
+#define NR_syscalls            389
 
 #define __NR__exit __NR_exit
 
index ac5ba55066dd76a26f133d91623309036bcad4c8..985534d0b448b7ae7b9d4cad7c3f9257d4ce0789 100644 (file)
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
 #define __NR_rseq              387
+#define __NR_io_pgetevents     388
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 4be1c0de9406b159eede5503b3a8044645dac7fa..96dd3d871986428dadcbc9bb350c1b876fde8ab4 100644 (file)
@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-       } else /* DD2.1 and up have DD2_1 */
+       } else if ((version & 0xffff0000) == 0x004e0000)
+               /* DD2.1 and up have DD2_1 */
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 
        if ((version & 0xffff0000) == 0x004e0000) {
index e734f6e45abc1ecb64cc8fe68b88054210e30bd3..689306118b48641495ea3b7ad9f7b058b4ce14c8 100644 (file)
@@ -144,7 +144,9 @@ power9_restore_additional_sprs:
        mtspr   SPRN_MMCR1, r4
 
        ld      r3, STOP_MMCR2(r13)
+       ld      r4, PACA_SPRG_VDSO(r13)
        mtspr   SPRN_MMCR2, r3
+       mtspr   SPRN_SPRG3, r4
        blr
 
 /*
index 4f861055a8521276c89c71cd67c41425c38c0ac2..d63b488d34d79033fa7229bfeb4d306cf6b56bc0 100644 (file)
@@ -285,9 +285,6 @@ pci_bus_to_hose(int bus)
  * Note that the returned IO or memory base is a physical address
  */
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which,
                unsigned long, bus, unsigned long, devfn)
 {
@@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which,
 
        return result;
 }
-#pragma GCC diagnostic pop
index 812171c09f42fecf2c97757e37f7ad45ec9a35d8..dff28f90351245d58f6b77130fb26fcb73351c5d 100644 (file)
@@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
 #define IOBASE_ISA_IO          3
 #define IOBASE_ISA_MEM         4
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
                          unsigned long, in_devfn)
 {
@@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
 
        return -EOPNOTSUPP;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_NUMA
 int pcibus_to_node(struct pci_bus *bus)
index 7fb9f83dcde889f8340daa94ec66cc6b3cb1804b..8afd146bc9c70dc6480e2fff20d6239d327e33d3 100644 (file)
@@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
 }
 
 /* We assume to be passed big endian arguments */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 {
        struct rtas_args args;
@@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 
        return 0;
 }
-#pragma GCC diagnostic pop
 
 /*
  * Call early during boot, before mem init, to retrieve the RTAS
index 62b1a40d895777a10b3c7279fde05583ae3dc66b..40b44bb53a4efbb8b25c64786262e0123a3da640 100644 (file)
@@ -700,12 +700,19 @@ EXPORT_SYMBOL(check_legacy_ioport);
 static int ppc_panic_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
+       /*
+        * panic does a local_irq_disable, but we really
+        * want interrupts to be hard disabled.
+        */
+       hard_irq_disable();
+
        /*
         * If firmware-assisted dump has been registered then trigger
         * firmware-assisted dump and let firmware handle everything else.
         */
        crash_fadump(NULL, ptr);
-       ppc_md.panic(ptr);  /* May not return */
+       if (ppc_md.panic)
+               ppc_md.panic(ptr);  /* May not return */
        return NOTIFY_DONE;
 }
 
@@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = {
 
 void __init setup_panic(void)
 {
-       if (!ppc_md.panic)
+       /* PPC64 always does a hard irq disable in its panic handler */
+       if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
                return;
        atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
 }
index 7a7ce8ad455e1533498fc3c7a5d8a853abb4d9cd..225bc5f91049436277e7c45787d8a7370d6dac78 100644 (file)
@@ -387,6 +387,14 @@ void early_setup_secondary(void)
 
 #endif /* CONFIG_SMP */
 
+void panic_smp_self_stop(void)
+{
+       hard_irq_disable();
+       spin_begin();
+       while (1)
+               spin_cpu_relax();
+}
+
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 static bool use_spinloop(void)
 {
index 17fe4339ba596150e8dc2b0eaf698aee3303ad18..b3e8db376ecde459bb8b5a1cd00b10c9606df289 100644 (file)
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
        /* Re-enable the breakpoints for the signal stack */
        thread_change_pc(tsk, tsk->thread.regs);
 
-       rseq_signal_deliver(tsk->thread.regs);
+       rseq_signal_deliver(&ksig, tsk->thread.regs);
 
        if (is32) {
                if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index 5eedbb282d42fcf2caed7f3d09d0227b2e9e0734..e6474a45cef50623be68bc1fbf0b83635275dceb 100644 (file)
@@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
 }
 #endif
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                       struct ucontext __user *, new_ctx, int, ctx_size)
@@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
@@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
        return 0;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC32
 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
                         int, ndbg, struct sig_dbg_op __user *, dbg)
@@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
        return 0;
 }
 #endif
-#pragma GCC diagnostic pop
 
 /*
  * OK, we're invoking a handler
index d42b600203892d57d7fb3398f7cad38090df9ce6..83d51bf586c7e1ec3697a424a33a1559579147b8 100644 (file)
@@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
 /*
  * Handle {get,set,swap}_context operations
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                struct ucontext __user *, new_ctx, long, ctx_size)
 {
@@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 
 /*
index 5eadfffabe35134f6f34a6acca61c738c4efcbc9..4794d6b4f4d27a4db7f637a309897d64f1ad9e9c 100644 (file)
@@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
        nmi_ipi_busy_count--;
        nmi_ipi_unlock();
 
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        spin_begin();
        while (1)
                spin_cpu_relax();
@@ -617,9 +614,6 @@ void smp_send_stop(void)
 
 static void stop_this_cpu(void *dummy)
 {
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        hard_irq_disable();
        spin_begin();
        while (1)
index 07e97f289c5207389ffb817330e5d66a4beb6e70..e2c50b55138f8ab52eecace4c6aad72c382e6bcd 100644 (file)
@@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
 #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
        nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
 }
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
index 083fa06962fda045cb5f00ac0ea5b61046e3d4c4..466216506eb2f4bfa7b6b94ed89140914b1ea682 100644 (file)
@@ -62,9 +62,6 @@ out:
        return ret;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
 {
        return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC32
 /*
index d066e37551ec861c1d71a8a958784fd792e2dae6..8c456fa691a586d95127ad8cc54214cf3daf5ce4 100644 (file)
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;
 
-       if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+       if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_HARDWARE;
 
        if (mm_iommu_mapped_inc(mem))
index 925fc316a104cc1ce33b6630cf9aaa46ffde7c0c..5b298f5a1a14ee65ed0be2ad3a85c692ef9b5e8a 100644 (file)
@@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
        if (!mem)
                return H_TOO_HARD;
 
-       if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+       if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+                       &hpa)))
                return H_HARDWARE;
 
        pua = (void *) vmalloc_to_phys(pua);
@@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 
                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
-                       prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+                       prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+                                       IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }
 
        if (!prereg) {
index 7c5f479c5c00fb0f562801285e3795400edab084..8a9a49c138652ba2b971a265db233988e01aa7b1 100644 (file)
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
-               pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+               pgtable_free_tlb(tlb, hugepte,
+                                get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
index abb43646927aa9575c3aeec91ae55905545b6026..a4ca576125580d5449d7fadb87decb06d03c41e9 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
 #include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
        struct rcu_head rcu;
        unsigned long used;
        atomic64_t mapped;
+       unsigned int pageshift;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas[] */
        u64 *hpas;              /* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 {
        struct mm_iommu_table_group_mem_t *mem;
        long i, j, ret = 0, locked_entries = 0;
+       unsigned int pageshift;
+       unsigned long flags;
        struct page *page = NULL;
 
        mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
                goto unlock_exit;
        }
 
+       /*
+        * As a starting point for the maximum page size calculation,
+        * use the natural alignment of @ua and @entries: this allows
+        * IOMMU pages smaller than huge pages but still bigger than
+        * PAGE_SIZE.
+        */
+       mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
        mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
        if (!mem->hpas) {
                kfree(mem);
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
                        }
                }
 populate:
+               pageshift = PAGE_SHIFT;
+               if (PageCompound(page)) {
+                       pte_t *pte;
+                       struct page *head = compound_head(page);
+                       unsigned int compshift = compound_order(head);
+
+                       local_irq_save(flags); /* disables as well */
+                       pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+                       local_irq_restore(flags);
+
+                       /* Double check it is still the same pinned page */
+                       if (pte && pte_page(*pte) == head &&
+                                       pageshift == compshift)
+                               pageshift = max_t(unsigned int, pageshift,
+                                               PAGE_SHIFT);
+               }
+               mem->pageshift = min(mem->pageshift, pageshift);
                mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
        }
 
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
 EXPORT_SYMBOL_GPL(mm_iommu_find);
 
 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa)
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
        if (entry >= mem->entries)
                return -EFAULT;
 
+       if (pageshift > mem->pageshift)
+               return -EFAULT;
+
        *hpa = *va | (ua & ~PAGE_MASK);
 
        return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
 
 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
-               unsigned long ua, unsigned long *hpa)
+               unsigned long ua, unsigned int pageshift, unsigned long *hpa)
 {
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
        if (entry >= mem->entries)
                return -EFAULT;
 
+       if (pageshift > mem->pageshift)
+               return -EFAULT;
+
        pa = (void *) vmalloc_to_phys(va);
        if (!pa)
                return -EFAULT;
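
mem->pageshift starts at __ffs(ua | (entries << PAGE_SHIFT)), the index of the lowest set bit, i.e. the largest power-of-two alignment that both the start address and the total size honour; the pinning loop then clamps it down to the smallest page actually found. Worked example (assuming 64K base pages, PAGE_SHIFT = 16): pinning entries = 256 pages at ua = 0x10000000 gives entries << 16 = 0x1000000, so ua | size = 0x11000000 and __ffs() = 24. mem->pageshift starts at 24 (16M), so mm_iommu_ua_to_hpa() will accept a mapping request with pageshift <= 24 (for example 16M IOMMU pages) and reject a 1G request (pageshift 30) with -EFAULT.
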
index c1f4ca45c93a488df07d66525f0d935ca342f84c..4afbfbb64bfd0a21254a177f4fa3df3c37bff6ea 100644 (file)
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+               /* 16M hugepd directory at pud level */
+       case HTLB_16M_INDEX:
+               BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+               break;
+               /* 16G hugepd directory at the pgd level */
+       case HTLB_16G_INDEX:
+               BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+               break;
+#endif
                /* We don't free pgd table via RCU callback */
        default:
                BUG();
index 75cb646a79c383bc39c578a49ddf48a23ee9c44b..9d16ee251fc0131118c375282b2c3e103a2e0b0f 100644 (file)
@@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
  * in a 2-bit field won't allow writes to a page that is otherwise
  * write-protected.
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                unsigned long, len, u32 __user *, map)
 {
@@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
        up_write(&mm->mmap_sem);
        return err;
 }
-#pragma GCC diagnostic pop
index 67a6e86d3e7efb25e170af7218453230703aa4a5..1135b43a597c5045be9a0425b67a5e5edd17d876 100644 (file)
@@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
 
-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                    unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+                                       unsigned long start, unsigned long end,
+                                       bool flush_all_sizes)
 
 {
-       struct mm_struct *mm = vma->vm_mm;
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;
 
-#ifdef CONFIG_HUGETLB_PAGE
-       if (is_vm_hugetlb_page(vma))
-               return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;
@@ -738,37 +733,64 @@ is_local:
                                _tlbie_pid(pid, RIC_FLUSH_TLB);
                }
        } else {
-               bool hflush = false;
+               bool hflush = flush_all_sizes;
+               bool gflush = flush_all_sizes;
                unsigned long hstart, hend;
+               unsigned long gstart, gend;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
-               hend = end >> HPAGE_PMD_SHIFT;
-               if (hstart < hend) {
-                       hstart <<= HPAGE_PMD_SHIFT;
-                       hend <<= HPAGE_PMD_SHIFT;
+               if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                        hflush = true;
+
+               if (hflush) {
+                       hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+                       hend = end & PMD_MASK;
+                       if (hstart == hend)
+                               hflush = false;
+               }
+
+               if (gflush) {
+                       gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+                       gend = end & PUD_MASK;
+                       if (gstart == gend)
+                               gflush = false;
                }
-#endif
 
                asm volatile("ptesync": : :"memory");
                if (local) {
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbiel_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        asm volatile("ptesync": : :"memory");
                } else {
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbie_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        fixup_tlbie();
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
 }
+
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                    unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+       if (is_vm_hugetlb_page(vma))
+               return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+       __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
 static int radix_get_mmu_psize(int page_size)
@@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb)
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;
+       unsigned long start = tlb->start;
+       unsigned long end = tlb->end;
 
        /*
         * if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb)
         */
        if (tlb->fullmm) {
                __flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+       } else if (mm_tlb_flush_nested(mm)) {
+               /*
+                * If there is a concurrent invalidation that is clearing ptes,
+                * then it's possible this invalidation will miss one of those
+                * cleared ptes and miss flushing the TLB. If this invalidate
+                * returns before the other one flushes TLBs, that can result
+                * in it returning while there are still valid TLBs inside the
+                * range to be invalidated.
+                *
+                * See mm/memory.c:tlb_finish_mmu() for more details.
+                *
+                * The solution to this is ensure the entire range is always
+                * flushed here. The problem for powerpc is that the flushes
+                * are page size specific, so this "forced flush" would not
+                * do the right thing if there are a mix of page sizes in
+                * the range to be invalidated. So use __flush_tlb_range
+                * which invalidates all possible page sizes in the range.
+                *
+                * A PWC flush is probably not required because the core code
+                * shouldn't free page tables in this path, but accounting
+                * for the possibility makes us a bit more robust.
+                *
+                * need_flush_all is an uncommon case because page table
+                * teardown should be done with exclusive locks held (but
+                * after locks are dropped another invalidate could come
+                * in); it could be optimized further if necessary.
+                */
+               if (!tlb->need_flush_all)
+                       __radix__flush_tlb_range(mm, start, end, true);
+               else
+                       radix__flush_all_mm(mm);
+#endif
        } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
-               unsigned long start = tlb->start;
-               unsigned long end = tlb->end;
-
                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
@@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
+                       if (!cpu_possible(sib))
+                               continue;
                        if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
                                flush = true;
                }
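
In __radix__flush_tlb_range() above, the 2M (PMD) and 1G (PUD) sweeps cover only the fully aligned interior of the range: hstart rounds start up and hend rounds end down. For example, with start = 0x1ff000 and end = 0x601000, hstart = 0x200000 and hend = 0x600000, so the 2M-sized tlbie(l) loop covers [0x200000, 0x600000) while the base-page loop covers the whole range. If the rounding makes hstart == hend, no whole 2M block lies inside the range and that sweep is skipped.
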
index 380cbf9a40d98f76718f22d6e6b5ffa6c1cd25bc..c0a9bcd28356dfcacacb6bae4db7b2f8d18b85f0 100644 (file)
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                u64 imm64;
                u8 *func;
                u32 true_cond;
+               u32 tmp_idx;
 
                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
                case BPF_STX | BPF_XADD | BPF_W:
                        /* Get EA into TMP_REG_1 */
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-                       /* error if EA is not word-aligned */
-                       PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_JMP(exit_addr);
+                       tmp_idx = ctx->idx * 4;
                        /* load value from memory into TMP_REG_2 */
                        PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        /* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
                        /* store result back */
                        PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
                        /* we're done if this succeeded */
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
-                       /* otherwise, let's try once more */
-                       PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-                       PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-                       PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       /* exit if the store was not successful */
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_BCC(COND_NE, exit_addr);
+                       PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
                /* *(u64 *)(dst + off) += src */
                case BPF_STX | BPF_XADD | BPF_DW:
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
-                       /* error if EA is not doubleword-aligned */
-                       PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_JMP(exit_addr);
-                       PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
-                       PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
-                       PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+                       tmp_idx = ctx->idx * 4;
                        PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
-                       PPC_LI(b2p[BPF_REG_0], 0);
-                       PPC_BCC(COND_NE, exit_addr);
+                       PPC_BCC_SHORT(COND_NE, tmp_idx);
                        break;
 
                /*
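
The two BPF_XADD hunks above drop both the alignment trap and the hand-unrolled second larx/stcx. attempt: tmp_idx now records the byte offset of the load-reserve instruction, and the single PPC_BCC_SHORT(COND_NE, tmp_idx) branches back to it whenever the store-conditional fails, retrying until the atomic add commits. A minimal C sketch of the loop the JIT now emits, with GCC atomic builtins standing in for larx/stcx. (illustrative, not kernel code):

    #include <stdint.h>

    static void xadd32(uint32_t *addr, uint32_t val)
    {
            uint32_t old = __atomic_load_n(addr, __ATOMIC_RELAXED);

            /* a failed store-conditional refreshes 'old' and loops,
             * just like branching back to the larx */
            while (!__atomic_compare_exchange_n(addr, &old, old + val, 0,
                                                __ATOMIC_RELAXED,
                                                __ATOMIC_RELAXED))
                    ;
    }
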
index 7c968e46736faa598861259d5a3781577256a7ff..12e6e4d3060236a05fcd987690b385576d8821cb 100644 (file)
 #define DBG(x...)
 #endif
 
-/* Apparently the RTC stores seconds since 1 Jan 1904 */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 #define RTC_OFFSET     2082844800
 
 /*
@@ -97,8 +101,11 @@ static time64_t cuda_get_time(void)
        if (req.reply_len != 7)
                printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
                       req.reply_len);
-       now = (req.reply[3] << 24) + (req.reply[4] << 16)
-               + (req.reply[5] << 8) + req.reply[6];
+       now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) +
+                   (req.reply[5] << 8) + req.reply[6]);
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -106,10 +113,10 @@ static time64_t cuda_get_time(void)
 
 static int cuda_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         nowtime >> 24, nowtime >> 16, nowtime >> 8,
                         nowtime) < 0)
@@ -140,8 +147,12 @@ static time64_t pmu_get_time(void)
        if (req.reply_len != 4)
                printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
                       req.reply_len);
-       now = (req.reply[0] << 24) + (req.reply[1] << 16)
-               + (req.reply[2] << 8) + req.reply[3];
+       now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) +
+                   (req.reply[2] << 8) + req.reply[3]);
+
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -149,10 +160,10 @@ static time64_t pmu_get_time(void)
 
 static int pmu_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
                        nowtime >> 16, nowtime >> 8, nowtime) < 0)
                return -ENXIO;
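
A quick arithmetic check of the RTC_OFFSET constant documented above: 1904-01-01 to 1970-01-01 spans 66 years containing 17 leap days (1904, 1908, ..., 1968), and a u32 of 1904-based seconds lasts about 2^32 / 31,556,952 seconds per year, roughly 136 years, running out in 2040 as the new comment and WARN_ON()s anticipate. A self-contained check (not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t days = 66 * 365 + 17;          /* 1904-01-01..1970-01-01 */

            assert(days * 86400 == 2082844800ULL);  /* == RTC_OFFSET */

            /* u32 seconds since 1904 wrap after ~136 years, i.e. in 2040 */
            double years = (double)UINT32_MAX / (365.2425 * 86400);
            assert(years > 135 && years < 137);
            return 0;
    }
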
index 47166ad2a669186c98e4ddb656a1edf5665e66ef..196978733e6407d05b0b2c97f7d4d980a2ce9f05 100644 (file)
@@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
 {
        int nr, dotted;
        unsigned long first_adr;
-       unsigned long inst, last_inst = 0;
+       unsigned int inst, last_inst = 0;
        unsigned char val[4];
 
        dotted = 0;
@@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
                dotted = 0;
                last_inst = inst;
                if (praddr)
-                       printf(REG"  %.8lx", adr, inst);
+                       printf(REG"  %.8x", adr, inst);
                printf("\t");
                dump_func(inst, adr);
                printf("\n");
index f12680c9b9475e2b130da3369644e797575f7a80..4764fdeb4f1f6837c771e42f9e84ca5bd8291af6 100644 (file)
@@ -107,6 +107,7 @@ config ARCH_RV32I
        select GENERIC_LIB_ASHLDI3
        select GENERIC_LIB_ASHRDI3
        select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_UCMPDI2
 
 config ARCH_RV64I
        bool "RV64I"
index 5cae4c30cd8e2e2147b59285f9eccaaae7e9a789..1e0dfc36aab9e597aaf0d0fb3c99b2ac3dcae750 100644 (file)
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
 
 typedef union __riscv_fp_state elf_fpregset_t;
 
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info)                ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info)                ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF32_R_TYPE(r_info)
+#endif
 
 /*
  * RISC-V relocation types
index b74cbfbce2d0dd9df65ba779ab9dd8feb0d38eed..7bcdaed15703be6d8f141a8582abd024dc966fec 100644 (file)
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
-#endif
-
 void __init init_IRQ(void)
 {
        irqchip_init();
index 1d5e9b934b8ca5b5b78a64af5e1c06e334b7e5f2..3303ed2cd4193f82c51730a992d6c875b361ff80 100644 (file)
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
 static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
                                     Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm12 = (offset & 0x1000) << (31 - 12);
        u32 imm11 = (offset & 0x800) >> (11 - 7);
        u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
                                  Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm20 = (offset & 0x100000) << (31 - 20);
        u32 imm19_12 = (offset & 0xff000);
        u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm8 = (offset & 0x100) << (12 - 8);
        u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm11 = (offset & 0x800) << (12 - 11);
        u16 imm10 = (offset & 0x400) >> (10 - 8);
        u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
 static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        /* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_rela(struct module *me, u32 *location,
                                   Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
 static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location += (*(u32 *)v);
+       *(u32 *)location += (u32)v;
        return 0;
 }
 
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location -= (*(u32 *)v);
+       *(u32 *)location -= (u32)v;
        return 0;
 }
 
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                        unsigned int j;
 
                        for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
-                               u64 hi20_loc =
+                               unsigned long hi20_loc =
                                        sechdrs[sechdrs[relsec].sh_info].sh_addr
                                        + rel[j].r_offset;
                                u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                        Elf_Sym *hi20_sym =
                                                (Elf_Sym *)sechdrs[symindex].sh_addr
                                                + ELF_RISCV_R_SYM(rel[j].r_info);
-                                       u64 hi20_sym_val =
+                                       unsigned long hi20_sym_val =
                                                hi20_sym->st_value
                                                + rel[j].r_addend;
 
                                        /* Calculate lo12 */
-                                       u64 offset = hi20_sym_val - hi20_loc;
+                                       size_t offset = hi20_sym_val - hi20_loc;
                                        if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
                                            && hi20_type == R_RISCV_GOT_HI20) {
                                                offset = module_emit_got_entry(
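
The s64-to-ptrdiff_t conversions above let the same relocation code build for 32-bit targets: subtracting two pointers already yields ptrdiff_t (32-bit on rv32, 64-bit on rv64), so forcing the result into s64 only drags 64-bit arithmetic into the RV32 build without widening anything meaningful. The range checks are unaffected, since they compare against a sign-extended 32-bit view, as in this hedged sketch (illustrative name):

    #include <stddef.h>
    #include <stdint.h>

    /* the same overflow test the PCREL_HI20 handler performs; on rv32
     * ptrdiff_t is already 32 bits wide and the test is trivially true */
    static int fits_in_s32(ptrdiff_t offset)
    {
            return offset == (int32_t)offset;
    }
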
index ba3e80712797c8ece03b07930f2ffb4b370588cc..9f82a7e34c648a370ec42f2e0bad711058e9baf2 100644 (file)
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
        struct pt_regs *regs;
 
        regs = task_pt_regs(target);
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
        return ret;
 }
 
index ee44a48faf79dfd8e01cc07db341cacbeb9feed6..f0d2070866d49b170da74ae0e20e779f55a02199 100644 (file)
@@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
        riscv_fill_hwcap();
 }
 
-static int __init riscv_device_init(void)
-{
-       return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
index c77df8142be2eaa9525130b3cbea70e191a20aba..58a522f9bcc319ae5d40a8ae15da5d9021921ebd 100644 (file)
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
+#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
        free_area_init_nodes(max_zone_pfns);
index baed39772c845d74d91c292aba3b3ea4063aa130..8a1863d9ed53385cd5e72813e962bb4310c38abc 100644 (file)
@@ -140,7 +140,7 @@ config S390
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
        select HAVE_FUTEX_CMPXCHG if FUTEX
-       select HAVE_GCC_PLUGINS
+       select HAVE_GCC_PLUGINS if BROKEN
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
@@ -160,6 +160,7 @@ config S390
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING
        select MODULES_USE_ELF_RELA
index 0563fd3e84585769f7acd0f093f30a48eaab9f54..480bb02ccacdd07de17ffda81fae140ee748a505 100644 (file)
@@ -6,36 +6,38 @@
 
 struct css_general_char {
        u64 : 12;
-       u32 dynio : 1;   /* bit 12 */
-       u32 : 4;
-       u32 eadm : 1;    /* bit 17 */
-       u32 : 23;
-       u32 aif : 1;     /* bit 41 */
-       u32 : 3;
-       u32 mcss : 1;    /* bit 45 */
-       u32 fcs : 1;     /* bit 46 */
-       u32 : 1;
-       u32 ext_mb : 1;  /* bit 48 */
-       u32 : 7;
-       u32 aif_tdd : 1; /* bit 56 */
-       u32 : 1;
-       u32 qebsm : 1;   /* bit 58 */
-       u32 : 2;
-       u32 aiv : 1;     /* bit 61 */
-       u32 : 5;
-       u32 aif_osa : 1; /* bit 67 */
-       u32 : 12;
-       u32 eadm_rf : 1; /* bit 80 */
-       u32 : 1;
-       u32 cib : 1;     /* bit 82 */
-       u32 : 5;
-       u32 fcx : 1;     /* bit 88 */
-       u32 : 19;
-       u32 alt_ssi : 1; /* bit 108 */
-       u32 : 1;
-       u32 narf : 1;    /* bit 110 */
-       u32 : 12;
-       u32 util_str : 1;/* bit 123 */
+       u64 dynio : 1;   /* bit 12 */
+       u64 : 4;
+       u64 eadm : 1;    /* bit 17 */
+       u64 : 23;
+       u64 aif : 1;     /* bit 41 */
+       u64 : 3;
+       u64 mcss : 1;    /* bit 45 */
+       u64 fcs : 1;     /* bit 46 */
+       u64 : 1;
+       u64 ext_mb : 1;  /* bit 48 */
+       u64 : 7;
+       u64 aif_tdd : 1; /* bit 56 */
+       u64 : 1;
+       u64 qebsm : 1;   /* bit 58 */
+       u64 : 2;
+       u64 aiv : 1;     /* bit 61 */
+       u64 : 2;
+
+       u64 : 3;
+       u64 aif_osa : 1; /* bit 67 */
+       u64 : 12;
+       u64 eadm_rf : 1; /* bit 80 */
+       u64 : 1;
+       u64 cib : 1;     /* bit 82 */
+       u64 : 5;
+       u64 fcx : 1;     /* bit 88 */
+       u64 : 19;
+       u64 alt_ssi : 1; /* bit 108 */
+       u64 : 1;
+       u64 narf : 1;    /* bit 110 */
+       u64 : 12;
+       u64 util_str : 1;/* bit 123 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
index 607c5e9fba3ddcdfe762c347c8441c447fc529f5..2ce28bf0c5ec44939d815c5187153b0858475aa7 100644 (file)
@@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
 COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
 COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
 COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
+COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
index f03402efab4b414eefdfd59135f4ee89dda68e8a..150130c897c39938d03d04e497100cca77d0a353 100644 (file)
@@ -357,6 +357,10 @@ ENTRY(system_call)
        stg     %r2,__PT_R2(%r11)               # store return value
 
 .Lsysc_return:
+#ifdef CONFIG_DEBUG_RSEQ
+       lgr     %r2,%r11
+       brasl   %r14,rseq_syscall
+#endif
        LOCKDEP_SYS_EXIT
 .Lsysc_tif:
        TSTMSK  __PT_FLAGS(%r11),_PIF_WORK
@@ -1265,7 +1269,7 @@ cleanup_critical:
        jl      0f
        clg     %r9,BASED(.Lcleanup_table+104)  # .Lload_fpu_regs_end
        jl      .Lcleanup_load_fpu_regs
-0:     BR_EX   %r14
+0:     BR_EX   %r14,%r11
 
        .align  8
 .Lcleanup_table:
@@ -1301,7 +1305,7 @@ cleanup_critical:
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
-       BR_EX   %r14
+       BR_EX   %r14,%r11
 #endif
 
 .Lcleanup_system_call:
index 2d2960ab3e108ca5b0d6ef06476987ed8d5f4839..22f08245aa5d46ef5f80398ebcc4e064099c91a6 100644 (file)
@@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs)
                }
                /* No longer in a system call */
                clear_pt_regs_flag(regs, PIF_SYSCALL);
-
+               rseq_signal_deliver(&ksig, regs);
                if (is_compat_task())
                        handle_signal32(&ksig, oldset, regs);
                else
@@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs)
 {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
+       rseq_handle_notify_resume(NULL, regs);
 }
index 8b210ead79569413ab74e3a1c03b506f48a1622f..022fc099b628292e3c9daeecb2eb18ac54816935 100644 (file)
 379  common    statx                   sys_statx                       compat_sys_statx
 380  common    s390_sthyi              sys_s390_sthyi                  compat_sys_s390_sthyi
 381  common    kexec_file_load         sys_kexec_file_load             compat_sys_kexec_file_load
+382  common    io_pgetevents           sys_io_pgetevents               compat_sys_io_pgetevents
+383  common    rseq                    sys_rseq                        compat_sys_rseq
index 84bd6329a88dd3ace39e612197dccec0a48dc4fc..e3bd5627afef3452c50325e955611a5814341b6f 100644 (file)
@@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
                spin_unlock_bh(&mm->context.lock);
                if (mask != 0)
                        return;
+       } else {
+               atomic_xor_bits(&page->_refcount, 3U << 24);
        }
 
        pgtable_page_dtor(page);
@@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
+               if (mask & 3)
+                       atomic_xor_bits(&page->_refcount, 3 << 24);
                pgtable_page_dtor(page);
                __free_page(page);
                break;
index d2db8acb1a55480895e38fdf142c3d074610230d..5f0234ec8038eb2d11e93b190f3f35e29f29207b 100644 (file)
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                goto free_addrs;
        }
        if (bpf_jit_prog(&jit, fp)) {
+               bpf_jit_binary_free(header);
                fp = orig_fp;
                goto free_addrs;
        }
index f1dbb4ee19d781751ac22f233ec7dea5f9a66ed3..887d3a7bb64633dd0df064ef23b037c281d0ec29 100644 (file)
@@ -63,7 +63,7 @@ config X86
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_REFCOUNT
        select ARCH_HAS_UACCESS_FLUSHCACHE      if X86_64
-       select ARCH_HAS_UACCESS_MCSAFE          if X86_64
+       select ARCH_HAS_UACCESS_MCSAFE          if X86_64 && X86_MCE
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_STRICT_KERNEL_RWX
index f0a6ea22429d7384d81f81e38fb39dbfc9e720ed..a08e82856563ddc34079e96b592e5c60edbc30aa 100644 (file)
@@ -258,11 +258,6 @@ archscripts: scripts_basic
 archheaders:
        $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
-       $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
 ###
 # Kernel objects
 
@@ -327,7 +322,6 @@ archclean:
        $(Q)rm -rf $(objtree)/arch/x86_64
        $(Q)$(MAKE) $(clean)=$(boot)
        $(Q)$(MAKE) $(clean)=arch/x86/tools
-       $(Q)$(MAKE) $(clean)=arch/x86/purgatory
 
 define archhelp
   echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
index a8a8642d2b0b802424caf7b4f925edbce7e3284e..e98522ea6f09ee2fb52c2d69f333e6d4916940e5 100644 (file)
@@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
        struct pci_setup_rom *rom = NULL;
        efi_status_t status;
        unsigned long size;
-       uint64_t attributes, romsize;
+       uint64_t romsize;
        void *romimage;
 
-       status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
-                               EfiPciIoAttributeOperationGet, 0, 0,
-                               &attributes);
-       if (status != EFI_SUCCESS)
-               return status;
-
        /*
-        * Some firmware images contain EFI function pointers at the place where the
-        * romimage and romsize fields are supposed to be. Typically the EFI
+        * Some firmware images contain EFI function pointers at the place where
+        * the romimage and romsize fields are supposed to be. Typically the EFI
         * code is mapped at high addresses, translating to an unrealistically
         * large romsize. The UEFI spec limits the size of option ROMs to 16
         * MiB so we reject any ROMs over 16 MiB in size to catch this.
index 9254e0b6cc060011d63b2bfa9ec281768776b2bc..717bf07764210f065315d7e3afa09774d9a87efe 100644 (file)
@@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
        movdqu STATE3, 0x40(STATEP)
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis128_aesni_enc_tail)
 
 .macro decrypt_block a s0 s1 s2 s3 s4 i
index 9263c344f2c797d847b7b7ec534803010a5c9e72..4eda2b8db9e1b08d4af85e37c6cf19039bc9220b 100644 (file)
@@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail)
        state_store0
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis128l_aesni_enc_tail)
 
 /*
index 1d977d515bf992c649d8890316dfca41fc511364..32aae83972680731a4f36f92b608344d15339d9f 100644 (file)
@@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail)
        state_store0
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis256_aesni_enc_tail)
 
 /*
index 37d422e77931129d06e88c30edd040bc78c3de04..07653d4582a66b45370fa764747e5fa1056ef748 100644 (file)
@@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail)
        vmovdqu STATE4, (4 * 32)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus1280_avx2_enc_tail)
 
 /*
index 1fe637c7be9db5515bbaff68f935dea11d151062..bd1aa1b608698fd50c967a250dc257a89b5fa3b4 100644 (file)
@@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail)
        movdqu STATE4_HI, (9 * 16)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus1280_sse2_enc_tail)
 
 /*
index 71c72a0a0862c25da3293b499f0b2994e14f9926..efa02816d921c246b02a40a5b85e1396068c9beb 100644 (file)
@@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail)
        movdqu STATE4, (4 * 16)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus640_sse2_enc_tail)
 
 /*
index 92190879b228c82f4ec681aa9c07bccc0e32204a..3b2490b81918128a61f6df1807788436d4f8ceb7 100644 (file)
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
-                       rseq_handle_notify_resume(regs);
+                       rseq_handle_notify_resume(NULL, regs);
                }
 
                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
index 2582881d19ceeeb75a9a90588547914ccdefdbd0..c371bfee137ac976a01716118faf6cf490a3b5aa 100644 (file)
@@ -477,7 +477,7 @@ ENTRY(entry_SYSENTER_32)
         * whereas POPF does not.)
         */
        addl    $PT_EFLAGS-PT_DS, %esp  /* point esp at pt_regs->flags */
-       btr     $X86_EFLAGS_IF_BIT, (%esp)
+       btrl    $X86_EFLAGS_IF_BIT, (%esp)
        popfl
 
        /*
index 9de7f1e1dede7f6e6ebdc66e5f63756a173cdc0a..7d0df78db727296d1c4451e3a930033669f47aa3 100644 (file)
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   %r8                     /* pt_regs->r8 */
+       pushq   $0                      /* pt_regs->r8  = 0 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   %r9                     /* pt_regs->r9 */
+       pushq   $0                      /* pt_regs->r9  = 0 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   %r10                    /* pt_regs->r10 */
+       pushq   $0                      /* pt_regs->r10 = 0 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   %r11                    /* pt_regs->r11 */
+       pushq   $0                      /* pt_regs->r11 = 0 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
@@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        xorl    %ecx, %ecx              /* nospec   cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   $0                      /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r8 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   $0                      /* pt_regs->r9  = 0 */
+       pushq   %r9                     /* pt_regs->r9 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   $0                      /* pt_regs->r10 = 0 */
+       pushq   %r10                    /* pt_regs->r10 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   $0                      /* pt_regs->r11 = 0 */
+       pushq   %r11                    /* pt_regs->r11 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
index 8a10a045b57bde1345c542b8bbe2a19920d2400b..8cf03f1019380bf0ba0c201067c2c781eb1a543c 100644 (file)
@@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
        ds->bts_buffer_base = (unsigned long) cea;
        ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
        ds->bts_index = ds->bts_buffer_base;
-       max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
-       ds->bts_absolute_maximum = ds->bts_buffer_base + max;
-       ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
+       max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+       ds->bts_absolute_maximum = ds->bts_buffer_base +
+                                       max * BTS_RECORD_SIZE;
+       ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+                                       (max / 16) * BTS_RECORD_SIZE;
        return 0;
 }
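
The alloc_bts_buffer() change above fixes a units mix-up: the old code computed max as a byte count and then subtracted max / 16 bytes from the absolute maximum, which is generally not a whole number of BTS records, so the interrupt threshold could land mid-record. The new code keeps max in records and scales by BTS_RECORD_SIZE only at the end. Worked numbers, assuming BTS_BUFFER_SIZE = 0x10000 and BTS_RECORD_SIZE = 24 (both constants are assumptions here, not taken from this diff):

    #include <stdio.h>

    int main(void)
    {
            unsigned long bufsz = 0x10000, rec = 24;

            unsigned long old_max = rec * (bufsz / rec);     /* 65520 bytes  */
            unsigned long old_thr = old_max - old_max / 16;  /* 61425 bytes  */

            unsigned long max = bufsz / rec;                 /* 2730 records */
            unsigned long new_thr = (max - max / 16) * rec;  /* 61440 bytes  */

            /* prints "old remainder 9, new remainder 0" */
            printf("old remainder %lu, new remainder %lu\n",
                   old_thr % rec, new_thr % rec);
            return 0;
    }
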
 
index f68855499391f4637532fa0de4c95ca844e6563e..40233836565118f4fb60239ded1fc09aabe34cf1 100644 (file)
@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
                nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
        }
+       if (nr_bank < 0)
+               goto ipi_mask_ex_done;
        if (!nr_bank)
                ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
 
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 
        for_each_cpu(cur_cpu, mask) {
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+               if (vcpu == VP_INVAL)
+                       goto ipi_mask_done;
+
                /*
                 * This particular version of the IPI hypercall can
                 * only target up to 64 CPUs.
index 4c431e1c1effc42ade651f9a69c3e06136827e17..1ff420217298edf88648d35b56cefb73b8beb788 100644 (file)
@@ -265,7 +265,7 @@ void __init hyperv_init(void)
 {
        u64 guest_id, required_msrs;
        union hv_x64_msr_hypercall_contents hypercall_msr;
-       int cpuhp;
+       int cpuhp, i;
 
        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
        if (!hv_vp_index)
                return;
 
+       for (i = 0; i < num_possible_cpus(); i++)
+               hv_vp_index[i] = VP_INVAL;
+
        hv_vp_assist_page = kcalloc(num_possible_cpus(),
                                    sizeof(*hv_vp_assist_page), GFP_KERNEL);
        if (!hv_vp_assist_page) {
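
Stamping every hv_vp_index slot with VP_INVAL up front pairs with the checks added in hv_apic.c above and mshyperv.h below: VP number 0 is valid, so a zeroed or not-yet-initialized slot is indistinguishable from a real mapping unless an out-of-band sentinel is written first, and every consumer now bails out when it sees that sentinel. The pattern in miniature (illustrative, not Hyper-V code):

    #include <stdint.h>
    #include <stdlib.h>

    #define VP_INVAL UINT32_MAX     /* 0 is a real VP number, so use ~0 */

    int main(void)
    {
            int ncpus = 8;
            uint32_t *vp_index = malloc(ncpus * sizeof(*vp_index));

            if (!vp_index)
                    return 1;
            for (int i = 0; i < ncpus; i++)
                    vp_index[i] = VP_INVAL;   /* "not initialized yet" */

            /* readers treat vp_index[cpu] == VP_INVAL as "give up" */
            free(vp_index);
            return 0;
    }
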
index c356098b6fb92b8ff7d42b2fd813c2a8551d3db1..4d4015ddcf2633e9e8388216f9e9c8639e2eced8 100644 (file)
@@ -7,8 +7,6 @@
 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
 #define _ASM_X86_MACH_DEFAULT_APM_H
 
-#include <asm/nospec-branch.h>
-
 #ifdef APM_ZERO_SEGS
 #      define APM_DO_ZERO_SEGS \
                "pushl %%ds\n\t" \
@@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
-       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
                  "=S" (*esi)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
-       firmware_restrict_branch_speculation_end();
 }
 
 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
         * N.B. We do NOT need a cld after the BIOS call
         * because we always save and restore the flags.
         */
-       firmware_restrict_branch_speculation_start();
        __asm__ __volatile__(APM_DO_ZERO_SEGS
                "pushl %%edi\n\t"
                "pushl %%ebp\n\t"
@@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
                  "=S" (si)
                : "a" (func), "b" (ebx_in), "c" (ecx_in)
                : "memory", "cc");
-       firmware_restrict_branch_speculation_end();
        return error;
 }
 
index 219faaec51dfa192f69d8893c8844219c0c89029..990770f9e76b5a52af6f85692883a8507af00af6 100644 (file)
 #define _ASM_SI                __ASM_REG(si)
 #define _ASM_DI                __ASM_REG(di)
 
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1      _ASM_AX
+#define _ASM_ARG2      _ASM_DX
+#define _ASM_ARG3      _ASM_CX
+
+#define _ASM_ARG1L     eax
+#define _ASM_ARG2L     edx
+#define _ASM_ARG3L     ecx
+
+#define _ASM_ARG1W     ax
+#define _ASM_ARG2W     dx
+#define _ASM_ARG3W     cx
+
+#define _ASM_ARG1B     al
+#define _ASM_ARG2B     dl
+#define _ASM_ARG3B     cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1      _ASM_DI
+#define _ASM_ARG2      _ASM_SI
+#define _ASM_ARG3      _ASM_DX
+#define _ASM_ARG4      _ASM_CX
+#define _ASM_ARG5      r8
+#define _ASM_ARG6      r9
+
+#define _ASM_ARG1Q     rdi
+#define _ASM_ARG2Q     rsi
+#define _ASM_ARG3Q     rdx
+#define _ASM_ARG4Q     rcx
+#define _ASM_ARG5Q     r8
+#define _ASM_ARG6Q     r9
+
+#define _ASM_ARG1L     edi
+#define _ASM_ARG2L     esi
+#define _ASM_ARG3L     edx
+#define _ASM_ARG4L     ecx
+#define _ASM_ARG5L     r8d
+#define _ASM_ARG6L     r9d
+
+#define _ASM_ARG1W     di
+#define _ASM_ARG2W     si
+#define _ASM_ARG3W     dx
+#define _ASM_ARG4W     cx
+#define _ASM_ARG5W     r8w
+#define _ASM_ARG6W     r9w
+
+#define _ASM_ARG1B     dil
+#define _ASM_ARG2B     sil
+#define _ASM_ARG3B     dl
+#define _ASM_ARG4B     cl
+#define _ASM_ARG5B     r8b
+#define _ASM_ARG6B     r9b
+
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly.
  * The output operand must be type "bool".
index 042b5e892ed1063769b253bdf35e31171eb55c4d..14de0432d288414bd1437e44b8cb13facc6f12e9 100644 (file)
@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 {
        unsigned long mask;
 
-       asm ("cmp %1,%2; sbb %0,%0;"
+       asm volatile ("cmp %1,%2; sbb %0,%0;"
                        :"=r" (mask)
                        :"g"(size),"r" (index)
                        :"cc");
index 89f08955fff733c688a5ce4f4a0b8d74050ee617..c4fc17220df959f2d5feb493af6374e7dacce613 100644 (file)
@@ -13,7 +13,7 @@
  * Interrupt control:
  */
 
-static inline unsigned long native_save_fl(void)
+extern inline unsigned long native_save_fl(void)
 {
        unsigned long flags;
 
index 3cd14311edfad6d04c5ea382b2b6845f66a9d6f8..5a7375ed5f7cd80ca9925f05d422a9090e3602be 100644 (file)
@@ -9,6 +9,8 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 
+#define VP_INVAL       U32_MAX
+
 struct ms_hyperv_info {
        u32 features;
        u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
-
 /*
  * Generate the guest ID.
  */
@@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
         */
        for_each_cpu(cpu, cpus) {
                vcpu = hv_cpu_number_to_vp_number(cpu);
+               if (vcpu == VP_INVAL)
+                       return -1;
                vcpu_bank = vcpu / 64;
                vcpu_offset = vcpu % 64;
                __set_bit(vcpu_offset, (unsigned long *)
index ada6410fd2ecf6fdde039a185ce570b20af53fca..fbd578daa66e97416058e961dd440774fa9ed586 100644 (file)
@@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
 
 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 {
+       if (!pgtable_l5_enabled())
+               return;
+
        BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
        free_page((unsigned long)p4d);
 }
index 99ecde23c3ec03e02a9aba157e4b31e3c6f53ed7..5715647fc4feca86c8b00e299b347ee602b1b4e6 100644 (file)
@@ -898,7 +898,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 #define pgd_page(pgd)  pfn_to_page(pgd_pfn(pgd))
 
 /* to find an entry in a page-table-directory. */
-static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
        if (!pgtable_l5_enabled())
                return (p4d_t *)pgd;
index 0fdcd21dadbd6422bf40f5cbb2361c08c5fafc14..3c5385f9a88fc1e78729647566d819abbd210b42 100644 (file)
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 }
 #endif
 
-static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
        pgd_t pgd;
 
@@ -230,7 +230,7 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        *p4dp = native_make_p4d(native_pgd_val(pgd));
 }
 
-static __always_inline void native_p4d_clear(p4d_t *p4d)
+static inline void native_p4d_clear(p4d_t *p4d)
 {
        native_set_p4d(p4d, native_make_p4d(0));
 }
index 62acb613114b2322088083f7a9ccc85495a5afa4..a9d637bc301d7dd0086b5126a5ebac8f042c62c9 100644 (file)
@@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
        unsigned long ret;
 
        __uaccess_begin();
-       ret = memcpy_mcsafe(to, from, len);
+       /*
+        * Note, __memcpy_mcsafe() is explicitly used since it can
+        * handle exceptions / faults.  memcpy_mcsafe() may fall back to
+        * memcpy() which lacks this handling.
+        */
+       ret = __memcpy_mcsafe(to, from, len);
        __uaccess_end();
        return ret;
 }
index 425e6b8b95478248dd3a32122b1aca408691cadf..6aa8499e1f62042a510aaafb2a1ef1e3a89804bd 100644 (file)
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA                 0x00000020
 #define VMX_MISC_ACTIVITY_HLT                  0x00000040
+#define VMX_MISC_ZERO_LEN_INS                  0x40000000
 
 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING               0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR             (2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION       (3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
 #define INTR_TYPE_PRIV_SW_EXCEPTION    (5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION       (6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI           0x00000001
index 02d6f5cf4e70800188994e7e64f52916a9d7d83e..8824d01c0c352d6dbd2c12e228bd0de9ca335166 100644 (file)
@@ -61,6 +61,7 @@ obj-y                 += alternative.o i8253.o hw_breakpoint.o
 obj-y                  += tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y                  += pci-iommu_table.o
 obj-y                  += resource.o
+obj-y                  += irqflags.o
 
 obj-y                          += process.o
 obj-y                          += fpu/
index efaf2d4f9c3c7983221298c2ecb37ce367345b1e..d492752f79e1b9f120de025a9fa89b692e720705 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
 }
 EXPORT_SYMBOL(uv_hub_info_version);
 
+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int parse_mem_block_size(char *ptr)
+{
+       unsigned long size = memparse(ptr, NULL);
+
+       /* Size will be rounded down by set_block_size() below */
+       mem_block_size = size;
+       return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+       unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+       unsigned long size;
+
+       for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+               if (IS_ALIGNED(base, size))
+                       break;
+
+       if (size >= mem_block_size)
+               return 0;
+
+       mem_block_size = size;
+       return 1;
+}
+
+static __init void set_block_size(void)
+{
+       unsigned int order = ffs(mem_block_size);
+
+       if (order) {
+               /* adjust for ffs return of 1..64 */
+               set_memory_block_size_order(order - 1);
+               pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+       } else {
+               /* bad or zero value, default to 1UL << 31 (2GB) */
+               pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+               set_memory_block_size_order(31);
+       }
+}
+
 /* Build GAM range lookup table: */
 static __init void build_uv_gr_table(void)
 {
@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
                                        << UV_GAM_RANGE_SHFT);
                int order = 0;
                char suffix[] = " KMGTPE";
+               int flag = ' ';
 
                while (size > 9999 && order < sizeof(suffix)) {
                        size /= 1024;
                        order++;
                }
 
+               /* adjust max block size to current range start */
+               if (gre->type == 1 || gre->type == 2)
+                       if (adj_blksize(lgre))
+                               flag = '*';
+
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+                       pr_info("UV:  # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
                }
-               pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d   %04x  %02x %02x\n",
+               pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-                       size, suffix[order],
+                       flag, size, suffix[order],
                        gre->type, gre->nasid, gre->sockid, gre->pnode);
 
+               /* update to next range start */
                lgre = gre->limit;
                if (sock_min > gre->sockid)
                        sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
 
        build_socket_tables();
        build_uv_gr_table();
+       set_block_size();
        uv_init_hub_info(&hub_info);
        uv_possible_blades = num_possible_nodes();
        if (!_node_to_pnode)
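
adj_blksize() above halves the candidate block size until it evenly divides the GAM range start, flooring at MIN_MEMORY_BLOCK_SIZE, and set_block_size() then feeds the smallest surviving size to set_memory_block_size_order(). A worked example (the 256 MB floor and the 7 GB range start are assumptions for illustration):

    #include <stdio.h>

    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            unsigned long floor = 256UL << 20;      /* assumed floor      */
            unsigned long size  = 2UL << 30;        /* default 2 GB       */
            unsigned long base  = 0x1C0000000UL;    /* 7 GB: 1 GB aligned */

            for (; size > floor; size >>= 1)
                    if (IS_ALIGNED(base, size))
                            break;

            printf("block size: %#lx\n", size);     /* 0x40000000 (1 GB)  */
            return 0;
    }
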
index 5d0de79fdab06cbffc55dfd80d094ac0f07742e2..ec00d1ff5098b3701b1f1af324be2576676b7786 100644 (file)
 #include <asm/olpc.h>
 #include <asm/paravirt.h>
 #include <asm/reboot.h>
+#include <asm/nospec-branch.h>
 
 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
 extern int (*console_blank_hook)(int);
@@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
        gdt[0x40 / 8] = bad_bios_desc;
 
        apm_irq_save(flags);
+       firmware_restrict_branch_speculation_start();
        APM_DO_SAVE_SEGS;
        apm_bios_call_asm(call->func, call->ebx, call->ecx,
                          &call->eax, &call->ebx, &call->ecx, &call->edx,
                          &call->esi);
        APM_DO_RESTORE_SEGS;
+       firmware_restrict_branch_speculation_end();
        apm_irq_restore(flags);
        gdt[0x40 / 8] = save_desc_40;
        put_cpu();
@@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
        gdt[0x40 / 8] = bad_bios_desc;
 
        apm_irq_save(flags);
+       firmware_restrict_branch_speculation_start();
        APM_DO_SAVE_SEGS;
        error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
                                         &call->eax);
        APM_DO_RESTORE_SEGS;
+       firmware_restrict_branch_speculation_end();
        apm_irq_restore(flags);
        gdt[0x40 / 8] = save_desc_40;
        put_cpu();
index 082d7875cef82eb779b68e5105330482f5a419d8..38915fbfae73d5cfeacbf16624fa49cb9edc7ec7 100644 (file)
@@ -543,7 +543,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
-       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+       if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+           !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+           c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;
 
                switch (c->x86) {
index cd0fda1fff6d3800fbbbf59a19711eba2df0f96c..5c0ea39311fe305ab183cc3f5bde0fe3bf5d1c5f 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
+#include <asm/hypervisor.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
@@ -154,7 +155,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
                /* SSBD controlled in MSR_SPEC_CTRL */
-               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
                if (hostval != guestval) {
@@ -532,9 +534,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
-               if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+                   !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
-               else {
+               } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -664,6 +667,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
 
+               if (hypervisor_is_type(X86_HYPER_XEN_PV))
+                       return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
                break;
 
        case X86_BUG_SPECTRE_V1:
index 38354c66df81144b7d2998ee42fce7a6b15485cd..0c5fcbd998cf11badefad906a2122400a3512d58 100644 (file)
@@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
                        num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
 
                if (num_sharing_cache) {
-                       int bits = get_count_order(num_sharing_cache) - 1;
+                       int bits = get_count_order(num_sharing_cache);
 
                        per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
                }
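
The cacheinfo fix above: num_sharing_cache counts the logical CPUs sharing the last-level cache, and the LLC id is the APIC id shifted right by enough bits to cover that count. get_count_order() already returns ceil(log2(n)), so the extra - 1 under-shifted by one bit and split each shared cache across two LLC ids. A worked example with made-up values:

    #include <stdio.h>

    /* ceil(log2(n)) for n > 0, like the kernel's get_count_order() */
    static int get_count_order(unsigned int n)
    {
            int order = 0;

            while ((1U << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int sharers = 4, apicid = 6;

            /* old: >> 1 put APIC ids 4..7 into two different LLC ids */
            printf("old llc_id=%u, new llc_id=%u\n",
                   apicid >> (get_count_order(sharers) - 1),
                   apicid >> get_count_order(sharers));
            return 0;
    }
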
index 0df7151cfef42cb908c9d76f0b4e78db1620f615..eb4cb3efd20e4cd5ec31d90c1dcf88b62b94622b 100644 (file)
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
 #include <linux/bootmem.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
index 5bbd06f38ff68f58d1efc980db0fd9fc0af7d89a..f34d89c01edc5c761e0df331da1331f8a0f98f3a 100644 (file)
@@ -160,6 +160,11 @@ static struct severity {
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
                USER
                ),
+       MCESEV(
+               PANIC, "Data load in unrecoverable area of kernel",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+               KERNEL
+               ),
 #endif
        MCESEV(
                PANIC, "Action required: unknown MCACOD",
index e4cf6ff1c2e1d341bb5ca890cd8dba266ce2aa18..8c50754c09c1d83c8700824348036715142d11d6 100644 (file)
@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
-       int i, ret = 0;
        char *tmp;
+       int i;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(msr_ops.status(i));
-               if (m->status & MCI_STATUS_VAL) {
-                       __set_bit(i, validp);
-                       if (quirk_no_way_out)
-                               quirk_no_way_out(i, m, regs);
-               }
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               __set_bit(i, validp);
+               if (quirk_no_way_out)
+                       quirk_no_way_out(i, m, regs);
 
                if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       mce_read_aux(m, i);
                        *msg = tmp;
-                       ret = 1;
+                       return 1;
                }
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                lmce = m.mcgstatus & MCG_STATUS_LMCES;
 
        /*
+        * Local machine check may already know that we have to panic.
+        * Broadcast machine check begins rendezvous in mce_start()
         * Go through all banks in exclusion of the other CPUs. This way we
         * don't report duplicated events on shared banks because the first one
-        * to see it will clear it. If this is a Local MCE, then no need to
-        * perform rendezvous.
+        * to see it will clear it.
         */
-       if (!lmce)
+       if (lmce) {
+               if (no_way_out)
+                       mce_panic("Fatal local machine check", &m, msg);
+       } else {
                order = mce_start(&no_way_out);
+       }
 
        for (i = 0; i < cfg->banks; i++) {
                __clear_bit(i, toclear);
@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                        no_way_out = worst >= MCE_PANIC_SEVERITY;
        } else {
                /*
-                * Local MCE skipped calling mce_reign()
-                * If we found a fatal error, we need to panic here.
+                * If there was a fatal machine check we should have
+                * already called mce_panic earlier in this function.
+                * Since we re-read the banks, we might have found
+                * something new. Check again to see if we found a
+                * fatal error. We call "mce_severity()" again to
+                * make sure we have the right "msg".
                 */
-                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
-                       mce_panic("Machine check from unknown source",
-                               NULL, NULL);
+               if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+                       mce_severity(&m, cfg->tolerant, &msg, true);
+                       mce_panic("Local fatal machine check!", &m, msg);
+               }
        }
 
        /*
@@ -2153,9 +2165,6 @@ static ssize_t store_int_with_restart(struct device *s,
        if (check_interval == old_check_interval)
                return ret;
 
-       if (check_interval < 1)
-               check_interval = 1;
-
        mutex_lock(&mce_sysfs_mutex);
        mce_restart();
        mutex_unlock(&mce_sysfs_mutex);
index 1c2cfa0644aa979c97cc01a42925a44c25f9f852..97ccf4c3b45bec517605813b1f24518b10466002 100644 (file)
@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
                        p = memdup_patch(data, size);
                        if (!p)
                                pr_err("Error allocating buffer %p\n", data);
-                       else
+                       else {
                                list_replace(&iter->plist, &p->plist);
+                               kfree(iter->data);
+                               kfree(iter);
+                       }
                }
        }
 
index 4021d3859499c77c14eaa1c40864c752547df68c..40eee6cc412484470daba013f2a197439163707a 100644 (file)
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 
        memset(line, 0, LINE_SIZE);
 
-       length = strncpy_from_user(line, buf, LINE_SIZE - 1);
+       len = min_t(size_t, len, LINE_SIZE - 1);
+       length = strncpy_from_user(line, buf, len);
        if (length < 0)
                return length;
 
index d1f25c83144752272401afe8c8aec313d40298ae..c88c23c658c1e99faad3daa236448bc4208901d7 100644 (file)
@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
 {
        int i;
        u64 end;
+       u64 addr = 0;
 
        /*
         * The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
                struct e820_entry *entry = &e820_table->entries[i];
 
                end = entry->addr + entry->size;
+               if (addr < entry->addr)
+                       memblock_reserve(addr, entry->addr - addr);
+               addr = end;
                if (end != (resource_size_t)end)
                        continue;
 
+               /*
+                * all !E820_TYPE_RAM ranges (including gap ranges) are put
+                * into memblock.reserved to make sure that struct pages in
+                * such regions are not left uninitialized after bootup.
+                */
                if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
-                       continue;
-
-               memblock_add(entry->addr, entry->size);
+                       memblock_reserve(entry->addr, entry->size);
+               else
+                       memblock_add(entry->addr, entry->size);
        }
 
        /* Throw away partial pages: */
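
The e820__memblock_setup() rework above walks the table with a running addr cursor: any hole between the previous entry's end and the next entry's start is reserved, non-RAM entries are reserved instead of skipped, and only RAM is added as usable, so struct pages covering holes and reserved ranges get initialized instead of staying poisoned. A toy walk-through with made-up entries:

    #include <stdio.h>

    struct ent { unsigned long addr, size; int ram; };

    int main(void)
    {
            /* hypothetical table: low RAM, a hole, then a reserved range */
            struct ent t[] = {
                    { 0x0,      0x9f000,  1 },
                    { 0x100000, 0xf00000, 0 },
            };
            unsigned long addr = 0;

            for (int i = 0; i < 2; i++) {
                    if (addr < t[i].addr)   /* the hole gets reserved too */
                            printf("reserve %#lx-%#lx\n", addr, t[i].addr);
                    addr = t[i].addr + t[i].size;
                    printf("%s %#lx-%#lx\n", t[i].ram ? "add" : "reserve",
                           t[i].addr, addr);
            }
            return 0;
    }
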
index da5d8ac600623fad46152cdd708245029d3ca2aa..50d5848bf22efb5ffed1292bd8aa354689d2f7dd 100644 (file)
@@ -338,6 +338,18 @@ static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
        return bsm & INTEL_BSM_MASK;
 }
 
+static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
+                                               resource_size_t stolen_size)
+{
+       u64 bsm;
+
+       bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
+       bsm &= INTEL_BSM_MASK;
+       bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;
+
+       return bsm;
+}
+
 static resource_size_t __init i830_stolen_size(int num, int slot, int func)
 {
        u16 gmch_ctrl;
@@ -498,6 +510,11 @@ static const struct intel_early_ops chv_early_ops __initconst = {
        .stolen_size = chv_stolen_size,
 };
 
+static const struct intel_early_ops gen11_early_ops __initconst = {
+       .stolen_base = gen11_stolen_base,
+       .stolen_size = gen9_stolen_size,
+};
+
 static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_I830_IDS(&i830_early_ops),
        INTEL_I845G_IDS(&i845_early_ops),
@@ -529,6 +546,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
        INTEL_CFL_IDS(&gen9_early_ops),
        INTEL_GLK_IDS(&gen9_early_ops),
        INTEL_CNL_IDS(&gen9_early_ops),
+       INTEL_ICL_11_IDS(&gen11_early_ops),
 };
 
 struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
index a21d6ace648e3006045f5bd13578f3b29d4ea0bf..8047379e575ad39cb47cdbb055131e9bb094bb4d 100644 (file)
@@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 #ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
 unsigned int pgdir_shift __ro_after_init = 39;
 EXPORT_SYMBOL(pgdir_shift);
 unsigned int ptrs_per_p4d __ro_after_init = 1;
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644 (file)
index 0000000..ddeeaac
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+       pushf
+       pop %_ASM_AX
+       ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+       push %_ASM_ARG1
+       popf
+       ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
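
This new irqflags.S pairs with the native_save_fl() change from static inline to extern inline earlier in this section: under the kernel's gnu89/gnu_inline rules, an extern inline definition in a header is inlined at every call site but never emitted as a standalone function, so this file supplies the one real out-of-line symbol for callers that need its address (the paravirt ops table, for one). The same split in plain C, as a hedged two-file sketch (not the kernel's exact arrangement):

    /* ---- sketch of the header: inlined, no copy emitted ---- */
    extern inline __attribute__((gnu_inline)) int twice(int x)
    {
            return 2 * x;
    }

    /* ---- sketch of the one .c file providing the linkable symbol,
     * the role irqflags.S plays for native_save_fl() ---- */
    int twice(int x)
    {
            return 2 * x;
    }
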
index bf8d1eb7fca3d97976b7747f49a5e5d77d18edde..3b8e7c13c614a41fcf4533bd840630c4e3912d8f 100644 (file)
@@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void)
        src = &hv_clock[cpu].pvti;
        tsc_khz = pvclock_tsc_khz(src);
        put_cpu();
+       setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
        return tsc_khz;
 }
 
@@ -319,6 +320,8 @@ void __init kvmclock_init(void)
        printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
                msr_kvm_system_time, msr_kvm_wall_clock);
 
+       pvclock_set_pvti_cpu0_va(hv_clock);
+
        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
                pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 
@@ -366,14 +369,11 @@ int __init kvm_setup_vsyscall_timeinfo(void)
        vcpu_time = &hv_clock[cpu].pvti;
        flags = pvclock_read_flags(vcpu_time);
 
-       if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
-               put_cpu();
-               return 1;
-       }
-
-       pvclock_set_pvti_cpu0_va(hv_clock);
        put_cpu();
 
+       if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+               return 1;
+
        kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
 #endif
        return 0;
index 697a4ce0430827c89be2cbd86caedfac97e884f7..736348ead4218a0007b715efbc1d56bd1bb73e65 100644 (file)
@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
 /* Skylake */
 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
 {
-       u32 capid0;
+       u32 capid0, capid5;
 
        pci_read_config_dword(pdev, 0x84, &capid0);
+       pci_read_config_dword(pdev, 0x98, &capid5);
 
-       if ((capid0 & 0xc0) == 0xc0)
+       /*
+        * CAPID0{7:6} indicate whether this is an advanced RAS SKU.
+        * CAPID5{8:5} indicate that various NVDIMM usage modes are
+        * enabled, so memory machine check recovery is also enabled.
+        */
+       if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
                static_branch_inc(&mcsafe_key);
+
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
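A standalone illustration of the mask arithmetic in the quirk above; the register values are made up, 0xc0 selects CAPID0 bits 7:6 and 0x1e0 selects CAPID5 bits 8:5:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t capid0 = 0x00;    /* hypothetical: not an advanced RAS SKU */
        uint32_t capid5 = 0x020;   /* hypothetical: bit 5 set, an NVDIMM mode */

        int advanced_ras = (capid0 & 0xc0) == 0xc0;  /* both of bits 7:6 */
        int nvdimm_mode  = (capid5 & 0x1e0) != 0;    /* any of bits 8:5 */

        printf("enable memory machine check recovery: %d\n",
               advanced_ras || nvdimm_mode);
        return 0;
}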
index 445ca11ff8634eb27fb93f93fe362fc4bffaf588..92a3b312a53c465bbde5f006b5707b62671a49ae 100644 (file)
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
        if (is_ia32_frame(ksig)) {
index c2f7d1d2a5c36fca041809b727bde36afb347c34..db9656e13ea0418dbf9e619f183bd2176e2d94d6 100644 (file)
@@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused)
 #ifdef CONFIG_X86_32
        /* switch away from the initial page table */
        load_cr3(swapper_pg_dir);
+       /*
+        * Initialize the CR4 shadow before doing anything that could
+        * try to read it.
+        */
+       cr4_init_shadow();
        __flush_tlb_all();
 #endif
        load_current_idt();
index a535dd64de6397b02b3f53cd685584ebf7ebf445..e6db475164edec4f33e6f056cde5cbdfbe51a556 100644 (file)
@@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";
 
-       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
-               return;
        cond_local_irq_enable(regs);
 
        if (!user_mode(regs)) {
-               if (!fixup_exception(regs, trapnr)) {
-                       task->thread.error_code = error_code;
-                       task->thread.trap_nr = trapnr;
+               if (fixup_exception(regs, trapnr))
+                       return;
+
+               task->thread.error_code = error_code;
+               task->thread.trap_nr = trapnr;
+
+               if (notify_die(DIE_TRAP, str, regs, error_code,
+                                       trapnr, SIGFPE) != NOTIFY_STOP)
                        die(str, regs, error_code);
-               }
                return;
        }
 
index 58d8d800875d0c6a3789a0406fec1eed366eecfc..deb576b23b7cf49817533d00555d0dc976c42486 100644 (file)
@@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
        insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
        /* has the side-effect of processing the entire instruction */
        insn_get_length(insn);
-       if (WARN_ON_ONCE(!insn_complete(insn)))
+       if (!insn_complete(insn))
                return -ENOEXEC;
 
        if (is_prefix_bad(insn))
index 92fd433c50b9b5135e4ada92dc8968f4c5ed75d4..1bbec387d289cb785e4acbd28389e5e071fdfdbb 100644 (file)
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
        def_bool y
        bool "AMD Secure Encrypted Virtualization (SEV) support"
        depends on KVM_AMD && X86_64
-       depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+       depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
        ---help---
        Provides support for launching Encrypted VMs on AMD processors.
 
index d594690d8b9597a87f4cba26e8c1be5cb2de22de..6b8f11521c410be2ae902cc10fe3dc63095f822b 100644 (file)
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL);
+               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
index 559a12b6184de38c67ef4f2001963600f41f8753..e30da9a2430cad425c56decdb5dd284c381fd9bc 100644 (file)
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
                MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+                       CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
        return vmcs12->cpu_based_vm_exec_control & bit;
@@ -2560,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
+       unsigned long fs_base, kernel_gs_base;
 #endif
        int i;
 
@@ -2575,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
 
 #ifdef CONFIG_X86_64
-       save_fsgs_for_kvm();
-       vmx->host_state.fs_sel = current->thread.fsindex;
-       vmx->host_state.gs_sel = current->thread.gsindex;
-#else
-       savesegment(fs, vmx->host_state.fs_sel);
-       savesegment(gs, vmx->host_state.gs_sel);
+       if (likely(is_64bit_mm(current->mm))) {
+               save_fsgs_for_kvm();
+               vmx->host_state.fs_sel = current->thread.fsindex;
+               vmx->host_state.gs_sel = current->thread.gsindex;
+               fs_base = current->thread.fsbase;
+               kernel_gs_base = current->thread.gsbase;
+       } else {
+#endif
+               savesegment(fs, vmx->host_state.fs_sel);
+               savesegment(gs, vmx->host_state.gs_sel);
+#ifdef CONFIG_X86_64
+               fs_base = read_msr(MSR_FS_BASE);
+               kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+       }
 #endif
        if (!(vmx->host_state.fs_sel & 7)) {
                vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
@@ -2600,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        savesegment(ds, vmx->host_state.ds_sel);
        savesegment(es, vmx->host_state.es_sel);
 
-       vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
+       vmcs_writel(HOST_FS_BASE, fs_base);
        vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
 
-       vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+       vmx->msr_host_kernel_gs_base = kernel_gs_base;
        if (is_long_mode(&vmx->vcpu))
                wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
@@ -4311,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        vmcs_conf->order = get_order(vmcs_conf->size);
        vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
 
-       /* KVM supports Enlightened VMCS v1 only */
-       if (static_branch_unlikely(&enable_evmcs))
-               vmcs_conf->revision_id = KVM_EVMCS_VERSION;
-       else
-               vmcs_conf->revision_id = vmx_msr_low;
+       vmcs_conf->revision_id = vmx_msr_low;
 
        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
        vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
@@ -4385,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_config.size);
-       vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+
+       /* KVM supports Enlightened VMCS v1 only */
+       if (static_branch_unlikely(&enable_evmcs))
+               vmcs->revision_id = KVM_EVMCS_VERSION;
+       else
+               vmcs->revision_id = vmcs_config.revision_id;
+
        return vmcs;
 }
 
@@ -4553,6 +4575,19 @@ static __init int alloc_kvm_area(void)
                        return -ENOMEM;
                }
 
+               /*
+                * When eVMCS is enabled, alloc_vmcs_cpu() sets
+                * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+                * revision_id reported by MSR_IA32_VMX_BASIC.
+                *
+                * However, even though not explicitly documented by
+                * TLFS, the VMXArea passed as the VMXON argument should
+                * still be marked with the revision_id reported by the
+                * physical CPU.
+                */
+               if (static_branch_unlikely(&enable_evmcs))
+                       vmcs->revision_id = vmcs_config.revision_id;
+
                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
@@ -11620,6 +11655,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
            !nested_cr3_valid(vcpu, vmcs12->host_cr3))
                return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
+       /*
+        * From the Intel SDM, volume 3:
+        * Fields relevant to VM-entry event injection must be set properly.
+        * These fields are the VM-entry interruption-information field, the
+        * VM-entry exception error code, and the VM-entry instruction length.
+        */
+       if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+               u32 intr_info = vmcs12->vm_entry_intr_info_field;
+               u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+               u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+               bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+               bool should_have_error_code;
+               bool urg = nested_cpu_has2(vmcs12,
+                                          SECONDARY_EXEC_UNRESTRICTED_GUEST);
+               bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+               /* VM-entry interruption-info field: interruption type */
+               if (intr_type == INTR_TYPE_RESERVED ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT &&
+                    !nested_cpu_supports_monitor_trap_flag(vcpu)))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: vector */
+               if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+                   (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: deliver error code */
+               should_have_error_code =
+                       intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+                       x86_exception_has_error_code(vector);
+               if (has_error_code != should_have_error_code)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry exception error code */
+               if (has_error_code &&
+                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: reserved bits */
+               if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry instruction length */
+               switch (intr_type) {
+               case INTR_TYPE_SOFT_EXCEPTION:
+               case INTR_TYPE_SOFT_INTR:
+               case INTR_TYPE_PRIV_SW_EXCEPTION:
+                       if ((vmcs12->vm_entry_instruction_len > 15) ||
+                           (vmcs12->vm_entry_instruction_len == 0 &&
+                            !nested_cpu_has_zero_length_injection(vcpu)))
+                               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+               }
+       }
+
        return 0;
 }
 
@@ -11686,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-       u32 msr_entry_idx;
        u32 exit_qual;
        int r;
 
@@ -11708,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
        nested_get_vmcs12_pages(vcpu, vmcs12);
 
        r = EXIT_REASON_MSR_LOAD_FAIL;
-       msr_entry_idx = nested_vmx_load_msr(vcpu,
-                                           vmcs12->vm_entry_msr_load_addr,
-                                           vmcs12->vm_entry_msr_load_count);
-       if (msr_entry_idx)
+       exit_qual = nested_vmx_load_msr(vcpu,
+                                       vmcs12->vm_entry_msr_load_addr,
+                                       vmcs12->vm_entry_msr_load_count);
+       if (exit_qual)
                goto fail;
 
        /*
index 0046aa70205aa2dfbc0577065250be717ca25b4e..2b812b3c50881d2b42738792a7ef1a72cdcb9d66 100644 (file)
@@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = {
 
        MSR_F10H_DECFG,
        MSR_IA32_UCODE_REV,
+       MSR_IA32_ARCH_CAPABILITIES,
 };
 
 static unsigned int num_msr_based_features;
@@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
        switch (msr->index) {
        case MSR_IA32_UCODE_REV:
-               rdmsrl(msr->index, msr->data);
+       case MSR_IA32_ARCH_CAPABILITIES:
+               rdmsrl_safe(msr->index, &msr->data);
                break;
        default:
                if (kvm_x86_ops->get_msr_feature(msr))
index 331993c49dae9bd852c759afecbb3c6c17477e15..257f27620bc272e3312295714a120de07963441f 100644 (file)
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+       static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+                       BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+                       BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+       return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
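The new helper packs the architectural "pushes an error code" property into one bitmask. A standalone sketch with the vector numbers spelled out (8 #DF, 10 #TS, 11 #NP, 12 #SS, 13 #GP, 14 #PF, 17 #AC); the bounds check is added here only to keep the sketch self-contained:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

static int has_error_code(unsigned int vector)
{
        static const uint32_t mask = BIT(8)  | BIT(10) | BIT(11) |
                                     BIT(12) | BIT(13) | BIT(14) | BIT(17);

        return vector < 32 && ((1U << vector) & mask) != 0;
}

int main(void)
{
        /* #PF (vector 14) pushes an error code, #UD (vector 6) does not */
        printf("#PF: %d, #UD: %d\n", has_error_code(14), has_error_code(6));
        return 0;
}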
index 9a84a0d08727b7452ebea1e2e35b5ad3eb0b6e79..2aafa6ab6103d150ad26e097221a972f5d16363f 100644 (file)
@@ -641,11 +641,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
        return 0;
 }
 
-static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
-static const char smep_warning[] = KERN_CRIT
-"unable to execute userspace code (SMEP?) (uid: %d)\n";
-
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
@@ -664,20 +659,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                pte = lookup_address_in_pgd(pgd, address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
-                       printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (__read_cr4() & X86_CR4_SMEP))
-                       printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
        }
 
-       printk(KERN_ALERT "BUG: unable to handle kernel ");
-       if (address < PAGE_SIZE)
-               printk(KERN_CONT "NULL pointer dereference");
-       else
-               printk(KERN_CONT "paging request");
-
-       printk(KERN_CONT " at %px\n", (void *) address);
+       pr_alert("BUG: unable to handle kernel %s at %px\n",
+                address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
+                (void *)address);
 
        dump_pagetable(address);
 }
index 045f492d5f68260a581f44c210aa3753dc4bc225..a688617c727e1ec3558e158156c98b4632b0d9a2 100644 (file)
@@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
 /* Amount of ram needed to start using large blocks */
 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
 
+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+       unsigned long size = 1UL << order;
+
+       if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+               return -EINVAL;
+
+       set_memory_block_size = size;
+       return 0;
+}
+
 static unsigned long probe_memory_block_size(void)
 {
        unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
        unsigned long bz;
 
-       /* If this is UV system, always set 2G block size */
-       if (is_uv_system()) {
-               bz = MAX_BLOCK_SIZE;
+       /* If memory block size has been set, then use it */
+       bz = set_memory_block_size;
+       if (bz)
                goto done;
-       }
 
        /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
        if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
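A worked example of the order-to-size mapping that set_memory_block_size_order() validates, assuming MIN_MEMORY_BLOCK_SIZE is the 128 MiB x86-64 section size (order 27) and the upper bound is the 64 GiB MEM_SIZE_FOR_LARGE_BLOCK constant (order 36):

#include <stdio.h>

int main(void)
{
        for (unsigned int order = 26; order <= 37; order++) {
                unsigned long size = 1UL << order;
                int ok = size >= (1UL << 27) &&   /* MIN_MEMORY_BLOCK_SIZE */
                         size <= (64UL << 30);    /* MEM_SIZE_FOR_LARGE_BLOCK */

                printf("order %2u -> %6lu MiB blocks: %s\n",
                       order, size >> 20, ok ? "accepted" : "-EINVAL");
        }
        return 0;
}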
index e01f7ceb9e7a17436eb71634c5467bbb20a2a2de..77873ce700ae7d703a3385719282dbd31a092ed9 100644 (file)
@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
-               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+               if (!pgd_present(*pgd))
                        continue;
 
                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
 
-                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                       if (!p4d_present(*p4d))
                                continue;
 
                        pud = (pud_t *)p4d_page_vaddr(*p4d);
index 2e9ee023e6bcff25055bf5e05b0fc597d75f49bb..81a8e33115ad5b72d53fc0829930d60238dbb0c2 100644 (file)
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
        $(call if_changed_rule,cc_o_c)
 
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
index 744afdc18cf3a0ee8fab8624520fa8b2db2d2fd5..56c44d865f7bedb628aba3f0de218a1b5dcb9ccd 100644 (file)
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
        if (!FIXADDR_USER_START)
                return 0;
 
-       gate_vma.vm_mm = NULL;
+       vma_init(&gate_vma, NULL);
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
index c9081c6671f0b7a05ecfaaf206e7e1ed2b1f456a..3b5318505c69c487f8cfc9c46c93c526197caef6 100644 (file)
@@ -64,6 +64,13 @@ struct shared_info xen_dummy_shared_info;
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
index 357969a3697cc7af6e08c12144ec06f43a8841ad..439a94bf89adb7d013afc084e0e69da6f25222db 100644 (file)
@@ -1203,15 +1203,24 @@ asmlinkage __visible void __init xen_start_kernel(void)
                return;
 
        xen_domain_type = XEN_PV_DOMAIN;
+       xen_start_flags = xen_start_info->flags;
 
        xen_setup_features();
 
-       xen_setup_machphys_mapping();
-
        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops.patch = paravirt_patch_default;
        pv_cpu_ops = xen_cpu_ops;
+       xen_init_irq_ops();
+
+       /*
+        * Setup xen_vcpu early because it is needed for
+        * local_irq_disable(), irqs_disabled(), e.g. in printk().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
 
        x86_platform.get_nmi_reason = xen_get_nmi_reason;
 
@@ -1224,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
         * Set up some pagetable state before starting to set any ptes.
         */
 
+       xen_setup_machphys_mapping();
        xen_init_mmu_ops();
 
        /* Prevent unwanted bits from being set in PTEs. */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
+       __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
 
        /*
         * Prevent page tables from being allocated in highmem, even
@@ -1248,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
        get_cpu_cap(&boot_cpu_data);
        x86_configure_nx();
 
-       xen_init_irq_ops();
-
        /* Let's presume PV guests always boot on vCPU with id 0. */
        per_cpu(xen_vcpu_id, 0) = 0;
 
-       /*
-        * Setup xen_vcpu early because idt_setup_early_handler needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        idt_setup_early_handler();
 
        xen_init_capabilities();
index aa1c6a6831a94dd383e11c575a2a0c91f32136b5..c85d1a88f47693232369411588cfc19084086b25 100644 (file)
@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
        }
 
        xen_pvh = 1;
+       xen_start_flags = pvh_start_info.flags;
 
        msr = cpuid_ebx(xen_cpuid_base() + 2);
        pfn = __pa(hypercall_page);
index 74179852e46c31108adf405e86230c3830add94a..7515a19fd324b54e15d5b6deb632e385913ce4fa 100644 (file)
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
 
 void __init xen_init_irq_ops(void)
 {
-       /* For PVH we use default pv_irq_ops settings. */
-       if (!xen_feature(XENFEAT_hvm_callback_vector))
-               pv_irq_ops = xen_irq_ops;
+       pv_irq_ops = xen_irq_ops;
        x86_init.irqs.intr_init = xen_init_IRQ;
 }
index 2e20ae2fa2d6c3b865f2c745ad9896a752954907..e3b18ad49889afc5ae35d2e2796aecd108a93819 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/interface/vcpu.h>
 #include <xen/interface/xenpmu.h>
 
+#include <asm/spec-ctrl.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
 
@@ -70,6 +71,8 @@ static void cpu_bringup(void)
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);
 
+       speculative_store_bypass_ht_init();
+
        xen_setup_cpu_clockevents();
 
        notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
+       speculative_store_bypass_ht_init();
+
        xen_pmu_init(0);
 
        if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
index 9710e275f23079b8b7548ee935ab653036e82c5d..047c5dca6d90260f0a06f1a5d69b95d61a41460e 100644 (file)
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
 EXPORT_SYMBOL(bio_add_page);
 
 /**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be mapped
  *
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
  * pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from
+ * the next non-empty segment of the iov iterator.
  */
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
-       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+       unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
        struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
        struct page **pages = (struct page **)bv;
-       size_t offset, diff;
+       size_t offset;
        ssize_t size;
 
        size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
        if (unlikely(size <= 0))
                return size ? size : -EFAULT;
-       nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 
        /*
         * Deep magic below:  We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        bio->bi_iter.bi_size += size;
        bio->bi_vcnt += nr_pages;
 
-       diff = (nr_pages * PAGE_SIZE - offset) - size;
-       while (nr_pages--) {
-               bv[nr_pages].bv_page = pages[nr_pages];
-               bv[nr_pages].bv_len = PAGE_SIZE;
-               bv[nr_pages].bv_offset = 0;
+       while (idx--) {
+               bv[idx].bv_page = pages[idx];
+               bv[idx].bv_len = PAGE_SIZE;
+               bv[idx].bv_offset = 0;
        }
 
        bv[0].bv_offset += offset;
        bv[0].bv_len -= offset;
-       if (diff)
-               bv[bio->bi_vcnt - 1].bv_len -= diff;
+       bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
 
        iov_iter_advance(iter, size);
        return 0;
 }
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whichever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * An error is returned only if no pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+       unsigned short orig_vcnt = bio->bi_vcnt;
+
+       do {
+               int ret = __bio_iov_iter_get_pages(bio, iter);
+
+               if (unlikely(ret))
+                       return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+       } while (iov_iter_count(iter) && !bio_full(bio));
+
+       return 0;
+}
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
 
 static void submit_bio_wait_endio(struct bio *bio)
@@ -1807,9 +1834,6 @@ again:
        if (!bio_integrity_endio(bio))
                return;
 
-       if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
-               bio->bi_next = NULL;
-
        /*
         * Need to have a real endio function for chained bios, otherwise
         * various corner cases will break (like stacking block devices that
@@ -1869,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
                bio_integrity_trim(split);
 
        bio_advance(bio, split->bi_iter.bi_size);
+       bio->bi_iter.bi_done = 0;
 
        if (bio_flagged(bio, BIO_TRACE_COMPLETION))
                bio_set_flag(split, BIO_TRACE_COMPLETION);
index cf0ee764b908b384f69be9efbb9d7a1352eb7a52..f84a9b7b6f5aa167c5559079f095e3d3dff28f0d 100644 (file)
@@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       /*
-        * XXX this code looks suspicious - it's not consistent with advancing
-        * req->bio in caller
-        */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
 }
@@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_iter.bi_size) {
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
-                       bio->bi_next = NULL;
-               }
 
                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
@@ -3479,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
        dst->cpu = src->cpu;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
+       if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+               dst->special_vec = src->special_vec;
+       }
        dst->nr_phys_segments = src->nr_phys_segments;
        dst->ioprio = src->ioprio;
        dst->extra_len = src->extra_len;
index ffa622366922fed04e9dbd2606ffa294b25a697b..1c4532e9293800662d92b809d78637e06607fd90 100644 (file)
@@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = {
 
 static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
 {
-       if (WARN_ON_ONCE((unsigned int)rq_state >
+       if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
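The one-character fix above closes a classic off-by-one: for an N-entry name array, index N is already out of bounds, so the guard must be '>=', not '>'. A standalone reproduction:

#include <stdio.h>

static const char *const names[] = { "idle", "in_flight", "complete" };
#define NR_NAMES (sizeof(names) / sizeof(names[0]))

static const char *name_of(unsigned int state)
{
        if (state >= NR_NAMES)   /* with '>', state == NR_NAMES over-reads */
                return "(?)";
        return names[state];
}

int main(void)
{
        printf("%s %s\n", name_of(1), name_of(3));  /* in_flight (?) */
        return 0;
}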
index 70c65bb6c0131c84130fae44808acb51cf427ace..654b0dc7e00191c5d61c35b249037d36a2c98932 100644 (file)
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
        bool shared = false;
        int cpu;
 
-       if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
-                       MQ_RQ_IN_FLIGHT)
+       if (!blk_mq_mark_complete(rq))
                return;
-
        if (rq->internal_tag != -1)
                blk_mq_sched_completed_request(rq);
 
@@ -781,7 +779,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }
 
-       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_add_timer(req);
 }
 
@@ -1076,6 +1073,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 
 #define BLK_MQ_RESOURCE_DELAY  3               /* ms units */
 
+/*
+ * Returns true if we did some work AND can potentially do more.
+ */
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                             bool got_budget)
 {
@@ -1206,8 +1206,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                        blk_mq_run_hw_queue(hctx, true);
                else if (needs_restart && (ret == BLK_STS_RESOURCE))
                        blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+
+               return false;
        }
 
+       /*
+        * If the host/device is unable to accept more work, inform the
+        * caller of that.
+        */
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               return false;
+
        return (queued + errors) != 0;
 }
 
index 01e2b353a2b9aadc2b5d20fe227739d1d0ae296b..15c1f5e12eb89460bc42eb7f5807eaa03254e51d 100644 (file)
@@ -144,6 +144,7 @@ do_local:
 
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL(__blk_complete_request);
 
 /**
  * blk_complete_request - end I/O on a request
index 4b8a48d48ba13394cf0ae7a7dc0016696ae5efd6..f2cfd56e1606ed9d8e1da979a1e1e6cdcb506a38 100644 (file)
@@ -210,6 +210,7 @@ void blk_add_timer(struct request *req)
        if (!req->timeout)
                req->timeout = q->rq_timeout;
 
+       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_rq_set_deadline(req, jiffies + req->timeout);
 
        /*
index 66602c48995643dcff921e6f10bba8cd203d3c5c..3da540faf6735c2c3ccb6a81ae2ffb4443aedd3c 100644 (file)
@@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
        } else if (hdr->din_xfer_len) {
                ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
                                hdr->din_xfer_len, GFP_KERNEL);
-       } else {
-               ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
        }
 
        if (ret)
index 945f4b8610e0c7d85242b500141d4bc1c0f671a2..e0de4dd448b3c7238e8656b572de72206302bf87 100644 (file)
@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
@@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
index 150d82da8e996d8c28be4ae3e4bd64a0331c9c08..1efd6fa0dc608c2a3d598b56c798f3e772a2bdbc 100644 (file)
@@ -1,3 +1,3 @@
 #include <linux/kernel.h>
 
-extern const char __initdata *const blacklist_hashes[];
+extern const char __initconst *const blacklist_hashes[];
index 49fa8582138b2df45e087f3a31a80ac5d5bbdc2a..c166f424871c86a356b15eff8bdd3b2be6406a87 100644 (file)
@@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
+/**
+ * af_alg_poll - poll system call handler
+ */
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct af_alg_ctx *ctx = ask->private;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (!ctx->more || ctx->used)
                mask |= EPOLLIN | EPOLLRDNORM;
@@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL_GPL(af_alg_poll_mask);
+EXPORT_SYMBOL_GPL(af_alg_poll);
 
 /**
  * af_alg_alloc_areq - allocate struct af_alg_async_req
@@ -1148,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-               if (err < 0)
+               if (err < 0) {
+                       rsgl->sg_num_bytes = 0;
                        return err;
+               }
 
                /* chain the new scatterlist with previous one */
                if (areq->last_rsgl)
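A kernel-style sketch (not standalone) of the ->poll contract the af_alg conversion restores: with the ->poll_mask infrastructure gone, the handler itself must register the wait entry on the socket's waitqueue before computing the readiness mask:

static __poll_t example_poll(struct file *file, struct socket *sock,
                             poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        /* arm the waitqueue first; the core no longer does this for us */
        sock_poll_wait(file, sk_sleep(sk), wait);

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
}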
index 825524f274384fdfd2a569be01e593d8f41a72b2..c40a8c7ee8aedcb0f6adb3afb1e0bb60a233d68c 100644 (file)
@@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = {
        .sendmsg        =       aead_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       aead_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int aead_check_key(struct socket *sock)
@@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = {
        .sendmsg        =       aead_sendmsg_nokey,
        .sendpage       =       aead_sendpage_nokey,
        .recvmsg        =       aead_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
index 4c04eb9888adf82f68a18d17c9d6e73adc74aa90..cfdaab2b7d766d517e239687bf2232e09a749991 100644 (file)
@@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = {
        .sendmsg        =       skcipher_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       skcipher_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int skcipher_check_key(struct socket *sock)
@@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
        .sendmsg        =       skcipher_sendmsg_nokey,
        .sendpage       =       skcipher_sendpage_nokey,
        .recvmsg        =       skcipher_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *skcipher_bind(const char *name, u32 type, u32 mask)
index 7d81e6bb461a330a225658dde9b002b3b24e26bc..b6cabac4b62ba6b920cb5947c56db5839711bcc7 100644 (file)
@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
                return -EINVAL;
        }
 
+       if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+               /* Discard the BIT STRING metadata */
+               if (vlen < 1 || *(const u8 *)value != 0)
+                       return -EBADMSG;
+
+               value++;
+               vlen--;
+       }
+
        ctx->cert->raw_sig = value;
        ctx->cert->raw_sig_size = vlen;
        return 0;
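A standalone illustration of the DER framing being stripped above: in X.509, the signature sits inside a BIT STRING whose first content octet counts unused trailing bits, and for RSA that octet must be zero (the bytes below are made up):

#include <stdio.h>

int main(void)
{
        /* tag 0x03 (BIT STRING), length 5:
         * one unused-bits octet + four signature bytes */
        unsigned char der[] = { 0x03, 0x05, 0x00, 0xde, 0xad, 0xbe, 0xef };
        const unsigned char *value = &der[2];   /* BIT STRING contents */
        unsigned long vlen = sizeof(der) - 2;

        if (vlen < 1 || value[0] != 0)
                return 1;                       /* -EBADMSG in the kernel */
        value++;                                /* discard the metadata octet */
        vlen--;

        printf("raw signature: %lu bytes, first 0x%02x\n", vlen, value[0]);
        return 0;
}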
index 9fbcde307daf90b554ac5e96da627f0f77eb24e9..5eede3749e646b425614aa86de9143c82545fcc6 100644 (file)
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
                union morus640_block_in tail;
 
                memcpy(tail.bytes, src, size);
+               memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
 
-               crypto_morus640_load_a(&m, src);
+               crypto_morus640_load_a(&m, tail.bytes);
                crypto_morus640_core(state, &m);
                crypto_morus640_store_a(tail.bytes, &m);
                memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
index 264ec12c0b9c334a16d743651771a18b616616a0..7f6735d9003f13c1e4adb7ffde8d8be5cc700fe1 100644 (file)
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
        st[24] ^= bc[ 4];
 }
 
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
        int round;
 
index 38a286975c31e152206b3e55b28473a2763a717a..f8fecfec5df9b85be3e0d6c78a54087952140fdc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 
 #include "internal.h"
@@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void)
        mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+       bool wakeup = runtime || device_may_wakeup(dev);
        int ret;
 
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
         * wrong status for devices being about to be powered off. See
         * lpss_iosf_enter_d3_state() for further information.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if ((runtime || !pm_suspend_via_firmware()) &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();
 
        return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
@@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev)
         * This call is kept first to be in symmetry with
         * acpi_lpss_runtime_suspend() one.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if ((runtime || !pm_resume_via_firmware()) &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_exit_d3_state();
 
        ret = acpi_dev_resume(dev);
@@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
                return 0;
 
        ret = pm_generic_suspend_late(dev);
-       return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+       return ret ? ret : acpi_lpss_suspend(dev, false);
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev);
+       int ret = acpi_lpss_resume(dev, false);
 
        return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev);
+       int ret = acpi_lpss_resume(dev, true);
 
        return ret ? ret : pm_generic_runtime_resume(dev);
 }
index fc0c2e2328cd35218c71a2db56f3634cce4a0d46..fe9d46d81750792350270c4f6c1920a77e236a05 100644 (file)
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
                return_ACPI_STATUS(status);
        }
 
-       /*
-        * 1) Disable all GPEs
-        * 2) Enable all wakeup GPEs
-        */
+       /* Disable all GPEs */
        status = acpi_hw_disable_all_gpes();
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
+       /*
+        * If the target sleep state is S5, clear all GPEs and fixed events too
+        */
+       if (sleep_state == ACPI_STATE_S5) {
+               status = acpi_hw_clear_acpi_status();
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
        acpi_gbl_system_awake_and_running = FALSE;
 
+        /* Enable all wakeup GPEs */
        status = acpi_hw_enable_all_wakeup_gpes();
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
index bc5f05906bd1c871403896c3a2bce9122f3a67df..ee840be150b5e0fff900488274b40b19027aca6c 100644 (file)
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                        status =
                            acpi_ps_create_op(walk_state, aml_op_start, &op);
                        if (ACPI_FAILURE(status)) {
+                               /*
+                                * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+                                * executing it as a control method. However, if we encounter
+                                * an error while loading the table, we need to keep trying to
+                                * load the table rather than aborting the table load. Set the
+                                * status to AE_OK to proceed with the table load.
+                                */
+                               if ((walk_state->
+                                    parse_flags & ACPI_PARSE_MODULE_LEVEL)
+                                   && status == AE_ALREADY_EXISTS) {
+                                       status = AE_OK;
+                               }
                                if (status == AE_CTRL_PARSE_CONTINUE) {
                                        continue;
                                }
@@ -694,6 +706,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                            acpi_ps_next_parse_state(walk_state, op, status);
                        if (status == AE_CTRL_PENDING) {
                                status = AE_OK;
+                       } else
+                           if ((walk_state->
+                                parse_flags & ACPI_PARSE_MODULE_LEVEL)
+                               && ACPI_FAILURE(status)) {
+                               /*
+                                * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+                                * executing it as a control method. However, if we encounter
+                                * an error while loading the table, we need to keep trying to
+                                * load the table rather than aborting the table load. Set the
+                                * status to AE_OK to proceed with the table load. If we get a
+                                * failure at this point, it means that the dispatcher got an
+                                * error while processing Op (most likely an AML operand error).
+                                */
+                               status = AE_OK;
                        }
                }
 
index 5a64ddaed8a3782f94e278424368a7ce7167bfbb..e474302726926dd0c997c9432da0f9488af65011 100644 (file)
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
        switch (lookup_status) {
        case AE_ALREADY_EXISTS:
 
-               acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
                message = "Failure creating";
                break;
 
        case AE_NOT_FOUND:
 
-               acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
                message = "Could not resolve";
                break;
 
        default:
 
-               acpi_os_printf("\n" ACPI_MSG_ERROR);
+               acpi_os_printf(ACPI_MSG_ERROR);
                message = "Failure resolving";
                break;
        }
index b0113a5802a3c073f5787de456bc601f0f8c11cd..d79ad844c78fcee1e51cfa7cde066363e45f51ef 100644 (file)
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
                         */
                        pr_err("extension failed to load: %s", hook->name);
                        __battery_hook_unregister(hook, 0);
-                       return;
+                       goto end;
                }
        }
        pr_info("new extension: %s\n", hook->name);
+end:
        mutex_unlock(&hook_mutex);
 }
 EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
 */
 static void battery_hook_add_battery(struct acpi_battery *battery)
 {
-       struct acpi_battery_hook *hook_node;
+       struct acpi_battery_hook *hook_node, *tmp;
 
        mutex_lock(&hook_mutex);
        INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
         * when a battery gets hotplugged or initialized
         * during the battery module initialization.
         */
-       list_for_each_entry(hook_node, &battery_hook_list, list) {
+       list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
                if (hook_node->add_battery(battery->bat)) {
                        /*
                         * The notification of the extensions has failed, to
                         * prevent further errors we will unload the extension.
                         */
-                       __battery_hook_unregister(hook_node, 0);
                        pr_err("error in extension, unloading: %s",
                                        hook_node->name);
+                       __battery_hook_unregister(hook_node, 0);
                }
        }
        mutex_unlock(&hook_mutex);
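A kernel-style sketch (not standalone) of why the iterator swap matters: the unregister path list_del()s the node the loop is standing on, which only the _safe variant tolerates, because it caches the successor before the body runs:

struct acpi_battery_hook *hook, *tmp;

list_for_each_entry_safe(hook, tmp, &battery_hook_list, list) {
        if (hook->add_battery(battery->bat)) {
                pr_err("error in extension, unloading: %s", hook->name);
                __battery_hook_unregister(hook, 0);  /* list_del()s 'hook' */
        }
        /* safe to continue: the next node was saved in 'tmp' up front */
}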
index bb94cf0731feb92b89b78cec274668fe204ed1e4..917f77f4cb556b9f4a1061c9b10dea72f8a9d81f 100644 (file)
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
        }
 }
 
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+       {
+               .ident = "Thinkpad X1 Carbon 6th",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
+               },
+       },
+       { },
+};
+
 int __init acpi_ec_init(void)
 {
        int result;
@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
        if (result)
                return result;
 
+       /*
+        * Disable EC wakeup on the following systems to prevent periodic
+        * wakeup from EC GPE.
+        */
+       if (dmi_check_system(acpi_ec_no_wakeup)) {
+               ec_no_wakeup = true;
+               pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+       }
+
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
        /*
index d15814e1727fad991bf8c24c2d7fa728ad1bfcad..7c479002e798bf92f3dc58263c3c2064182922bb 100644 (file)
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        const guid_t *guid;
        int rc, i;
 
+       if (cmd_rc)
+               *cmd_rc = -EINVAL;
        func = cmd;
        if (cmd == ND_CMD_CALL) {
                call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                 * If we return an error (like elsewhere) then caller wouldn't
                 * be able to rely upon data returned to make calculation.
                 */
+               if (cmd_rc)
+                       *cmd_rc = 0;
                return 0;
        }
 
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
                mutex_lock(&acpi_desc->init_mutex);
                rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-                               work_busy(&acpi_desc->dwork.work)
+                               acpi_desc->scrub_busy
                                && !acpi_desc->cancel ? "+\n" : "\n");
                mutex_unlock(&acpi_desc->init_mutex);
        }
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
        return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+       lockdep_assert_held(&acpi_desc->init_mutex);
+
+       acpi_desc->scrub_busy = 1;
+       /* note this should only be set from within the workqueue */
+       if (tmo)
+               acpi_desc->scrub_tmo = tmo;
+       queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+       __sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+       lockdep_assert_held(&acpi_desc->init_mutex);
+
+       acpi_desc->scrub_busy = 0;
+       acpi_desc->scrub_count++;
+       if (acpi_desc->scrub_count_state)
+               sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
        struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
        mutex_lock(&acpi_desc->init_mutex);
        query_rc = acpi_nfit_query_poison(acpi_desc);
        tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-       if (tmo) {
-               queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-               acpi_desc->scrub_tmo = tmo;
-       } else {
-               acpi_desc->scrub_count++;
-               if (acpi_desc->scrub_count_state)
-                       sysfs_notify_dirent(acpi_desc->scrub_count_state);
-       }
+       if (tmo)
+               __sched_ars(acpi_desc, tmo);
+       else
+               notify_ars_done(acpi_desc);
        memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
        mutex_unlock(&acpi_desc->init_mutex);
 }
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
                        break;
                }
 
-       queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+       sched_ars(acpi_desc);
        return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
                }
        }
        if (scheduled) {
-               queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+               sched_ars(acpi_desc);
                dev_dbg(dev, "ars_scan triggered\n");
        }
        mutex_unlock(&acpi_desc->init_mutex);
index 7d15856a739f9dc70cbb4e325d95395829cb6b63..a97ff42fe311bfa5041f54d67124aed4b85deb4d 100644 (file)
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
        unsigned int max_ars;
        unsigned int scrub_count;
        unsigned int scrub_mode;
+       unsigned int scrub_busy:1;
        unsigned int cancel:1;
        unsigned long dimm_cmd_force_en;
        unsigned long bus_cmd_force_en;
index 7ca41bf023c9f354cad85617f0cceab228640d65..8df9abfa947b0dca4719c674fd645d187ade242d 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/uaccess.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
 #include "internal.h"
 
 #define _COMPONENT             ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+                                             void *_res, void **return_value)
+{
+       struct acpi_mem_space_context **mem_ctx;
+       union acpi_operand_object *handler_obj;
+       union acpi_operand_object *region_obj2;
+       union acpi_operand_object *region_obj;
+       struct resource *res = _res;
+       acpi_status status;
+
+       region_obj = acpi_ns_get_attached_object(handle);
+       if (!region_obj)
+               return AE_OK;
+
+       handler_obj = region_obj->region.handler;
+       if (!handler_obj)
+               return AE_OK;
+
+       if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               return AE_OK;
+
+       if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+               return AE_OK;
+
+       region_obj2 = acpi_ns_get_secondary_object(region_obj);
+       if (!region_obj2)
+               return AE_OK;
+
+       mem_ctx = (void *)&region_obj2->extra.region_context;
+
+       if (!(mem_ctx[0]->address >= res->start &&
+             mem_ctx[0]->address < res->end))
+               return AE_OK;
+
+       status = handler_obj->address_space.setup(region_obj,
+                                                 ACPI_REGION_DEACTIVATE,
+                                                 NULL, (void **)mem_ctx);
+       if (ACPI_SUCCESS(status))
+               region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+       return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on a
+ * memory region that may overlap with operation regions, primarily allowing
+ * them to safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped next time they
+ * are called, so the drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level)
+{
+       if (!(res->flags & IORESOURCE_MEM))
+               return AE_TYPE;
+
+       return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+                                  acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
 /*
  * Let drivers know whether the resource checks are effective
  */
index e5ea1974d1e3820db7e97f8437a7931de70784f8..d1e26cb599bfca340e076500b9e27ec2f3c0bc73 100644 (file)
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
        if (cpu_node) {
                cpu_node = acpi_find_processor_package_id(table, cpu_node,
                                                          level, flag);
-               /* Only the first level has a guaranteed id */
-               if (level == 0)
+               /*
+                * Per the specification, if the processor structure represents
+                * an actual processor, the ACPI processor ID must be valid.
+                * For processor containers, ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+                * should be set if the UID is valid.
+                */
+               if (level == 0 ||
+                   cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
                        return cpu_node->acpi_processor_id;
                return ACPI_PTR_DIFF(cpu_node, table);
        }
index 2b16e7c8fff357645d3cec4069330b89985347bb..39b181d6bd0d8cf2cbcd9dde1cf89b373ecae6a4 100644 (file)
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
        tristate "Calxeda Highbank SATA support"
-       depends on HAS_DMA
        depends on ARCH_HIGHBANK || COMPILE_TEST
        help
          This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
 
 config SATA_MV
        tristate "Marvell SATA support"
-       depends on HAS_DMA
        depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
                   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
        select GENERIC_PHY
index 738fb22978ddcd14ad1956c5119972f19b17d2a6..b2b9eba1d214765723165f1d3d7c1bda64720207 100644 (file)
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
        { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
        { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+       { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
        return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               /* Various Lenovo 50 series have LPM issues with older BIOSen */
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+                       },
+                       .driver_data = "20180406", /* 1.31 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+                       },
+                       .driver_data = "20180420", /* 1.28 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+                       },
+                       .driver_data = "20180315", /* 1.33 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+                       },
+                       /*
+                        * Note: date based on release notes; 2.35 has been
+                        * reported to be good, but I've been unable to get
+                        * hold of the reporter to confirm the DMI BIOS date.
+                        * TODO: fix this.
+                        */
+                       .driver_data = "20180310", /* 2.35 */
+               },
+               { }     /* terminate list */
+       };
+       const struct dmi_system_id *dmi = dmi_first_match(sysids);
+       int year, month, date;
+       char buf[9];
+
+       if (!dmi)
+               return false;
+
+       dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+       snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+       return strcmp(buf, dmi->driver_data) < 0;
+}
+
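The strcmp() cutoff above works because zero-padded YYYYMMDD strings sort lexicographically in the same order as the dates they encode. A standalone illustration (dates made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	int year = 2018, month = 3, date = 15;	/* as from dmi_get_date() */
	char buf[9];				/* "YYYYMMDD" + NUL */

	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
	/* "20180315" < "20180406": this BIOS predates the fixed release,
	 * so ATA_FLAG_NO_LPM would be set for the machine.
	 */
	printf("broken LPM: %d\n", strcmp(buf, "20180406") < 0);
	return 0;
}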
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func)                       \
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        "quirky BIOS, skipping spindown on poweroff\n");
        }
 
+       if (ahci_broken_lpm(pdev)) {
+               pi.flags |= ATA_FLAG_NO_LPM;
+               dev_warn(&pdev->dev,
+                        "BIOS update required for Link Power Management support\n");
+       }
+
        if (ahci_broken_suspend(pdev)) {
                hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
                dev_warn(&pdev->dev,
index 0045dacd814b44ec21f87e4acceb07e69056f214..72d90b4c3aaefa4b9051d02383b55c9e3899072b 100644 (file)
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
  *
  * Return: 0 on success; Error code otherwise.
  */
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp, port_fbs;
index 965842a08743f38d08d4a4c58cc345ffc4c81f97..09620c2ffa0f72e1a696d10d3e4480818b101e51 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
        /* get the slot number from the message */
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-       if (pmp < EM_MAX_SLOTS)
+       if (pmp < EM_MAX_SLOTS) {
+               pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
                emp = &pp->em_priv[pmp];
-       else
+       } else {
                return -EINVAL;
+       }
 
        /* mask off the activity bits if we are in sw_activity
         * mode, user should turn off sw_activity before setting
index 27d15ed7fa3d03771f020cf064749f6f9fe38633..cc71c63df3819f8da0ff312fed83dc17d706136c 100644 (file)
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
            (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
                dev->horkage |= ATA_HORKAGE_NOLPM;
 
+       if (ap->flags & ATA_FLAG_NO_LPM)
+               dev->horkage |= ATA_HORKAGE_NOLPM;
+
        if (dev->horkage & ATA_HORKAGE_NOLPM) {
                ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
                dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
index d5412145d76d60c2cc1393f07315bff2431c28a0..01306c018398fa16583cab46bd1e51b9ccf86309 100644 (file)
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
                list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
                        struct ata_queued_cmd *qc;
 
-                       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-                               qc = __ata_qc_from_tag(ap, i);
+                       ata_qc_for_each_raw(ap, qc, i) {
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
+       struct ata_queued_cmd *qc;
        unsigned int tag;
        int nr = 0;
 
        /* count only non-internal commands */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               if (ata_tag_internal(tag))
-                       continue;
-               if (ata_qc_from_tag(ap, tag))
+       ata_qc_for_each(ap, qc, tag) {
+               if (qc)
                        nr++;
        }
 
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
                goto out_unlock;
 
        if (cnt == ap->fastdrain_cnt) {
+               struct ata_queued_cmd *qc;
                unsigned int tag;
 
                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
-               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-                       struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+               ata_qc_for_each(ap, qc, tag) {
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
+       struct ata_queued_cmd *qc;
        int tag, nr_aborted = 0;
 
        WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
        ata_eh_set_pending(ap, 0);
 
        /* include internal tag in iteration */
-       for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_with_internal(ap, qc, tag) {
                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
                return;
 
        /* has LLDD analyzed already? */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
 
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 {
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_queued_cmd *qc;
        struct ata_device *dev;
        unsigned int all_err_mask = 0, eflags = 0;
        int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 
        all_err_mask |= ehc->i.err_mask;
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link)
                        continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
 {
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_queued_cmd *qc;
        const char *frozen, *desc;
        char tries_buf[6] = "";
        int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
        if (ehc->i.desc[0] != '\0')
                desc = ehc->i.desc;
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link ||
                    ((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
                  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
 #endif
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+       ata_qc_for_each_raw(ap, qc, tag) {
                struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
                char data_buf[20] = "";
                char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
  */
 void ata_eh_finish(struct ata_port *ap)
 {
+       struct ata_queued_cmd *qc;
        int tag;
 
        /* retry or finish qcs */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
 
index 6a91d04351d9b64d20251febd070ad28bfb3eb38..aad1b01447de6924df4b1c1713d5fc402df112a0 100644 (file)
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
                 */
                goto invalid_param_len;
        }
-       if (block > dev->n_sectors)
-               goto out_of_range;
 
        all = cdb[14] & 0x1;
+       if (all) {
+               /*
+                * Ignore the block address (zone ID) as defined by ZBC.
+                */
+               block = 0;
+       } else if (block >= dev->n_sectors) {
+               /*
+                * Block must be a valid zone ID (a zone start LBA).
+                */
+               fp = 2;
+               goto invalid_fld;
+       }
 
        if (ata_ncq_enabled(qc->dev) &&
            ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
  invalid_fld:
        ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
        return 1;
- out_of_range:
-       /* "Logical Block Address out of range" */
-       ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-       return 1;
 invalid_param_len:
        /* "Parameter list length error" */
        ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
index b8d9cfc60374e08dbed9b2f646d51aa4fec0e025..4dc528bf8e85e3088fa55859d056613e8db73281 100644 (file)
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
 {
        /* We let libATA core do actual (queue) tag allocation */
 
-       /* all non NCQ/queued commands should have tag#0 */
-       if (ata_tag_internal(tag)) {
-               DPRINTK("mapping internal cmds to tag#0\n");
-               return 0;
-       }
-
        if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
                DPRINTK("tag %d invalid : out of range\n", tag);
                return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 
        /* Workaround for data length mismatch errata */
        if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
-               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-                       qc = ata_qc_from_tag(ap, tag);
+               ata_qc_for_each_with_internal(ap, qc, tag) {
                        if (qc && ata_is_atapi(qc->tf.protocol)) {
                                u32 hcontrol;
                                /* Set HControl[27] to clear error registers */
index 10ae11aa1926f3dca460e81879d3b24d8bc58a8b..72c9b922a77bc7793bb20ccd6432f249bcce45e1 100644 (file)
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
-       struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
-       sdev0 = ap->host->ports[0]->link.device[0].sdev;
-       sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /*
index ff81a576347e5154c10c997717548be69e81bbab..82532c299bb5964a429e81353b9c5f94d9bb5ed2 100644 (file)
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
        skb_queue_head_init(&iadev->rx_dma_q);  
        iadev->rx_free_desc_qhead = NULL;   
 
-       iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+       iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
        if (!iadev->rx_open) {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);  
index a8d2eb0ceb8d8f78788182f81f8e1e9f9dc8fbbb..2c288d1f42bba0fcdf31ccec72c069bfa60688b9 100644 (file)
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
                                        return -EFAULT;
                                if (pool < 0 || pool > ZATM_LAST_POOL)
                                        return -EINVAL;
+                               pool = array_index_nospec(pool,
+                                                         ZATM_LAST_POOL + 1);
                                if (copy_from_user(&info,
                                    &((struct zatm_pool_req __user *) arg)->info,
                                    sizeof(info))) return -EFAULT;
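Both this hunk and the ahci_led_store() hunk earlier apply the usual Spectre-v1 hardening pattern: bounds-check the untrusted index, then clamp it with array_index_nospec() before the array access, so a mispredicted branch cannot speculatively read out of bounds. A minimal sketch of the pattern (hypothetical table and names):

#include <linux/errno.h>
#include <linux/nospec.h>

/* Hypothetical lookup: idx comes from userspace, table has nr entries. */
static int example_lookup(int idx, const int *table, int nr)
{
	if (idx < 0 || idx >= nr)
		return -EINVAL;
	/* Clamp idx to [0, nr) even under branch misspeculation. */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}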
index b074f242a43594fc3d3a383a9dce5d126e8f3a78..704f442958103545aa89ad0e986130aa6ebc5b06 100644 (file)
@@ -8,10 +8,7 @@ obj-y                  := component.o core.o bus.o dd.o syscore.o \
                           topology.o container.o property.o cacheinfo.o \
                           devcon.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
-obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-y                          += firmware_loader/
 obj-$(CONFIG_NUMA)     += node.o
index 36622b52e419db9573c5cfb31f03bf740324e961..df3e1a44707acc74010cf5ce6fab815c4f744896 100644 (file)
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
                        link->rpm_active = true;
                }
                pm_runtime_new_link(consumer);
+               /*
+                * If the link is being added by the consumer driver at probe
+                * time, balance the decrementation of the supplier's runtime PM
+                * usage counter after consumer probe in driver_probe_device().
+                */
+               if (consumer->links.status == DL_DEV_PROBING)
+                       pm_runtime_get_noresume(supplier);
        }
        get_device(supplier);
        link->supplier = supplier;
@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
                        switch (consumer->links.status) {
                        case DL_DEV_PROBING:
                                /*
-                                * Balance the decrementation of the supplier's
-                                * runtime PM usage counter after consumer probe
-                                * in driver_probe_device().
+                                * Some callers expect the link creation during
+                                * consumer driver probe to resume the supplier
+                                * even without DL_FLAG_RPM_ACTIVE.
                                 */
                                if (flags & DL_FLAG_PM_RUNTIME)
-                                       pm_runtime_get_sync(supplier);
+                                       pm_runtime_resume(supplier);
 
                                link->status = DL_STATE_CONSUMER_PROBE;
                                break;
index 1435d7281c66e3d82e877fc98eb8b7ae7321ec19..6ebcd65d64b6dc64603ede7676a68529fcb74210 100644 (file)
@@ -434,14 +434,6 @@ re_probe:
                        goto probe_failed;
        }
 
-       /*
-        * Ensure devices are listed in devices_kset in correct order
-        * It's important to move Dev to the end of devices_kset before
-        * calling .probe, because it could be recursive and parent Dev
-        * should always go first
-        */
-       devices_kset_move_last(dev);
-
        if (dev->bus->probe) {
                ret = dev->bus->probe(dev);
                if (ret)
index 4925af5c4cf039e6cc07918967aa6995353e4bd8..9e8484189034b83efb218fb3eb4604a0b2cd8f6d 100644 (file)
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
 }
 
 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
-                                unsigned int index)
+                                unsigned int index, bool power_on)
 {
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
        dev->pm_domain->detach = genpd_dev_pm_detach;
        dev->pm_domain->sync = genpd_dev_pm_sync;
 
-       genpd_lock(pd);
-       ret = genpd_power_on(pd, 0);
-       genpd_unlock(pd);
+       if (power_on) {
+               genpd_lock(pd);
+               ret = genpd_power_on(pd, 0);
+               genpd_unlock(pd);
+       }
 
        if (ret)
                genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
                                       "#power-domain-cells") != 1)
                return 0;
 
-       return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+       return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
        }
 
        /* Try to attach the device to the PM domain at the specified index. */
-       ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+       ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
        if (ret < 1) {
                device_unregister(genpd_dev);
                return ret ? ERR_PTR(ret) : NULL;
        }
 
-       pm_runtime_set_active(genpd_dev);
        pm_runtime_enable(genpd_dev);
+       genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
 
        return genpd_dev;
 }
@@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * power domain corresponding to a DT node's "required-opps" property.
  *
  * @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
  *     the device node itself (if it doesn't have an OPP table) or a node
  *     within the OPP table of a device (if device has an OPP table).
- * @state: Pointer to return performance state.
  *
  * Returns performance state corresponding to the "required-opps" property of
  * a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * Returns performance state on success and 0 on failure.
  */
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                                              struct device_node *opp_node)
+                                              struct device_node *np)
 {
        struct generic_pm_domain *genpd;
        struct dev_pm_opp *opp;
@@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
 
        genpd_lock(genpd);
 
-       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
        if (IS_ERR(opp)) {
                dev_err(dev, "Failed to find required OPP: %ld\n",
                        PTR_ERR(opp));
index a47e4987ee467ed04578b47499c29def872525f6..d146fedc38bb26535e3960963058a9635e5d7f7b 100644 (file)
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
        _drbd_start_io_acct(device, req);
 
        /* process discards always from our submitter thread */
-       if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
-           (bio_op(bio) & REQ_OP_DISCARD))
+       if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+           bio_op(bio) == REQ_OP_DISCARD)
                goto queue_for_submitter_thread;
 
        if (rw == WRITE && req->private_bio && req->i.size
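The operator change matters because REQ_OP_* codes form an enumeration, not a bitmask, so testing them with & also matches unrelated operations. A standalone illustration (op values mirror kernels of this vintage, where REQ_OP_WRITE is 1, REQ_OP_DISCARD 3 and REQ_OP_WRITE_ZEROES 9):

#include <stdio.h>

enum { OP_WRITE = 1, OP_DISCARD = 3, OP_WRITE_ZEROES = 9 };

int main(void)
{
	int op = OP_WRITE;	/* an ordinary write */

	/* Buggy bitwise test: 1 & 9 == 1, so plain writes were also
	 * diverted to the submitter thread as if they were discards.
	 */
	printf("& test:  %d\n", (op & OP_WRITE_ZEROES) != 0);
	/* Correct equality test: false for an ordinary write. */
	printf("== test: %d\n", op == OP_WRITE_ZEROES || op == OP_DISCARD);
	return 0;
}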
index 1476cb3439f46e53a8f42a9397fb6b19afd8ff95..5e793dd7adfbd096239f4d0994d2f20e24b2b596 100644 (file)
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
                what = COMPLETED_OK;
        }
 
-       bio_put(req->private_bio);
        req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+       bio_put(bio);
 
        /* not req_mod(), we need irqsave here! */
        spin_lock_irqsave(&device->resource->req_lock, flags);
index d6b6f434fd4bb7652faf597ef9ab6c7b6dd7c362..4cb1d1be3cfbc9c14a6129ecdc2de5368ac668c4 100644 (file)
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
                arg = (unsigned long) compat_ptr(arg);
        case LOOP_SET_FD:
        case LOOP_CHANGE_FD:
+       case LOOP_SET_BLOCK_SIZE:
                err = lo_ioctl(bdev, mode, cmd, arg);
                break;
        default:
index 3b7083b8ecbb3b0ffcad0d2879954780075333ee..3fb95c8d9fd83567496d77e1e4ade83975658401 100644 (file)
@@ -76,6 +76,7 @@ struct link_dead_args {
 #define NBD_HAS_CONFIG_REF             4
 #define NBD_BOUND                      5
 #define NBD_DESTROY_ON_DISCONNECT      6
+#define NBD_DISCONNECT_ON_CLOSE        7
 
 struct nbd_config {
        u32 flags;
@@ -111,12 +112,16 @@ struct nbd_device {
        struct task_struct *task_setup;
 };
 
+#define NBD_CMD_REQUEUED       1
+
 struct nbd_cmd {
        struct nbd_device *nbd;
+       struct mutex lock;
        int index;
        int cookie;
-       struct completion send_complete;
        blk_status_t status;
+       unsigned long flags;
+       u32 cmd_cookie;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -138,12 +143,42 @@ static void nbd_config_put(struct nbd_device *nbd);
 static void nbd_connect_reply(struct genl_info *info, int index);
 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
 static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
 
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
        return disk_to_dev(nbd->disk);
 }
 
+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+       struct request *req = blk_mq_rq_from_pdu(cmd);
+
+       if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+               blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+       struct request *req = blk_mq_rq_from_pdu(cmd);
+       u32 tag = blk_mq_unique_tag(req);
+       u64 cookie = cmd->cmd_cookie;
+
+       return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+       return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+       return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
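The three helpers above pack a per-command reuse counter into the top 32 bits of the on-the-wire handle and the blk-mq unique tag into the bottom 32, so a late reply addressed to an earlier incarnation of the same tag can be detected. A standalone round-trip sketch (illustrative values):

#include <stdint.h>
#include <stdio.h>

#define COOKIE_BITS 32

int main(void)
{
	uint32_t tag = 0x00010007;	/* e.g. hw queue 1, tag 7 */
	uint32_t cookie = 3;		/* bumped for every fresh send */
	uint64_t handle = ((uint64_t)cookie << COOKIE_BITS) | tag;

	/* Unpack the way nbd_handle_to_tag()/_to_cookie() do. */
	printf("tag=%#x cookie=%u\n", (uint32_t)handle,
	       (uint32_t)(handle >> COOKIE_BITS));
	/* A reply carrying cookie 2 would be rejected as a stale or
	 * double reply, since cmd->cmd_cookie is now 3.
	 */
	return 0;
}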
 static const char *nbdcmd_to_ascii(int cmd)
 {
        switch (cmd) {
@@ -317,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        config = nbd->config;
 
+       if (!mutex_trylock(&cmd->lock))
+               return BLK_EH_RESET_TIMER;
+
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
@@ -341,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                        nbd_mark_nsock_dead(nbd, nsock, 1);
                                mutex_unlock(&nsock->tx_lock);
                        }
-                       blk_mq_requeue_request(req, true);
+                       mutex_unlock(&cmd->lock);
+                       nbd_requeue_cmd(cmd);
                        nbd_config_put(nbd);
                        return BLK_EH_DONE;
                }
@@ -351,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        }
        set_bit(NBD_TIMEDOUT, &config->runtime_flags);
        cmd->status = BLK_STS_IOERR;
+       mutex_unlock(&cmd->lock);
        sock_shutdown(nbd);
        nbd_config_put(nbd);
 done:
@@ -428,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        struct iov_iter from;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
+       u64 handle;
        u32 type;
        u32 nbd_cmd_flags = 0;
-       u32 tag = blk_mq_unique_tag(req);
        int sent = nsock->sent, skip = 0;
 
        iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -472,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                        goto send_pages;
                }
                iov_iter_advance(&from, sent);
+       } else {
+               cmd->cmd_cookie++;
        }
        cmd->index = index;
        cmd->cookie = nsock->cookie;
@@ -480,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
-       memcpy(request.handle, &tag, sizeof(tag));
+       handle = nbd_cmd_handle(cmd);
+       memcpy(request.handle, &handle, sizeof(handle));
 
        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                req, nbdcmd_to_ascii(type),
@@ -498,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
+                       set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                        return BLK_STS_RESOURCE;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -539,6 +583,7 @@ send_pages:
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
+                                       set_bit(NBD_CMD_REQUEUED, &cmd->flags);
                                        return BLK_STS_RESOURCE;
                                }
                                dev_err(disk_to_dev(nbd->disk),
@@ -571,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        struct nbd_reply reply;
        struct nbd_cmd *cmd;
        struct request *req = NULL;
+       u64 handle;
        u16 hwq;
        u32 tag;
        struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
        struct iov_iter to;
+       int ret = 0;
 
        reply.magic = 0;
        iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -592,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                return ERR_PTR(-EPROTO);
        }
 
-       memcpy(&tag, reply.handle, sizeof(u32));
-
+       memcpy(&handle, reply.handle, sizeof(handle));
+       tag = nbd_handle_to_tag(handle);
        hwq = blk_mq_unique_tag_to_hwq(tag);
        if (hwq < nbd->tag_set.nr_hw_queues)
                req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -604,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                return ERR_PTR(-ENOENT);
        }
        cmd = blk_mq_rq_to_pdu(req);
+
+       mutex_lock(&cmd->lock);
+       if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+               dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+                       req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+               ret = -ENOENT;
+               goto out;
+       }
+       if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+               dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+                       req);
+               ret = -ENOENT;
+               goto out;
+       }
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                cmd->status = BLK_STS_IOERR;
-               return cmd;
+               goto out;
        }
 
        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -633,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                                if (nbd_disconnected(config) ||
                                    config->num_connections <= 1) {
                                        cmd->status = BLK_STS_IOERR;
-                                       return cmd;
+                                       goto out;
                                }
-                               return ERR_PTR(-EIO);
+                               ret = -EIO;
+                               goto out;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                req, bvec.bv_len);
                }
-       } else {
-               /* See the comment in nbd_queue_rq. */
-               wait_for_completion(&cmd->send_complete);
        }
-       return cmd;
+out:
+       mutex_unlock(&cmd->lock);
+       return ret ? ERR_PTR(ret) : cmd;
 }
 
 static void recv_work(struct work_struct *work)
@@ -803,7 +864,7 @@ again:
         */
        blk_mq_start_request(req);
        if (unlikely(nsock->pending && nsock->pending != req)) {
-               blk_mq_requeue_request(req, true);
+               nbd_requeue_cmd(cmd);
                ret = 0;
                goto out;
        }
@@ -816,7 +877,7 @@ again:
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                                    "Request send failed, requeueing\n");
                nbd_mark_nsock_dead(nbd, nsock, 1);
-               blk_mq_requeue_request(req, true);
+               nbd_requeue_cmd(cmd);
                ret = 0;
        }
 out:
@@ -840,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * that the server is misbehaving (or there was an error) before we're
         * done sending everything over the wire.
         */
-       init_completion(&cmd->send_complete);
+       mutex_lock(&cmd->lock);
+       clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
 
        /* We can be called directly from the user space process, which means we
         * could possibly have signals pending so our sendmsg will fail.  In
@@ -852,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                ret = BLK_STS_IOERR;
        else if (!ret)
                ret = BLK_STS_OK;
-       complete(&cmd->send_complete);
+       mutex_unlock(&cmd->lock);
 
        return ret;
 }
@@ -1305,6 +1367,12 @@ out:
 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {
        struct nbd_device *nbd = disk->private_data;
+       struct block_device *bdev = bdget_disk(disk, 0);
+
+       if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+                       bdev->bd_openers == 0)
+               nbd_disconnect_and_put(nbd);
+
        nbd_config_put(nbd);
        nbd_put(nbd);
 }
@@ -1452,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
        cmd->nbd = set->driver_data;
+       cmd->flags = 0;
+       mutex_init(&cmd->lock);
        return 0;
 }
 
@@ -1705,6 +1775,10 @@ again:
                                &config->runtime_flags);
                        put_dev = true;
                }
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                               &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1823,17 @@ out:
        return ret;
 }
 
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+       mutex_lock(&nbd->config_lock);
+       nbd_disconnect(nbd);
+       nbd_clear_sock(nbd);
+       mutex_unlock(&nbd->config_lock);
+       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+                              &nbd->config->runtime_flags))
+               nbd_config_put(nbd);
+}
+
 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
        struct nbd_device *nbd;
@@ -1781,13 +1866,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
                nbd_put(nbd);
                return 0;
        }
-       mutex_lock(&nbd->config_lock);
-       nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
-       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
-                              &nbd->config->runtime_flags))
-               nbd_config_put(nbd);
+       nbd_disconnect_and_put(nbd);
        nbd_config_put(nbd);
        nbd_put(nbd);
        return 0;
@@ -1798,7 +1877,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
        struct nbd_device *nbd = NULL;
        struct nbd_config *config;
        int index;
-       int ret = -EINVAL;
+       int ret = 0;
        bool put_dev = false;
 
        if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1917,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
            !nbd->task_recv) {
                dev_err(nbd_to_dev(nbd),
                        "not configured, cannot reconfigure\n");
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1862,6 +1942,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                                               &config->runtime_flags))
                                refcount_inc(&nbd->refs);
                }
+
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               } else {
+                       clear_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
index 7948049f6c4321b02e1611383dae1be86a7748f1..042c778e5a4e0bf2009c38a6b1cf37bc5d23ce89 100644 (file)
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
 {
        pr_info("null: rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+       __blk_complete_request(rq);
        return BLK_EH_DONE;
 }
 
index 14d159e2042d5c488c1e23b3247508aab0a2ebff..2dc33e65d2d0c957199f1e3c1bf8028d4e09ca88 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
index 1cc29629d23807b83bfee9f7d23fec1d5757941d..80d60f43db56123076fed5ea2047da35af88af8f 100644 (file)
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
        const char *name;
        int nr_fck = 0, nr_ick = 0, i, error = 0;
 
-       ddata->clock_roles = devm_kzalloc(ddata->dev,
-                                         sizeof(*ddata->clock_roles) *
+       ddata->clock_roles = devm_kcalloc(ddata->dev,
                                          SYSC_MAX_CLOCKS,
+                                         sizeof(*ddata->clock_roles),
                                          GFP_KERNEL);
        if (!ddata->clock_roles)
                return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
                return -EINVAL;
        }
 
-       ddata->clocks = devm_kzalloc(ddata->dev,
-                                    sizeof(*ddata->clocks) * ddata->nr_clocks,
+       ddata->clocks = devm_kcalloc(ddata->dev,
+                                    ddata->nr_clocks, sizeof(*ddata->clocks),
                                     GFP_KERNEL);
        if (!ddata->clocks)
                return -ENOMEM;
index 53fe633df1e8d9c1187e862b6a305a1905bbc2bb..c9bf2c219841846570c6cffefe8e2e4c59583997 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "agp.h"
 
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
        alpha_agp_info *agp = agp_bridge->dev_private_data;
        dma_addr_t dma_addr;
index e50c29c97ca74d20542a176387d3de8ee4780b79..c69e39fdd02b8c5c9a35931271c45383f4b18da3 100644 (file)
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
 
        /* Address to map to */
        pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
-       aperturebase = tmp << 25;
+       aperturebase = (u64)tmp << 25;
        aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
 
        enable_gart_translation(hammer, gatt_table);
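The (u64) cast added here (and again in the next hunk) is the whole fix: without it the shift is performed in 32-bit arithmetic, and any aperture base at or above 4 GiB is silently truncated. A standalone demonstration (register value made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0x80;	/* aperture base field from config space */

	/* 32-bit shift: bits pushed past bit 31 are lost -> 0. */
	printf("bad:  %#llx\n", (unsigned long long)(base << 25));
	/* 64-bit shift keeps the full address: 0x100000000 (4 GiB). */
	printf("good: %#llx\n", (unsigned long long)((uint64_t)base << 25));
	return 0;
}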
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
        pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
        nb_order = (nb_order >> 1) & 7;
        pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
-       nb_aper = nb_base << 25;
+       nb_aper = (u64)nb_base << 25;
 
        /* Northbridge seems to contain crap. Try the AGP bridge. */
 
index 91bb98c42a1ca76376ae0db4b43fd7a89fca27b0..aaf9e5afaad435e2342a15fc963aa91367079957 100644 (file)
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+       int err;
+
        mutex_lock(&rng_mutex);
 
        list_del(&rng->list);
-       if (current_rng == rng)
-               enable_best_rng();
+       if (current_rng == rng) {
+               err = enable_best_rng();
+               if (err) {
+                       drop_current_rng();
+                       cur_rng_set_by_user = 0;
+               }
+       }
 
        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
index ad353be871bf005c6c0ca875d663bed49906c302..90ec010bffbd9776c012586b4e01b24cdd0bd2d6 100644 (file)
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
        return 0;
 
 out_err:
-       ipmi_unregister_smi(new_smi->intf);
-       new_smi->intf = NULL;
+       if (new_smi->intf) {
+               ipmi_unregister_smi(new_smi->intf);
+               new_smi->intf = NULL;
+       }
 
        kfree(init_name);
 
index fbfc05e3f3d1756a58455dbdbf73c90162898f4c..bb882ab161fe1bbb4b678cc9bf105b77273296e3 100644 (file)
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
 int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
 {
        unsigned long flags;
-       int ret = 0;
+       int ret = -ENODATA;
        u8 status;
 
        spin_lock_irqsave(&kcs_bmc->lock, flags);
 
-       if (!kcs_bmc->running) {
-               kcs_force_abort(kcs_bmc);
-               ret = -ENODEV;
-               goto out_unlock;
-       }
-
-       status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
-       switch (status) {
-       case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
-               kcs_bmc_handle_cmd(kcs_bmc);
-               break;
-
-       case KCS_STATUS_IBF:
-               kcs_bmc_handle_data(kcs_bmc);
-               break;
+       status = read_status(kcs_bmc);
+       if (status & KCS_STATUS_IBF) {
+               if (!kcs_bmc->running)
+                       kcs_force_abort(kcs_bmc);
+               else if (status & KCS_STATUS_CMD_DAT)
+                       kcs_bmc_handle_cmd(kcs_bmc);
+               else
+                       kcs_bmc_handle_data(kcs_bmc);
 
-       default:
-               ret = -ENODATA;
-               break;
+               ret = 0;
        }
 
-out_unlock:
        spin_unlock_irqrestore(&kcs_bmc->lock, flags);
 
        return ret;
index ffeb60d3434c5150650d1ef4c9a3aec3d8ffd59e..df66a9dd0aae3c5ef27a4b92410d347eb8f82a3a 100644 (file)
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 #endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
+       vma_set_anonymous(vma);
        return 0;
 }
 
index a8fb0020ba5ccfb9f4b72b689544299815fab60a..bd449ad524423c92584489b49496dfede65c52ae 100644 (file)
@@ -402,7 +402,8 @@ static struct poolinfo {
 /*
  * Static global variables
  */
-static DECLARE_WAIT_QUEUE_HEAD(random_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
 static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ retry:
 
                /* should we wake readers? */
                if (entropy_bits >= random_read_wakeup_bits &&
-                   wq_has_sleeper(&random_wait)) {
-                       wake_up_interruptible_poll(&random_wait, POLLIN);
+                   wq_has_sleeper(&random_read_wait)) {
+                       wake_up_interruptible(&random_read_wait);
                        kill_fasync(&fasync, SIGIO, POLL_IN);
                }
                /* If the input pool is getting full, send some
@@ -1396,7 +1397,7 @@ retry:
        trace_debit_entropy(r->name, 8 * ibytes);
        if (ibytes &&
            (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
-               wake_up_interruptible_poll(&random_wait, POLLOUT);
+               wake_up_interruptible(&random_write_wait);
                kill_fasync(&fasync, SIGIO, POLL_OUT);
        }
 
@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
                if (nonblock)
                        return -EAGAIN;
 
-               wait_event_interruptible(random_wait,
+               wait_event_interruptible(random_read_wait,
                        ENTROPY_BITS(&input_pool) >=
                        random_read_wakeup_bits);
                if (signal_pending(current))
@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
        return ret;
 }
 
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
-       return &random_wait;
-}
-
 static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
 {
-       __poll_t mask = 0;
+       __poll_t mask;
 
+       poll_wait(file, &random_read_wait, wait);
+       poll_wait(file, &random_write_wait, wait);
+       mask = 0;
        if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
                mask |= EPOLLIN | EPOLLRDNORM;
        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
@@ -1897,14 +1895,22 @@ static int
 write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 {
        size_t bytes;
-       __u32 buf[16];
+       __u32 t, buf[16];
        const char __user *p = buffer;
 
        while (count > 0) {
+               int b, i = 0;
+
                bytes = min(count, sizeof(buf));
                if (copy_from_user(&buf, p, bytes))
                        return -EFAULT;
 
+               for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+                       if (!arch_get_random_int(&t))
+                               break;
+                       buf[i] ^= t;
+               }
+
                count -= bytes;
                p += bytes;
 
@@ -1992,8 +1998,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 const struct file_operations random_fops = {
        .read  = random_read,
        .write = random_write,
-       .get_poll_head  = random_get_poll_head,
-       .poll_mask  = random_poll_mask,
+       .poll  = random_poll,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
@@ -2326,7 +2331,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
         * We'll be woken up again once below random_write_wakeup_thresh,
         * or when the calling thread is about to terminate.
         */
-       wait_event_interruptible(random_wait, kthread_should_stop() ||
+       wait_event_interruptible(random_write_wait, kthread_should_stop() ||
                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
        mix_pool_bytes(poolp, buffer, count);
        credit_entropy_bits(poolp, entropy);
index ae40cbe770f059d0ca1aded1dc7fc3530b3153df..0bb25dd009d18467c7ae6f5238e1a88c24844c7b 100644 (file)
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD)                       += sprd/
 obj-$(CONFIG_ARCH_STI)                 += st/
 obj-$(CONFIG_ARCH_STRATIX10)           += socfpga/
 obj-$(CONFIG_ARCH_SUNXI)               += sunxi/
-obj-$(CONFIG_ARCH_SUNXI)               += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU)                        += sunxi-ng/
 obj-$(CONFIG_ARCH_TEGRA)               += tegra/
 obj-y                                  += ti/
 obj-$(CONFIG_CLK_UNIPHIER)             += uniphier/
index 38b366b00c571eda38e7faf7febc751bbeb212ad..7b70a074095df9e3871677e51981928248a7192a 100644 (file)
@@ -24,7 +24,7 @@
 #define ASPEED_MPLL_PARAM      0x20
 #define ASPEED_HPLL_PARAM      0x24
 #define  AST2500_HPLL_BYPASS_EN        BIT(20)
-#define  AST2400_HPLL_STRAPPED BIT(18)
+#define  AST2400_HPLL_PROGRAMMED BIT(18)
 #define  AST2400_HPLL_BYPASS_EN        BIT(17)
 #define ASPEED_MISC_CTRL       0x2c
 #define  UART_DIV13_EN         BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
        [ASPEED_CLK_GATE_GCLK] =        {  1,  7, "gclk-gate",          NULL,   0 }, /* 2D engine */
        [ASPEED_CLK_GATE_MCLK] =        {  2, -1, "mclk-gate",          "mpll", CLK_IS_CRITICAL }, /* SDRAM */
        [ASPEED_CLK_GATE_VCLK] =        {  3,  6, "vclk-gate",          NULL,   0 }, /* Video Capture */
-       [ASPEED_CLK_GATE_BCLK] =        {  4,  8, "bclk-gate",          "bclk", 0 }, /* PCIe/PCI */
-       [ASPEED_CLK_GATE_DCLK] =        {  5, -1, "dclk-gate",          NULL,   0 }, /* DAC */
+       [ASPEED_CLK_GATE_BCLK] =        {  4,  8, "bclk-gate",          "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+       [ASPEED_CLK_GATE_DCLK] =        {  5, -1, "dclk-gate",          NULL,   CLK_IS_CRITICAL }, /* DAC */
        [ASPEED_CLK_GATE_REFCLK] =      {  6, -1, "refclk-gate",        "clkin", CLK_IS_CRITICAL },
        [ASPEED_CLK_GATE_USBPORT2CLK] = {  7,  3, "usb-port2-gate",     NULL,   0 }, /* USB2.0 Host port 2 */
        [ASPEED_CLK_GATE_LCLK] =        {  8,  5, "lclk-gate",          NULL,   0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
 {
        struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
        u32 clk = BIT(gate->clock_idx);
+       u32 rst = BIT(gate->reset_idx);
        u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
        u32 reg;
 
+       /*
+        * If the IP is in reset, treat the clock as not enabled;
+        * this happens with some clocks such as the USB one when
+        * coming from cold reset. Without this, aspeed_clk_enable()
+        * will fail to lift the reset.
+        */
+       if (gate->reset_idx >= 0) {
+               regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+               if (reg & rst)
+                       return 0;
+       }
+
        regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
 
        return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
 static void __init aspeed_ast2400_cc(struct regmap *map)
 {
        struct clk_hw *hw;
-       u32 val, freq, div;
+       u32 val, div, clkin, hpll;
+       const u16 hpll_rates[][4] = {
+               {384, 360, 336, 408},
+               {400, 375, 350, 425},
+       };
+       int rate;
 
        /*
         * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
         * strapping
         */
        regmap_read(map, ASPEED_STRAP, &val);
-       if (val & CLKIN_25MHZ_EN)
-               freq = 25000000;
-       else if (val & AST2400_CLK_SOURCE_SEL)
-               freq = 48000000;
-       else
-               freq = 24000000;
-       hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
-       pr_debug("clkin @%u MHz\n", freq / 1000000);
+       rate = (val >> 8) & 3;
+       if (val & CLKIN_25MHZ_EN) {
+               clkin = 25000000;
+               hpll = hpll_rates[1][rate];
+       } else if (val & AST2400_CLK_SOURCE_SEL) {
+               clkin = 48000000;
+               hpll = hpll_rates[0][rate];
+       } else {
+               clkin = 24000000;
+               hpll = hpll_rates[0][rate];
+       }
+       hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+       pr_debug("clkin @%u MHz\n", clkin / 1000000);
 
        /*
         * High-speed PLL clock derived from the crystal. This is the CPU clock,
-        * and we assume that it is enabled
+        * and we assume that it is enabled. It can be configured through the
+        * HPLL_PARAM register, or set to a specified frequency by strapping.
         */
        regmap_read(map, ASPEED_HPLL_PARAM, &val);
-       WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
-       aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+       if (val & AST2400_HPLL_PROGRAMMED)
+               hw = aspeed_ast2400_calc_pll("hpll", val);
+       else
+               hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+                               hpll * 1000000);
+
+       aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
 
        /*
         * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
index 9760b526ca31da90c4c288ac096cb0fee7d16cbf..e2ed078abd90c7b0e775a4e8e510ab46151c82ef 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
-#include <linux/stringify.h>
 
 #include "clk.h"
 
@@ -2559,7 +2558,7 @@ static const struct {
        unsigned long flag;
        const char *name;
 } clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
        ENTRY(CLK_SET_RATE_GATE),
        ENTRY(CLK_SET_PARENT_GATE),
        ENTRY(CLK_SET_RATE_PARENT),
index aae62a5b8734e859e76a16f995b05f43bf5f9b4a..d1bbee19ed0fcf74edfb2019fc4907ba00533a66 100644 (file)
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
 
        usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
        if (IS_ERR(usb1)) {
-               if (PTR_ERR(usb0) == -EPROBE_DEFER)
+               if (PTR_ERR(usb1) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
 
                dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
index 6a42529d31a91aa644d64c8708b45297ec361efe..cc5614567a70d61cf76aa6777caf9c2f39479c5d 100644 (file)
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
 #ifdef CONFIG_ARCH_DAVINCI_DM355
 extern const struct davinci_psc_init_data dm355_psc_init_data;
 #endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
 extern const struct davinci_psc_init_data dm365_psc_init_data;
 #endif
 #ifdef CONFIG_ARCH_DAVINCI_DM644x
index 58f546e048073160e40bedf637894b189072b143..e4cf96ba704ed98ff2a277af75da8c7a3a5b8ee0 100644 (file)
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
        struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
        unsigned long divider;
 
-       divider = meson_parm_read(clk->map, &adiv->div);
+       divider = meson_parm_read(clk->map, &adiv->div) + 1;
 
        return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
 }
index 240658404367f38670b79a2d40fcd1cdba92c67b..177fffb9ebefe4c5d059e149a912f3c14ecc3ea2 100644 (file)
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div2_div" },
                .num_parents = 1,
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index 6860bd5a37c5e50e9be9e26c40981dea33d57dfc..44e4e27eddada1dd96a3a3d9ae414bd711a89d4d 100644 (file)
@@ -35,6 +35,7 @@
 #define CLK_SEL                0x10
 #define CLK_DIS                0x14
 
+#define  ARMADA_37XX_DVFS_LOAD_1 1
 #define LOAD_LEVEL_NR  4
 
 #define ARMADA_37XX_NB_L0L1    0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
        return -EINVAL;
 }
 
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
+ * respectively) to the L0 frequency (1.2 GHz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in between. The sequence therefore becomes:
+ * 1. First switch from L2/L3 (300/200 MHz) to L1 (600 MHz)
+ * 2. Sleep 20 ms to let the VDD voltage stabilize
+ * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+       unsigned int cur_level;
+
+       if (rate != 1200 * 1000 * 1000)
+               return;
+
+       regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+       cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+               return;
+
+       regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+                          ARMADA_37XX_NB_CPU_LOAD_MASK,
+                          ARMADA_37XX_DVFS_LOAD_1);
+       msleep(20);
+}
+
 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
 {
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
                         */
                        reg = ARMADA_37XX_NB_CPU_LOAD;
                        mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+                       clk_pm_cpu_set_rate_wa(rate, base);
+
                        regmap_update_bits(base, reg, mask, load_level);
 
                        return rate;
index 9f35b3fe1d9731f78fcd095cc544274a3c1c872a..ff8d66fd94e6198ac7abe91ce4f8e4dcc071897c 100644 (file)
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
 
 static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
        .halt_reg = 0x75018,
+       .halt_check = BRANCH_HALT_SKIP,
        .clkr = {
                .enable_reg = 0x75018,
                .enable_mask = BIT(0),
index 1a25ee4f3658672bc8f4a4bc8c55c0492289ea61..4b20d1b67a1b77b0f6c735213c43e28a4dfa6dee 100644 (file)
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
                .name = "mmagic_bimc",
        },
        .pwrsts = PWRSTS_OFF_ON,
+       .flags = ALWAYS_ON,
 };
 
 static struct gdsc mmagic_video_gdsc = {
index acaa14cfa25ca3922178e865ddb4daefc676ee2a..49454700f2e5c2e469cbdb037c54900f8c74cf79 100644 (file)
@@ -1,24 +1,24 @@
 # SPDX-License-Identifier: GPL-2.0
 # Common objects
-lib-$(CONFIG_SUNXI_CCU)                += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_reset.o
+obj-y                          += ccu_common.o
+obj-y                          += ccu_mmc_timing.o
+obj-y                          += ccu_reset.o
 
 # Base clock types
-lib-$(CONFIG_SUNXI_CCU)                += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_sdm.o
+obj-y                          += ccu_div.o
+obj-y                          += ccu_frac.o
+obj-y                          += ccu_gate.o
+obj-y                          += ccu_mux.o
+obj-y                          += ccu_mult.o
+obj-y                          += ccu_phase.o
+obj-y                          += ccu_sdm.o
 
 # Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mp.o
+obj-y                          += ccu_nk.o
+obj-y                          += ccu_nkm.o
+obj-y                          += ccu_nkmp.o
+obj-y                          += ccu_nm.o
+obj-y                          += ccu_mp.o
 
 # SoC support
 obj-$(CONFIG_SUN50I_A64_CCU)   += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU)  += ccu-sun8i-r40.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80-de.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU)                += lib.a
index 57cb2f00fc07ce7f5ffb526bd9bb03ed11287626..d8c7f5750cdb025dfd3eae42d691318fc472e29b 100644 (file)
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
                clk->rating = 400;
-               clk->cpumask = cpu_all_mask;
+               clk->cpumask = cpu_possible_mask;
                if (arch_timer_mem_use_virtual) {
                        clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
index e5cdc3af684cbbe2370406c79af63df143776cd3..2717f88c79040a1ec26e2dd0d01f5d2dd25d9734 100644 (file)
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)
 
        to->private_data = kzalloc(sizeof(struct stm32_timer_private),
                                   GFP_KERNEL);
-       if (!to->private_data)
+       if (!to->private_data) {
+               ret = -ENOMEM;
                goto deinit;
+       }
 
        rstc = of_reset_control_get(node, NULL);
        if (!IS_ERR(rstc)) {
index 1de5ec8d5ea3e9995e3ffd413f728df078c25f8f..3c397125613038f3ea1809adfa3ab6de1a19d98b 100644 (file)
@@ -294,6 +294,7 @@ struct pstate_funcs {
 static struct pstate_funcs pstate_funcs __read_mostly;
 
 static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
 static bool per_cpu_limits __read_mostly;
 static bool hwp_boost __read_mostly;
 
@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        cpu->pstate.scaling = pstate_funcs.get_scaling();
        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-       cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+       if (hwp_active && !hwp_mode_bdw) {
+               unsigned int phy_max, current_max;
+
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       }
 
        if (pstate_funcs.get_aperf_mperf_shift)
                cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2385,6 +2394,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
        return true;
 }
 
+static bool __init intel_pstate_no_acpi_pcch(void)
+{
+       acpi_status status;
+       acpi_handle handle;
+
+       status = acpi_get_handle(NULL, "\\_SB", &handle);
+       if (ACPI_FAILURE(status))
+               return true;
+
+       return !acpi_has_method(handle, "PCCH");
+}
+
 static bool __init intel_pstate_has_acpi_ppc(void)
 {
        int i;
@@ -2444,7 +2465,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 
        switch (plat_info[idx].data) {
        case PSS:
-               return intel_pstate_no_acpi_pss();
+               if (!intel_pstate_no_acpi_pss())
+                       return false;
+
+               return intel_pstate_no_acpi_pcch();
        case PPC:
                return intel_pstate_has_acpi_ppc() && !force_load;
        }
@@ -2467,28 +2491,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
 static inline void intel_pstate_request_control_from_smm(void) {}
 #endif /* CONFIG_ACPI */
 
+#define INTEL_PSTATE_HWP_BROADWELL     0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-       { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+       ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(X86_MODEL_ANY, 0),
        {}
 };
 
 static int __init intel_pstate_init(void)
 {
+       const struct x86_cpu_id *id;
        int rc;
 
        if (no_load)
                return -ENODEV;
 
-       if (x86_match_cpu(hwp_support_ids)) {
+       id = x86_match_cpu(hwp_support_ids);
+       if (id) {
                copy_cpu_funcs(&core_funcs);
                if (!no_hwp) {
                        hwp_active++;
+                       hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
                        goto hwp_cpu_matched;
                }
        } else {
-               const struct x86_cpu_id *id;
-
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id)
                        return -ENODEV;
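
x86_match_cpu() returns the first table entry that matches, so the
model-specific Broadwell rows must come before the X86_MODEL_ANY catch-all, and
the selected mode then travels through id->driver_data. A sketch of the pattern
(hypothetical table and mode values, not the driver's own):

    static const struct x86_cpu_id ids[] __initconst = {
        { X86_VENDOR_INTEL, 6, 0x4f, X86_FEATURE_HWP, 1 }, /* specific model first */
        { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP, 0 }, /* catch-all last */
        {}
    };

    unsigned long mode = 0;
    const struct x86_cpu_id *id = x86_match_cpu(ids);

    if (id)
        mode = id->driver_data; /* 1 only on the specific model */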
index 3f0ce2ae35ee432637c28e7dddd2851a20e29f16..0c56c97596725edf323a2abf471bbaf69582b903 100644 (file)
@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
 {
        int ret;
 
+       /* Skip initialization if another cpufreq driver is there. */
+       if (cpufreq_get_current_driver())
+               return 0;
+
        if (acpi_disabled)
                return 0;
 
index d049fe4b80c48e00d169f3835bb7b70b8022a879..efc9a7ae485707e32c2dade8c64c58fb798a8c8d 100644 (file)
@@ -42,6 +42,8 @@ enum _msm8996_version {
        NUM_OF_MSM8996_VERSIONS,
 };
 
+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
 static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 {
        struct opp_table *opp_tables[NR_CPUS] = {0};
-       struct platform_device *cpufreq_dt_pdev;
        enum _msm8996_version msm8996_version;
        struct nvmem_cell *speedbin_nvmem;
        struct device_node *np;
@@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        int ret;
 
        cpu_dev = get_cpu_device(0);
-       if (NULL == cpu_dev)
-               ret = -ENODEV;
+       if (!cpu_dev)
+               return -ENODEV;
 
        msm8996_version = qcom_cpufreq_kryo_get_msm_id();
        if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        }
 
        np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
-       if (IS_ERR(np))
-               return PTR_ERR(np);
+       if (!np)
+               return -ENOENT;
 
        ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
        if (!ret) {
@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 
        speedbin = nvmem_cell_read(speedbin_nvmem, &len);
        nvmem_cell_put(speedbin_nvmem);
+       if (IS_ERR(speedbin))
+               return PTR_ERR(speedbin);
 
        switch (msm8996_version) {
        case MSM8996_V3:
@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
                BUG();
                break;
        }
+       kfree(speedbin);
 
        for_each_possible_cpu(cpu) {
                cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@ free_opp:
        return ret;
 }
 
+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+       platform_device_unregister(cpufreq_dt_pdev);
+       return 0;
+}
+
 static struct platform_driver qcom_cpufreq_kryo_driver = {
        .probe = qcom_cpufreq_kryo_probe,
+       .remove = qcom_cpufreq_kryo_remove,
        .driver = {
                .name = "qcom-cpufreq-kryo",
        },
@@ -172,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
 static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
+       {}
 };
 
 /*
@@ -198,8 +210,9 @@ static int __init qcom_cpufreq_kryo_init(void)
        if (unlikely(ret < 0))
                return ret;
 
-       ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
-               "qcom-cpufreq-kryo", -1, NULL, 0));
+       kryo_cpufreq_pdev = platform_device_register_simple(
+               "qcom-cpufreq-kryo", -1, NULL, 0);
+       ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
        if (0 == ret)
                return 0;
 
@@ -208,5 +221,12 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
+static void __init qcom_cpufreq_kryo_exit(void)
+{
+       platform_device_unregister(kryo_cpufreq_pdev);
+       platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
 MODULE_LICENSE("GPL v2");
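
Both added checks follow from the nvmem contract: nvmem_cell_read() returns an
ERR_PTR on failure and a kmalloc'd buffer, owned by the caller, on success. The
general shape, as a sketch rather than the driver's exact code:

    size_t len;
    u8 *buf = nvmem_cell_read(cell, &len);

    if (IS_ERR(buf))
        return PTR_ERR(buf); /* no buffer was allocated */

    /* ... consume buf[0..len-1] ... */
    kfree(buf); /* the success path must free it */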
index 00c7aab8e7d0f5861e778dc4d26affe5c1234603..afebbd87c4aa1d22ca179f558552cb2f410fcc0a 100644 (file)
@@ -1548,15 +1548,14 @@ skip_copy:
                        tp->urg_data = 0;
 
                if ((avail + offset) >= skb->len) {
-                       if (likely(skb))
-                               chtls_free_skb(sk, skb);
-                       buffers_freed++;
                        if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
                                tp->copied_seq += skb->len;
                                hws->rcvpld = skb->hdr_len;
                        } else {
                                tp->copied_seq += hws->rcvpld;
                        }
+                       chtls_free_skb(sk, skb);
+                       buffers_freed++;
                        hws->copied_seq = 0;
                        if (copied >= target &&
                            !skb_peek(&sk->sk_receive_queue))
index de2f8297a210bb4ea3998815353e72b7c11598da..108c37fca78279c06e896afcc411220996872760 100644 (file)
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
-               dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }
 
        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
-               dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
-               dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }
 
        if (!vma_is_dax(vma)) {
-               dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }
index 903d9c473749c24d636f573aba798a9680df2909..45276abf03aa2bd52aa9af56b8cbd45a4b1e5135 100644 (file)
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
        struct dax_device *dax_dev;
        bool dax_enabled = false;
+       struct request_queue *q;
        pgoff_t pgoff;
        int err, id;
        void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
                return false;
        }
 
+       q = bdev_get_queue(bdev);
+       if (!q || !blk_queue_dax(q)) {
+               pr_debug("%s: error: request queue doesn't support dax\n",
+                               bdevname(bdev, buf));
+               return false;
+       }
+
        err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
        if (err) {
                pr_debug("%s: error: unaligned partition for dax\n",
index d78d5fc173dc3d9b05b523462301a060e5482473..13884474d1588f7a086154d3b06dbd09ff28e881 100644 (file)
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release
-                         || !exp_info->ops->map_atomic
                          || !exp_info->ops->map
                          || !exp_info->ops->mmap)) {
                return ERR_PTR(-EINVAL);
@@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        mutex_lock(&dmabuf->lock);
 
        if (dmabuf->ops->attach) {
-               ret = dmabuf->ops->attach(dmabuf, dev, attach);
+               ret = dmabuf->ops->attach(dmabuf, attach);
                if (ret)
                        goto err_attach;
        }
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
  *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
  *
- *   There are also atomic variants of these interfaces. Like for kmap they
- *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
- *   (in the callback) is allowed to block when using these.
- *
- *   Interfaces::
- *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- *   For importers all the restrictions of using kmap apply, like the limited
- *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
- *   max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ *   Implementing these functions is optional for exporters; for importers,
+ *   all the restrictions of using kmap apply.
  *
  *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
  *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
  *   the partial chunks at the beginning and end but may return stale or bogus
  *   data outside of the range (in these partial chunks).
  *
- *   Note that these calls need to always succeed. The exporter needs to
- *   complete any preparations that might fail in begin_cpu_access.
- *
  *   For some cases the overhead of kmap can be too high, a vmap interface
  *   is introduced. This interface should be used very carefully, as vmalloc
  *   space is a limited resources on many architectures.
@@ -859,41 +846,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
-/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf:    [in]    buffer to map page from.
- * @page_num:  [in]    page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
-       WARN_ON(!dmabuf);
-
-       return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf:    [in]    buffer to unmap page from.
- * @page_num:  [in]    page in PAGE_SIZE units to unmap.
- * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
-                          void *vaddr)
-{
-       WARN_ON(!dmabuf);
-
-       if (dmabuf->ops->unmap_atomic)
-               dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
 /**
  * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
  * same restrictions as for kmap and friends apply.
@@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
 {
        WARN_ON(!dmabuf);
 
+       if (!dmabuf->ops->map)
+               return NULL;
        return dmabuf->ops->map(dmabuf, page_num);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kmap);
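
With ->map_atomic dropped from the mandatory set and ->map itself now optional,
importers can no longer assume dma_buf_kmap() succeeds. A hedged importer-side
sketch (the -ENXIO fallback is illustrative, not prescribed):

    void *vaddr = dma_buf_kmap(dmabuf, pgnum);

    if (!vaddr)
        return -ENXIO; /* exporter has no ->map; take a non-kmap path */

    memcpy(dst, vaddr, PAGE_SIZE);
    dma_buf_kunmap(dmabuf, pgnum, vaddr);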
index dd1edfb27b61a3a24f64f1372f904467390e8fe4..a8c2544972518d0fe100288bbf7ffe717dfa2773 100644 (file)
@@ -104,7 +104,6 @@ const struct dma_fence_ops dma_fence_array_ops = {
        .get_timeline_name = dma_fence_array_get_timeline_name,
        .enable_signaling = dma_fence_array_enable_signaling,
        .signaled = dma_fence_array_signaled,
-       .wait = dma_fence_default_wait,
        .release = dma_fence_array_release,
 };
 EXPORT_SYMBOL(dma_fence_array_ops);
index 4edb9fd3cf4790a1b7e575f4fbe6d0910efec10e..1551ca7df394113fca8fc923fb1fdb98d63b13aa 100644 (file)
@@ -38,12 +38,43 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
  */
 static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
 
+/**
+ * DOC: DMA fences overview
+ *
+ * DMA fences, represented by &struct dma_fence, are the kernel internal
+ * synchronization primitive for DMA operations like GPU rendering, video
+ * encoding/decoding, or displaying buffers on a screen.
+ *
+ * A fence is initialized using dma_fence_init() and completed using
+ * dma_fence_signal(). Fences are associated with a context, allocated through
+ * dma_fence_context_alloc(), and all fences on the same context are
+ * fully ordered.
+ *
+ * Since the purpose of fences is to facilitate cross-device and
+ * cross-application synchronization, there are multiple ways to use one:
+ *
+ * - Individual fences can be exposed as a &sync_file, accessed as a file
+ *   descriptor from userspace, created by calling sync_file_create(). This is
+ *   called explicit fencing, since userspace passes around explicit
+ *   synchronization points.
+ *
+ * - Some subsystems also have their own explicit fencing primitives, like
+ *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
+ *   fence to be updated.
+ *
+ * - Then there's also implicit fencing, where the synchronization points are
+ *   implicitly passed around as part of shared &dma_buf instances. Such
+ *   implicit fences are stored in &struct reservation_object through the
+ *   &dma_buf.resv pointer.
+ */
+
 /**
  * dma_fence_context_alloc - allocate an array of fence contexts
- * @num:       [in]    amount of contexts to allocate
+ * @num: amount of contexts to allocate
  *
- * This function will return the first index of the number of fences allocated.
- * The fence context is used for setting fence->context to a unique number.
+ * This function returns the first index of the newly allocated range of @num
+ * fence contexts. The fence context is used for setting &dma_fence.context to a
+ * unique number by passing the context to dma_fence_init().
  */
 u64 dma_fence_context_alloc(unsigned num)
 {
@@ -59,10 +90,14 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
  * Signal completion for software callbacks on a fence, this will unblock
  * dma_fence_wait() calls and run all the callbacks added with
  * dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
  *
- * Unlike dma_fence_signal, this function must be called with fence->lock held.
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
  */
 int dma_fence_signal_locked(struct dma_fence *fence)
 {
@@ -102,8 +137,11 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
  * Signal completion for software callbacks on a fence, this will unblock
  * dma_fence_wait() calls and run all the callbacks added with
  * dma_fence_add_callback(). Can be called multiple times, but since a fence
- * can only go from unsignaled to signaled state, it will only be effective
- * the first time.
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
  */
 int dma_fence_signal(struct dma_fence *fence)
 {
@@ -136,9 +174,9 @@ EXPORT_SYMBOL(dma_fence_signal);
 /**
  * dma_fence_wait_timeout - sleep until the fence gets signaled
  * or until timeout elapses
- * @fence:     [in]    the fence to wait on
- * @intr:      [in]    if true, do an interruptible wait
- * @timeout:   [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
  *
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
  * remaining timeout in jiffies on success. Other error values may be
@@ -148,6 +186,8 @@ EXPORT_SYMBOL(dma_fence_signal);
  * directly or indirectly (buf-mgr between reservation and committing)
  * holds a reference to the fence, otherwise the fence might be
  * freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_any_timeout().
  */
 signed long
 dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
@@ -158,12 +198,22 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
                return -EINVAL;
 
        trace_dma_fence_wait_start(fence);
-       ret = fence->ops->wait(fence, intr, timeout);
+       if (fence->ops->wait)
+               ret = fence->ops->wait(fence, intr, timeout);
+       else
+               ret = dma_fence_default_wait(fence, intr, timeout);
        trace_dma_fence_wait_end(fence);
        return ret;
 }
 EXPORT_SYMBOL(dma_fence_wait_timeout);
 
+/**
+ * dma_fence_release - default release function for fences
+ * @kref: &dma_fence.refcount
+ *
+ * This is the default release function for &dma_fence. Drivers shouldn't call
+ * this directly, but instead call dma_fence_put().
+ */
 void dma_fence_release(struct kref *kref)
 {
        struct dma_fence *fence =
@@ -181,6 +231,13 @@ void dma_fence_release(struct kref *kref)
 }
 EXPORT_SYMBOL(dma_fence_release);
 
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
 void dma_fence_free(struct dma_fence *fence)
 {
        kfree_rcu(fence, rcu);
@@ -189,10 +246,11 @@ EXPORT_SYMBOL(dma_fence_free);
 
 /**
  * dma_fence_enable_sw_signaling - enable signaling on fence
- * @fence:     [in]    the fence to enable
+ * @fence: the fence to enable
  *
- * this will request for sw signaling to be enabled, to make the fence
- * complete as soon as possible
+ * This will request that sw signaling be enabled, to make the fence
+ * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
+ * internally.
  */
 void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 {
@@ -200,7 +258,8 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
 
        if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                              &fence->flags) &&
-           !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+           !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+           fence->ops->enable_signaling) {
                trace_dma_fence_enable_signal(fence);
 
                spin_lock_irqsave(fence->lock, flags);
@@ -216,24 +275,24 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
 /**
  * dma_fence_add_callback - add a callback to be called when the fence
  * is signaled
- * @fence:     [in]    the fence to wait on
- * @cb:                [in]    the callback to register
- * @func:      [in]    the function to call
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
  *
- * cb will be initialized by dma_fence_add_callback, no initialization
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
  * by the caller is required. Any number of callbacks can be registered
  * to a fence, but a callback can only be registered to one fence at a time.
  *
  * Note that the callback can be called from an atomic context.  If
  * fence is already signaled, this function will return -ENOENT (and
- * *not* call the callback)
+ * *not* call the callback).
  *
  * Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to dma_fence_wait, however the caller doesn't need to
- * keep a refcount to fence afterwards: when software access is enabled,
- * the creator of the fence is required to keep the fence alive until
- * after it signals with dma_fence_signal. The callback itself can be called
- * from irq context.
+ * refcount as it does to dma_fence_wait(), however the caller doesn't need to
+ * keep a refcount to fence after dma_fence_add_callback() has returned:
+ * when software access is enabled, the creator of the fence is required to keep
+ * the fence alive until after it signals with dma_fence_signal(). The callback
+ * itself can be called from irq context.
  *
  * Returns 0 in case of success, -ENOENT if the fence is already signaled
  * and -EINVAL in case of error.
@@ -260,7 +319,7 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
 
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                ret = -ENOENT;
-       else if (!was_set) {
+       else if (!was_set && fence->ops->enable_signaling) {
                trace_dma_fence_enable_signal(fence);
 
                if (!fence->ops->enable_signaling(fence)) {
@@ -282,7 +341,7 @@ EXPORT_SYMBOL(dma_fence_add_callback);
 
 /**
  * dma_fence_get_status - returns the status upon completion
- * @fence: [in]        the dma_fence to query
+ * @fence: the dma_fence to query
  *
  * This wraps dma_fence_get_status_locked() to return the error status
  * condition on a signaled fence. See dma_fence_get_status_locked() for more
@@ -307,8 +366,8 @@ EXPORT_SYMBOL(dma_fence_get_status);
 
 /**
  * dma_fence_remove_callback - remove a callback from the signaling list
- * @fence:     [in]    the fence to wait on
- * @cb:                [in]    the callback to remove
+ * @fence: the fence to wait on
+ * @cb: the callback to remove
  *
  * Remove a previously queued callback from the fence. This function returns
  * true if the callback is successfully removed, or false if the fence has
@@ -319,6 +378,9 @@ EXPORT_SYMBOL(dma_fence_get_status);
  * doing, since deadlocks and race conditions could occur all too easily. For
  * this reason, it should only ever be done on hardware lockup recovery,
  * with a reference held to the fence.
+ *
+ * Behaviour is undefined if @cb has not been added to @fence using
+ * dma_fence_add_callback() beforehand.
  */
 bool
 dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
@@ -355,9 +417,9 @@ dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 /**
  * dma_fence_default_wait - default sleep until the fence gets signaled
  * or until timeout elapses
- * @fence:     [in]    the fence to wait on
- * @intr:      [in]    if true, do an interruptible wait
- * @timeout:   [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
  *
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
  * remaining timeout in jiffies on success. If timeout is zero the value one is
@@ -388,7 +450,7 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                goto out;
 
-       if (!was_set) {
+       if (!was_set && fence->ops->enable_signaling) {
                trace_dma_fence_enable_signal(fence);
 
                if (!fence->ops->enable_signaling(fence)) {
@@ -450,12 +512,12 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
 /**
  * dma_fence_wait_any_timeout - sleep until any fence gets signaled
  * or until timeout elapses
- * @fences:    [in]    array of fences to wait on
- * @count:     [in]    number of fences to wait on
- * @intr:      [in]    if true, do an interruptible wait
- * @timeout:   [in]    timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
- * @idx:       [out]   the first signaled fence index, meaningful only on
- *                     positive return
+ * @fences: array of fences to wait on
+ * @count: number of fences to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: used to store the first signaled fence index, meaningful only on
+ *     positive return
  *
  * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
  * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
@@ -464,6 +526,8 @@ dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
  * Synchronous waits for the first fence in the array to be signaled. The
  * caller needs to hold a reference to all fences in the array, otherwise a
  * fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_timeout().
  */
 signed long
 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
@@ -496,11 +560,6 @@ dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];
 
-               if (fence->ops->wait != dma_fence_default_wait) {
-                       ret = -EINVAL;
-                       goto fence_rm_cb;
-               }
-
                cb[i].task = current;
                if (dma_fence_add_callback(fence, &cb[i].base,
                                           dma_fence_default_wait_cb)) {
@@ -541,27 +600,25 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
 
 /**
  * dma_fence_init - Initialize a custom fence.
- * @fence:     [in]    the fence to initialize
- * @ops:       [in]    the dma_fence_ops for operations on this fence
- * @lock:      [in]    the irqsafe spinlock to use for locking this fence
- * @context:   [in]    the execution context this fence is run on
- * @seqno:     [in]    a linear increasing sequence number for this context
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
  *
  * Initializes an allocated fence, the caller doesn't have to keep its
  * refcount after committing with this fence, but it will need to hold a
- * refcount again if dma_fence_ops.enable_signaling gets called. This can
- * be used for other implementing other types of fence.
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
  *
  * context and seqno are used for easy comparison between fences, allowing
- * to check which fence is later by simply using dma_fence_later.
+ * to check which fence is later by simply using dma_fence_later().
  */
 void
 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
               spinlock_t *lock, u64 context, unsigned seqno)
 {
        BUG_ON(!lock);
-       BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
-              !ops->get_driver_name || !ops->get_timeline_name);
+       BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
 
        kref_init(&fence->refcount);
        fence->ops = ops;
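
With ->wait and ->enable_signaling now optional (the core falls back to
dma_fence_default_wait() and treats a missing enable_signaling as already
enabled), the smallest valid ops table only names the fence. A sketch with
hypothetical driver names:

    static const char *my_get_driver_name(struct dma_fence *f)
    {
        return "my_driver";
    }

    static const char *my_get_timeline_name(struct dma_fence *f)
    {
        return "my_timeline";
    }

    static const struct dma_fence_ops my_fence_ops = {
        .get_driver_name   = my_get_driver_name,
        .get_timeline_name = my_get_timeline_name,
        /* ->enable_signaling, ->wait and ->release may all be omitted */
    };

    /* lock must be irqsafe; context comes from dma_fence_context_alloc() */
    dma_fence_init(&f->base, &my_fence_ops, &f->lock, context, ++f->seqno);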
index 314eb1071cce79af9d276c09acd0a832cbd55163..6c95f61a32e73d54ed70f461e676826075419f45 100644 (file)
@@ -46,7 +46,7 @@
  * write-side updates.
  */
 
-DEFINE_WW_CLASS(reservation_ww_class);
+DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
 struct lock_class_key reservation_seqcount_class;
@@ -141,6 +141,7 @@ reservation_object_add_shared_inplace(struct reservation_object *obj,
        if (signaled) {
                RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
        } else {
+               BUG_ON(fobj->shared_count >= fobj->shared_max);
                RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
                fobj->shared_count++;
        }
@@ -230,10 +231,9 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
        old = reservation_object_get_list(obj);
        obj->staged = NULL;
 
-       if (!fobj) {
-               BUG_ON(old->shared_count >= old->shared_max);
+       if (!fobj)
                reservation_object_add_shared_inplace(obj, old, fence);
-       } else
+       else
                reservation_object_add_shared_replace(obj, old, fobj, fence);
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
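
Moving the BUG_ON into the in-place path works because callers are expected to
reserve a slot before adding a shared fence, so only the in-place branch can
ever run out of room. The expected calling pattern, sketched:

    /* reserve a shared slot first; this may allocate and can fail */
    ret = reservation_object_reserve_shared(obj);
    if (ret)
        return ret;

    /* guaranteed to fit now, so the in-place BUG_ON should never fire */
    reservation_object_add_shared_fence(obj, fence);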
index 3d78ca89a605396af847367d8f6aafc4a3b36456..53c1d6d36a642f04ec49aa0e3cfeabeb84205609 100644 (file)
@@ -188,7 +188,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
        .get_timeline_name = timeline_fence_get_timeline_name,
        .enable_signaling = timeline_fence_enable_signaling,
        .signaled = timeline_fence_signaled,
-       .wait = dma_fence_default_wait,
        .release = timeline_fence_release,
        .fence_value_str = timeline_fence_value_str,
        .timeline_value_str = timeline_fence_timeline_value_str,
index fa31cccbe04faf5fa6a8adb07abf6b48a4a6cdd2..6bfa217ed6d0de81d1ef51b37accdba9ae4cd04b 100644 (file)
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
 
-       if (request > d->dma_requests)
+       if (request >= d->dma_requests)
                return NULL;
 
        return dma_get_slave_channel(&(d->chans[request].vc.chan));
index defcdde4d358b19cc5430de95fb5e9f16ec538ca..de0957fe966821beb79ee1b75470a8834509ef44 100644 (file)
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
                         1 : PL330_MAX_BURST);
 
index 9b5ca8691f27dcf6561fbd98051b697ab2da6011..a4a931ddf6f695fa21a25a359a94ee0c57f92beb 100644 (file)
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       if (__dma_omap15xx(od->plat->dma_attr))
+               od->ddev.residue_granularity =
+                               DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+       else
+               od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
index 951b6c79f166a7d2b4ec14e12096559df9aed684..624a11cb07e23b775d097d930997cc6ebd171b06 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku,         0444, DMI_PRODUCT_SKU);
 DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
        ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+       ADD_DMI_ATTR(product_sku,       DMI_PRODUCT_SKU);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54e66adef2525179e49ecfe9fc04e253ecc18e51..f2483548cde92d692f748d6a9c7da0cbf98274a3 100644 (file)
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
                dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
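
With the field decoded from SMBIOS type 1, offset 25, and exported by dmi-id,
the SKU becomes visible both in-kernel and as /sys/class/dmi/id/product_sku.
An in-kernel usage sketch:

    const char *sku = dmi_get_system_info(DMI_PRODUCT_SKU);

    if (sku)
        pr_info("system SKU: %s\n", sku);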
index caa37a6dd9d4eca506e3a0c2fed3c636fbd05d2b..a90b0b8fc69a18abb62d10c3f046a88a7300fd5b 100644 (file)
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
        efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
        efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
        efi_status_t status;
-       efi_physical_addr_t log_location, log_last_entry;
+       efi_physical_addr_t log_location = 0, log_last_entry = 0;
        struct linux_efi_tpm_eventlog *log_tbl = NULL;
        unsigned long first_entry_addr, last_entry_addr;
        size_t log_size, last_entry_size;
index dd4edd8f22ceebb67c4f6e1ab486c4ac9e9a1106..7fa793672a7a969239329ef1fccc5a2391c764c8 100644 (file)
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 
        mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
                              &altera_cvp_ops, conf);
-       if (!mgr)
-               return -ENOMEM;
+       if (!mgr) {
+               ret = -ENOMEM;
+               goto err_unmap;
+       }
 
        pci_set_drvdata(pdev, mgr);
 
index d3cf9502e7e7f46abeb366c735f2e47d6808e64d..58faeb1cef63abaf8cadd77e9c3960f3ee7e17ee 100644 (file)
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
        fwspec.param_count = 2;
        fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
-       fwspec.param[1] = IRQ_TYPE_NONE;
+       /*
+        * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+        * temporarily. Anyway, ->irq_set_type() will override it later.
+        */
+       fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
 
        return irq_create_fwspec_mapping(&fwspec);
 }
index 28d968088131f433901d3b5b4bee4da8a9002250..53a14ee8ad6d364788a891d4f2aa083956df5f04 100644 (file)
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
         * Note that active low is the default.
         */
        if (IS_ENABLED(CONFIG_REGULATOR) &&
-           (of_device_is_compatible(np, "reg-fixed-voltage") ||
+           (of_device_is_compatible(np, "regulator-fixed") ||
+            of_device_is_compatible(np, "reg-fixed-voltage") ||
             of_device_is_compatible(np, "regulator-gpio"))) {
                /*
                 * The regulator GPIO handles are specified such that the
index 2a72d2feb76d3dfb780b968cdaa791a88ffbd149..cb88528e7b10c0aff35dc17f3996a451a064e85a 100644 (file)
@@ -122,6 +122,16 @@ config DRM_LOAD_EDID_FIRMWARE
          default case is N. Details and instructions how to build your own
          EDID data are given in Documentation/EDID/HOWTO.txt.
 
+config DRM_DP_CEC
+       bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
+       select CEC_CORE
+       help
+         Choose this option if you want to enable HDMI CEC support for
+         DisplayPort/USB-C to HDMI adapters.
+
+         Note: not all adapters support this feature, and even those that
+         do often do not hook up the CEC pin.
+
 config DRM_TTM
        tristate
        depends on DRM && MMU
@@ -213,6 +223,17 @@ config DRM_VGEM
          as used by Mesa's software renderer for enhanced performance.
          If M is selected the module will be called vgem.
 
+config DRM_VKMS
+       tristate "Virtual KMS (EXPERIMENTAL)"
+       depends on DRM
+       select DRM_KMS_HELPER
+       default n
+       help
+         Virtual Kernel Mode-Setting (VKMS) is used for testing or for
+         running a GPU in headless machines. Choose this option to get
+         a VKMS.
+
+         If M is selected the module will be called vkms.
 
 source "drivers/gpu/drm/exynos/Kconfig"
 
index ef9f3dab287fd4b38188ea3354117b9abed5cd23..a6771cef85e25d74b73f1a2ff29bd34110605488 100644 (file)
@@ -18,7 +18,7 @@ drm-y       :=        drm_auth.o drm_bufs.o drm_cache.o \
                drm_encoder.o drm_mode_object.o drm_property.o \
                drm_plane.o drm_color_mgmt.o drm_print.o \
                drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
-               drm_syncobj.o drm_lease.o
+               drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o
 
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
@@ -41,6 +41,7 @@ drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
+drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
@@ -69,6 +70,7 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)  +=via/
 obj-$(CONFIG_DRM_VGEM) += vgem/
+obj-$(CONFIG_DRM_VKMS) += vkms/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
index 06192698bd96e786a9b95b24e4ee0892ce961627..5b393622f59205700acf687179841e7e0b0b8f5b 100644 (file)
 #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
 #define GENERIC_OBJECT_ID_MXM_OPM                 0x03
 #define GENERIC_OBJECT_ID_STEREO_PIN              0x04        //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT          0x05
 
 /****************************************************/
 /* Graphics Object ENUM ID Definition               */
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
 
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1    (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2    (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
 /****************************************************/
 /* Object Cap definition - Shared with BIOS         */
 /****************************************************/
index a59c07590ceec2f314768066cbed28126ed68a6d..447c4c7a36d686b0e2fb93722611066f8e93db4e 100644 (file)
@@ -73,6 +73,8 @@
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
 
 /*
  * Modules parameters.
@@ -105,11 +107,8 @@ extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
 extern int amdgpu_vm_update_mode;
 extern int amdgpu_dc;
-extern int amdgpu_dc_log;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
 extern uint amdgpu_pcie_gen_cap;
 extern uint amdgpu_pcie_lane_cap;
 extern uint amdgpu_cg_mask;
@@ -190,6 +189,7 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
 
 enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -599,17 +599,6 @@ struct amdgpu_ib {
 
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
 
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-                    struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     struct drm_sched_entity *entity, void *owner,
-                     struct dma_fence **f);
-
 /*
  * Queue manager
  */
@@ -683,8 +672,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
 
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
 
@@ -701,37 +690,6 @@ struct amdgpu_fpriv {
        struct amdgpu_ctx_mgr   ctx_mgr;
 };
 
-/*
- * residency list
- */
-struct amdgpu_bo_list_entry {
-       struct amdgpu_bo                *robj;
-       struct ttm_validate_buffer      tv;
-       struct amdgpu_bo_va             *bo_va;
-       uint32_t                        priority;
-       struct page                     **user_pages;
-       int                             user_invalidated;
-};
-
-struct amdgpu_bo_list {
-       struct mutex lock;
-       struct rcu_head rhead;
-       struct kref refcount;
-       struct amdgpu_bo *gds_obj;
-       struct amdgpu_bo *gws_obj;
-       struct amdgpu_bo *oa_obj;
-       unsigned first_userptr;
-       unsigned num_entries;
-       struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-                            struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-
 /*
  * GFX stuff
  */
@@ -930,6 +888,11 @@ struct amdgpu_ngg {
        bool                    init;
 };
 
+struct sq_work {
+       struct work_struct      work;
+       unsigned ih_data;
+};
+
 struct amdgpu_gfx {
        struct mutex                    gpu_clock_mutex;
        struct amdgpu_gfx_config        config;
@@ -968,6 +931,10 @@ struct amdgpu_gfx {
        struct amdgpu_irq_src           eop_irq;
        struct amdgpu_irq_src           priv_reg_irq;
        struct amdgpu_irq_src           priv_inst_irq;
+       struct amdgpu_irq_src           cp_ecc_error_irq;
+       struct amdgpu_irq_src           sq_irq;
+       struct sq_work                  sq_work;
+
        /* gfx status */
        uint32_t                        gfx_current_status;
        /* ce ram size*/
@@ -1019,6 +986,7 @@ struct amdgpu_cs_parser {
 
        /* scheduler job object */
        struct amdgpu_job       *job;
+       struct amdgpu_ring      *ring;
 
        /* buffer objects */
        struct ww_acquire_ctx           ticket;
@@ -1040,40 +1008,6 @@ struct amdgpu_cs_parser {
        struct drm_syncobj **post_dep_syncobjs;
 };
 
-#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
-
-struct amdgpu_job {
-       struct drm_sched_job    base;
-       struct amdgpu_device    *adev;
-       struct amdgpu_vm        *vm;
-       struct amdgpu_ring      *ring;
-       struct amdgpu_sync      sync;
-       struct amdgpu_sync      sched_sync;
-       struct amdgpu_ib        *ibs;
-       struct dma_fence        *fence; /* the hw fence */
-       uint32_t                preamble_status;
-       uint32_t                num_ibs;
-       void                    *owner;
-       uint64_t                fence_ctx; /* the fence_context this job uses */
-       bool                    vm_needs_flush;
-       uint64_t                vm_pd_addr;
-       unsigned                vmid;
-       unsigned                pasid;
-       uint32_t                gds_base, gds_size;
-       uint32_t                gws_base, gws_size;
-       uint32_t                oa_base, oa_size;
-       uint32_t                vram_lost_counter;
-
-       /* user fence handling */
-       uint64_t                uf_addr;
-       uint64_t                uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job)               \
-               container_of((sched_job), struct amdgpu_job, base)
-
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
                                      uint32_t ib_idx, int idx)
 {
@@ -1269,43 +1203,6 @@ struct amdgpu_vram_scratch {
 /*
  * ACPI
  */
-struct amdgpu_atif_notification_cfg {
-       bool enabled;
-       int command_code;
-};
-
-struct amdgpu_atif_notifications {
-       bool display_switch;
-       bool expansion_mode_change;
-       bool thermal_state;
-       bool forced_power_state;
-       bool system_power_state;
-       bool display_conf_change;
-       bool px_gfx_switch;
-       bool brightness_change;
-       bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
-       bool system_params;
-       bool sbios_requests;
-       bool select_active_disp;
-       bool lid_state;
-       bool get_tv_standard;
-       bool set_tv_standard;
-       bool get_panel_expansion_mode;
-       bool set_panel_expansion_mode;
-       bool temperature_change;
-       bool graphics_device_types;
-};
-
-struct amdgpu_atif {
-       struct amdgpu_atif_notifications notifications;
-       struct amdgpu_atif_functions functions;
-       struct amdgpu_atif_notification_cfg notification_cfg;
-       struct amdgpu_encoder *encoder_for_bl;
-};
-
 struct amdgpu_atcs_functions {
        bool get_ext_state;
        bool pcie_perf_req;
@@ -1425,6 +1322,7 @@ enum amd_hw_ip_block_type {
        PWR_HWIP,
        NBIF_HWIP,
        THM_HWIP,
+       CLK_HWIP,
        MAX_HWIP
 };
 
@@ -1466,7 +1364,7 @@ struct amdgpu_device {
 #if defined(CONFIG_DEBUG_FS)
        struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
-       struct amdgpu_atif              atif;
+       struct amdgpu_atif              *atif;
        struct amdgpu_atcs              atcs;
        struct mutex                    srbm_mutex;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1615,9 +1513,9 @@ struct amdgpu_device {
        DECLARE_HASHTABLE(mn_hash, 7);
 
        /* tracking pinned memory */
-       u64 vram_pin_size;
-       u64 invisible_pin_size;
-       u64 gart_pin_size;
+       atomic64_t vram_pin_size;
+       atomic64_t visible_pin_size;
+       atomic64_t gart_pin_size;
 
        /* amdkfd interface */
        struct kfd_dev          *kfd;
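With the counters switched to atomic64_t, pin accounting no longer needs a lock. A minimal sketch of the intended access pattern (the helper below is hypothetical, not part of this patch):

    /* Hypothetical helper: lockless pin accounting via the atomic64 API. */
    static void example_account_vram_pin(struct amdgpu_device *adev,
                                         u64 bytes, bool pin)
    {
            if (pin)
                    atomic64_add(bytes, &adev->vram_pin_size);
            else
                    atomic64_sub(bytes, &adev->vram_pin_size);

            pr_debug("VRAM pinned: %llu bytes\n",
                     (u64)atomic64_read(&adev->vram_pin_size));
    }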
@@ -1812,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
@@ -1865,8 +1764,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
                                 struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
@@ -1894,6 +1791,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
 static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
 /*
  * KMS
  */
index f4c474a9587510ed844ae7ec275b8b73c7695572..71efcf38f11beb2c628cee39ff4bae7fa50bedbd 100644 (file)
 #define ACP_I2S_COMP2_CAP_REG_OFFSET           0xa8
 #define ACP_I2S_COMP1_PLAY_REG_OFFSET          0x6c
 #define ACP_I2S_COMP2_PLAY_REG_OFFSET          0x68
+#define ACP_BT_PLAY_REGS_START                 0x14970
+#define ACP_BT_PLAY_REGS_END                   0x14a24
+#define ACP_BT_COMP1_REG_OFFSET                        0xac
+#define ACP_BT_COMP2_REG_OFFSET                        0xa8
 
 #define mmACP_PGFSM_RETAIN_REG                 0x51c9
 #define mmACP_PGFSM_CONFIG_REG                 0x51ca
@@ -77,7 +81,7 @@
 #define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE     0x000000FF
 
 #define ACP_TIMEOUT_LOOP                       0x000000FF
-#define ACP_DEVS                               3
+#define ACP_DEVS                               4
 #define ACP_SRC_ID                             162
 
 enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
        if (adev->acp.acp_cell == NULL)
                return -ENOMEM;
 
-       adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+       adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
        if (adev->acp.acp_res == NULL) {
                kfree(adev->acp.acp_cell);
                return -ENOMEM;
        }
 
-       i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+       i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
        if (i2s_pdata == NULL) {
                kfree(adev->acp.acp_res);
                kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
        i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
        i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
 
+       i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+       switch (adev->asic_type) {
+       case CHIP_STONEY:
+               i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+               break;
+       default:
+               break;
+       }
+
+       i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+       i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+       i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+       i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
        adev->acp.acp_res[0].name = "acp2x_dma";
        adev->acp.acp_res[0].flags = IORESOURCE_MEM;
        adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
        adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
        adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
 
-       adev->acp.acp_res[3].name = "acp2x_dma_irq";
-       adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
-       adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
-       adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+       adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+       adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+       adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+       adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+       adev->acp.acp_res[4].name = "acp2x_dma_irq";
+       adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+       adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+       adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
 
        adev->acp.acp_cell[0].name = "acp_audio_dma";
-       adev->acp.acp_cell[0].num_resources = 4;
+       adev->acp.acp_cell[0].num_resources = 5;
        adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
        adev->acp.acp_cell[0].platform_data = &adev->asic_type;
        adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
        adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
        adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
 
+       adev->acp.acp_cell[3].name = "designware-i2s";
+       adev->acp.acp_cell[3].num_resources = 1;
+       adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+       adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+       adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
        r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
                                                                ACP_DEVS);
        if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
        val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
        val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
        cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
        return 0;
 }
 
index 8fa850a070e0fe8ea7a67823008ff7e543d8d1dc..353993218f213ff6e3686edfc9444b3296ba31f9 100644 (file)
 #include "amd_acpi.h"
 #include "atom.h"
 
+struct amdgpu_atif_notification_cfg {
+       bool enabled;
+       int command_code;
+};
+
+struct amdgpu_atif_notifications {
+       bool display_switch;
+       bool expansion_mode_change;
+       bool thermal_state;
+       bool forced_power_state;
+       bool system_power_state;
+       bool display_conf_change;
+       bool px_gfx_switch;
+       bool brightness_change;
+       bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+       bool system_params;
+       bool sbios_requests;
+       bool select_active_disp;
+       bool lid_state;
+       bool get_tv_standard;
+       bool set_tv_standard;
+       bool get_panel_expansion_mode;
+       bool set_panel_expansion_mode;
+       bool temperature_change;
+       bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+       acpi_handle handle;
+
+       struct amdgpu_atif_notifications notifications;
+       struct amdgpu_atif_functions functions;
+       struct amdgpu_atif_notification_cfg notification_cfg;
+       struct amdgpu_encoder *encoder_for_bl;
+};
+
 /* Call the ATIF method
  */
 /**
@@ -46,8 +85,9 @@
  * Executes the requested ATIF function (all asics).
  * Returns a pointer to the acpi output buffer.
  */
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
-               struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+                                          int function,
+                                          struct acpi_buffer *params)
 {
        acpi_status status;
        union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
                atif_arg_elements[1].integer.value = 0;
        }
 
-       status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+       status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+                                     &buffer);
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
  * (all asics).
  * returns 0 on success, error on failure.
  */
-static int amdgpu_atif_verify_interface(acpi_handle handle,
-               struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
 {
        union acpi_object *info;
        struct atif_verify_interface output;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
        if (!info)
                return -EIO;
 
@@ -176,6 +216,35 @@ out:
        return err;
 }
 
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+       acpi_handle handle = NULL;
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+       acpi_status status;
+
+       /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on
+        * dGPU-only systems, ATIF is in the dGPU's namespace.
+        */
+       status = acpi_get_handle(dhandle, "ATIF", &handle);
+       if (ACPI_SUCCESS(status))
+               goto out;
+
+       if (amdgpu_has_atpx()) {
+               status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+                                        &handle);
+               if (ACPI_SUCCESS(status))
+                       goto out;
+       }
+
+       DRM_DEBUG_DRIVER("No ATIF handle found\n");
+       return NULL;
+out:
+       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+       DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+       return handle;
+}
+
 /**
  * amdgpu_atif_get_notification_params - determine notify configuration
  *
@@ -188,15 +257,16 @@ out:
  * where n is specified in the result if a notifier is used.
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
-               struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
 {
        union acpi_object *info;
+       struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
        struct atif_system_params params;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+                               NULL);
        if (!info) {
                err = -EIO;
                goto out;
@@ -250,14 +320,15 @@ out:
  * (all asics).
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
-               struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+                                         struct atif_sbios_requests *req)
 {
        union acpi_object *info;
        size_t size;
        int count = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+                               NULL);
        if (!info)
                return -EIO;
 
@@ -290,11 +361,9 @@ out:
  * Returns NOTIFY code
  */
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
-                       struct acpi_bus_event *event)
+                              struct acpi_bus_event *event)
 {
-       struct amdgpu_atif *atif = &adev->atif;
-       struct atif_sbios_requests req;
-       acpi_handle handle;
+       struct amdgpu_atif *atif = adev->atif;
        int count;
 
        DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,48 +372,54 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
                return NOTIFY_DONE;
 
-       if (!atif->notification_cfg.enabled ||
+       if (!atif ||
+           !atif->notification_cfg.enabled ||
            event->type != atif->notification_cfg.command_code)
                /* Not our event */
                return NOTIFY_DONE;
 
-       /* Check pending SBIOS requests */
-       handle = ACPI_HANDLE(&adev->pdev->dev);
-       count = amdgpu_atif_get_sbios_requests(handle, &req);
+       if (atif->functions.sbios_requests) {
+               struct atif_sbios_requests req;
 
-       if (count <= 0)
-               return NOTIFY_DONE;
+               /* Check pending SBIOS requests */
+               count = amdgpu_atif_get_sbios_requests(atif, &req);
 
-       DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+               if (count <= 0)
+                       return NOTIFY_DONE;
 
-       if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
-               struct amdgpu_encoder *enc = atif->encoder_for_bl;
+               DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
 
-               if (enc) {
-                       struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+               /* todo: add DC handling */
+               if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+                   !amdgpu_device_has_dc_support(adev)) {
+                       struct amdgpu_encoder *enc = atif->encoder_for_bl;
 
-                       DRM_DEBUG_DRIVER("Changing brightness to %d\n",
-                                       req.backlight_level);
+                       if (enc) {
+                               struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+                               DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+                                                req.backlight_level);
 
-                       amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+                               amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-                       backlight_force_update(dig->bl_dev,
-                                              BACKLIGHT_UPDATE_HOTKEY);
+                               backlight_force_update(dig->bl_dev,
+                                                      BACKLIGHT_UPDATE_HOTKEY);
 #endif
+                       }
                }
-       }
-       if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-               if ((adev->flags & AMD_IS_PX) &&
-                   amdgpu_atpx_dgpu_req_power_for_displays()) {
-                       pm_runtime_get_sync(adev->ddev->dev);
-                       /* Just fire off a uevent and let userspace tell us what to do */
-                       drm_helper_hpd_irq_event(adev->ddev);
-                       pm_runtime_mark_last_busy(adev->ddev->dev);
-                       pm_runtime_put_autosuspend(adev->ddev->dev);
+               if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+                       if ((adev->flags & AMD_IS_PX) &&
+                           amdgpu_atpx_dgpu_req_power_for_displays()) {
+                               pm_runtime_get_sync(adev->ddev->dev);
+                               /* Just fire off a uevent and let userspace tell us what to do */
+                               drm_helper_hpd_irq_event(adev->ddev);
+                               pm_runtime_mark_last_busy(adev->ddev->dev);
+                               pm_runtime_put_autosuspend(adev->ddev->dev);
+                       }
                }
+               /* TODO: check other events */
        }
-       /* TODO: check other events */
 
        /* We've handled the event, stop the notifier chain. The ACPI interface
         * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
@@ -641,8 +716,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
  */
 int amdgpu_acpi_init(struct amdgpu_device *adev)
 {
-       acpi_handle handle;
-       struct amdgpu_atif *atif = &adev->atif;
+       acpi_handle handle, atif_handle;
+       struct amdgpu_atif *atif;
        struct amdgpu_atcs *atcs = &adev->atcs;
        int ret;
 
@@ -658,12 +733,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
                DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
        }
 
+       /* Probe for ATIF, and initialize it if found */
+       atif_handle = amdgpu_atif_probe_handle(handle);
+       if (!atif_handle)
+               goto out;
+
+       atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+       if (!atif) {
+               DRM_WARN("Not enough memory to initialize ATIF\n");
+               goto out;
+       }
+       atif->handle = atif_handle;
+
        /* Call the ATIF method */
-       ret = amdgpu_atif_verify_interface(handle, atif);
+       ret = amdgpu_atif_verify_interface(atif);
        if (ret) {
                DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+               kfree(atif);
                goto out;
        }
+       adev->atif = atif;
 
        if (atif->notifications.brightness_change) {
                struct drm_encoder *tmp;
@@ -693,8 +782,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
        }
 
        if (atif->functions.system_params) {
-               ret = amdgpu_atif_get_notification_params(handle,
-                               &atif->notification_cfg);
+               ret = amdgpu_atif_get_notification_params(atif);
                if (ret) {
                        DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
                                        ret);
@@ -720,4 +808,6 @@ out:
 void amdgpu_acpi_fini(struct amdgpu_device *adev)
 {
        unregister_acpi_notifier(&adev->acpi_nb);
+       if (adev->atif)
+               kfree(adev->atif);
 }
index 305143fcc1ceed0d1231efa742864de460b608d0..f8bbbb3a95043ebee9d5d02471b83e98fb923a0c 100644 (file)
@@ -243,6 +243,33 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
        return r;
 }
 
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd)
+               r = kgd2kfd->pre_reset(adev->kfd);
+
+       return r;
+}
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
+{
+       int r = 0;
+
+       if (adev->kfd)
+               r = kgd2kfd->post_reset(adev->kfd);
+
+       return r;
+}
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       amdgpu_device_gpu_recover(adev, NULL, false);
+}
+
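For context, a hedged sketch of how the new hooks bracket a device reset; the reset body is elided and the wrapper function is illustrative only:

    static int example_gpu_reset(struct amdgpu_device *adev)
    {
            int r;

            r = amdgpu_amdkfd_pre_reset(adev);      /* quiesce KFD */
            if (r)
                    return r;

            /* ... ASIC reset would run here ... */

            return amdgpu_amdkfd_post_reset(adev);  /* resume KFD */
    }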
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
                        void **cpu_ptr)
@@ -251,7 +278,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        struct amdgpu_bo *bo = NULL;
        struct amdgpu_bo_param bp;
        int r;
-       uint64_t gpu_addr_tmp = 0;
        void *cpu_ptr_tmp = NULL;
 
        memset(&bp, 0, sizeof(bp));
@@ -275,13 +301,18 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                goto allocate_mem_reserve_bo_failed;
        }
 
-       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
-                               &gpu_addr_tmp);
+       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r) {
                dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
                goto allocate_mem_pin_bo_failed;
        }
 
+       r = amdgpu_ttm_alloc_gart(&bo->tbo);
+       if (r) {
+               dev_err(adev->dev, "%p bind failed\n", bo);
+               goto allocate_mem_kmap_bo_failed;
+       }
+
        r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
        if (r) {
                dev_err(adev->dev,
@@ -290,7 +321,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        }
 
        *mem_obj = bo;
-       *gpu_addr = gpu_addr_tmp;
+       *gpu_addr = amdgpu_bo_gpu_offset(bo);
        *cpu_ptr = cpu_ptr_tmp;
 
        amdgpu_bo_unreserve(bo);
@@ -457,6 +488,14 @@ err:
        return ret;
 }
 
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       amdgpu_dpm_switch_power_profile(adev,
+                                       PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+}
+
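A hedged usage sketch: the KFD side can request the COMPUTE power profile while queues run and release it when they drain (the trigger point is an assumption, not shown in this patch):

    /* Hypothetical call site in queue management code. */
    static void example_queues_changed(struct kgd_dev *kgd, bool any_active)
    {
            amdgpu_amdkfd_set_compute_idle(kgd, !any_active);
    }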
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 {
        if (adev->kfd) {
index a8418a3f4e9d3f28d39cff6ece0e9462c1b9845e..2f379c183ed20be8af61e839ebbd75b9890a8593 100644 (file)
@@ -119,6 +119,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -126,6 +127,12 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+
 /* Shared API */
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
@@ -183,6 +190,9 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
                                            struct dma_fence **ef);
 
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+                                             struct kfd_vm_fault_info *info);
+
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
 
index 2c14025e5e7680b563dd3e6b83afc2815d19dd1d..574c1181ae9a22c1e63f37a5919c47a3b2d9c476 100644 (file)
@@ -173,7 +173,5 @@ static const struct dma_fence_ops amdkfd_fence_ops = {
        .get_driver_name = amdkfd_fence_get_driver_name,
        .get_timeline_name = amdkfd_fence_get_timeline_name,
        .enable_signaling = amdkfd_fence_enable_signaling,
-       .signaled = NULL,
-       .wait = dma_fence_default_wait,
        .release = amdkfd_fence_release,
 };
index ea79908dac4cbcae178b5398b69aee12f4377ea8..ea3f698aef5eaec1353492b636c81eb2d2f84b78 100644 (file)
@@ -145,6 +145,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
                uint32_t page_table_base);
 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
 
 /* Because of REG_GET_FIELD() being used, we put this function in the
  * asic specific file.
@@ -216,6 +217,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
        .submit_ib = amdgpu_amdkfd_submit_ib,
+       .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+       .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+       .gpu_recover = amdgpu_amdkfd_gpu_reset,
+       .set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -571,6 +576,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        unsigned long flags, end_jiffies;
        int retry;
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
        acquire_queue(kgd, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
 
@@ -882,6 +890,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
        int vmid;
        unsigned int tmp;
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
        for (vmid = 0; vmid < 16; vmid++) {
                if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
                        continue;
@@ -911,3 +922,19 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
        RREG32(mmVM_INVALIDATE_RESPONSE);
        return 0;
 }
+
+/**
+ * read_vmid_from_vmfault_reg - read vmid from the VM fault status register
+ *
+ * @kgd: kgd device pointer
+ *
+ * Read the vmid field out of VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
+ */
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+
+       return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
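A minimal sketch of how a fault path might consume this helper to attribute the fault (the surrounding interrupt plumbing is assumed, not part of this hunk):

    static void example_log_vm_fault(struct kgd_dev *kgd)
    {
            uint32_t vmid = read_vmid_from_vmfault_reg(kgd);

            pr_debug("VM fault raised by vmid %u\n", vmid);
    }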
index 19dd665e7307130c0d7a8993815d249f3809cade..f6e53e9352bd83c23f6cfe1d230839a07c17dc20 100644 (file)
@@ -176,6 +176,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
        .submit_ib = amdgpu_amdkfd_submit_ib,
+       .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+       .gpu_recover = amdgpu_amdkfd_gpu_reset,
+       .set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -568,6 +571,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        int retry;
        struct vi_mqd *m = get_mqd(mqd);
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
        acquire_queue(kgd, pipe_id, queue_id);
 
        if (m->cp_hqd_vmid == 0)
@@ -844,6 +850,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
        int vmid;
        unsigned int tmp;
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
        for (vmid = 0; vmid < 16; vmid++) {
                if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
                        continue;
index 1db60aa5b7f0eab0c012cc41af2181bb678a7813..8efedfcb9dfca50defdeec3a189968f3f425a287 100644 (file)
@@ -213,6 +213,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .invalidate_tlbs = invalidate_tlbs,
        .invalidate_tlbs_vmid = invalidate_tlbs_vmid,
        .submit_ib = amdgpu_amdkfd_submit_ib,
+       .gpu_recover = amdgpu_amdkfd_gpu_reset,
+       .set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
@@ -679,6 +681,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        uint32_t temp;
        struct v9_mqd *m = get_mqd(mqd);
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
        acquire_queue(kgd, pipe_id, queue_id);
 
        if (m->cp_hqd_vmid == 0)
@@ -866,6 +871,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
        int vmid;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 
+       if (adev->in_gpu_reset)
+               return -EIO;
+
        if (ring->ready)
                return invalidate_tlbs_with_kiq(adev, pasid);
 
index ff8fd75f7ca51fcd74587683c84a2a60a1439f64..8a707d8bbb1c3c57870aa3e38f5a85b708b6d18d 100644 (file)
@@ -334,7 +334,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
                 "Called with userptr BO"))
                return -EINVAL;
 
-       amdgpu_ttm_placement_from_domain(bo, domain);
+       amdgpu_bo_placement_from_domain(bo, domain);
 
        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
@@ -622,7 +622,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
                pr_err("%s: Failed to reserve BO\n", __func__);
                goto release_out;
        }
-       amdgpu_ttm_placement_from_domain(bo, mem->domain);
+       amdgpu_bo_placement_from_domain(bo, mem->domain);
        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                pr_err("%s: failed to validate BO\n", __func__);
@@ -1587,7 +1587,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
                goto bo_reserve_failed;
        }
 
-       ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+       ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (ret) {
                pr_err("Failed to pin bo. ret %d\n", ret);
                goto pin_failed;
@@ -1621,6 +1621,20 @@ bo_reserve_failed:
        return ret;
 }
 
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+                                             struct kfd_vm_fault_info *mem)
+{
+       struct amdgpu_device *adev;
+
+       adev = (struct amdgpu_device *)kgd;
+       if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+               *mem = *adev->gmc.vm_fault_info;
+               mb();
+               atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+       }
+       return 0;
+}
+
 /* Evict a userptr BO by stopping the queues if necessary
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1680,7 +1694,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 
                if (amdgpu_bo_reserve(bo, true))
                        return -EAGAIN;
-               amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (ret) {
@@ -1824,7 +1838,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                if (mem->user_pages[0]) {
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     mem->user_pages);
-                       amdgpu_ttm_placement_from_domain(bo, mem->domain);
+                       amdgpu_bo_placement_from_domain(bo, mem->domain);
                        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (ret) {
                                pr_err("%s: failed to validate BO\n", __func__);
index daa06e7c5bb73e2d4073fad2177bf50eee0be006..a028661d9e2013dd2a6e5611448438c7590fec82 100644 (file)
@@ -32,7 +32,7 @@ struct amdgpu_atpx_functions {
        bool switch_start;
        bool switch_end;
        bool disp_connectors_mapping;
-       bool disp_detetion_ports;
+       bool disp_detection_ports;
 };
 
 struct amdgpu_atpx {
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
        return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
 }
 
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+       return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -156,7 +162,7 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
        f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
        f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
        f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
-       f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+       f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
 }
 
 /**
@@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
        { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
 };
 
index 19cfff31f2e161f6cc6e884a9c110cfb33586a9f..3079ea8523c55db1110e55a98b81dcbca667e576 100644 (file)
@@ -95,11 +95,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
        r = amdgpu_bo_reserve(sobj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
-       r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+       r = amdgpu_bo_pin(sobj, sdomain);
+       if (r) {
+               amdgpu_bo_unreserve(sobj);
+               goto out_cleanup;
+       }
+       r = amdgpu_ttm_alloc_gart(&sobj->tbo);
        amdgpu_bo_unreserve(sobj);
        if (r) {
                goto out_cleanup;
        }
+       saddr = amdgpu_bo_gpu_offset(sobj);
        bp.domain = ddomain;
        r = amdgpu_bo_create(adev, &bp, &dobj);
        if (r) {
@@ -108,11 +114,17 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
        r = amdgpu_bo_reserve(dobj, false);
        if (unlikely(r != 0))
                goto out_cleanup;
-       r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+       r = amdgpu_bo_pin(dobj, ddomain);
+       if (r) {
+               amdgpu_bo_unreserve(dobj);
+               goto out_cleanup;
+       }
+       r = amdgpu_ttm_alloc_gart(&dobj->tbo);
        amdgpu_bo_unreserve(dobj);
        if (r) {
                goto out_cleanup;
        }
+       daddr = amdgpu_bo_gpu_offset(dobj);
 
        if (adev->mman.buffer_funcs) {
                time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
index 92be7f6de197372beb1a12a0ad1816bd62f738a1..d472a2c8399febe576f29f0522ddaafeb0eabebe 100644 (file)
 #define AMDGPU_BO_LIST_MAX_PRIORITY    32u
 #define AMDGPU_BO_LIST_NUM_BUCKETS     (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
 
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
-                                    struct drm_file *filp,
-                                    struct amdgpu_bo_list *list,
-                                    struct drm_amdgpu_bo_list_entry *info,
-                                    unsigned num_entries);
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+{
+       struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+                                                  rhead);
+
+       kvfree(list);
+}
 
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free(struct kref *ref)
 {
-       unsigned i;
        struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
                                                   refcount);
+       struct amdgpu_bo_list_entry *e;
 
-       for (i = 0; i < list->num_entries; ++i)
-               amdgpu_bo_unref(&list->array[i].robj);
+       amdgpu_bo_list_for_each_entry(e, list)
+               amdgpu_bo_unref(&e->robj);
 
-       mutex_destroy(&list->lock);
-       kvfree(list->array);
-       kfree_rcu(list, rhead);
+       call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
 }
 
-static int amdgpu_bo_list_create(struct amdgpu_device *adev,
-                                struct drm_file *filp,
-                                struct drm_amdgpu_bo_list_entry *info,
-                                unsigned num_entries,
-                                int *id)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+                         struct drm_amdgpu_bo_list_entry *info,
+                         unsigned num_entries, struct amdgpu_bo_list **result)
 {
-       int r;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       unsigned last_entry = 0, first_userptr = num_entries;
+       struct amdgpu_bo_list_entry *array;
        struct amdgpu_bo_list *list;
+       uint64_t total_size = 0;
+       size_t size;
+       unsigned i;
+       int r;
+
+       if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+               return -EINVAL;
 
-       list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+       size = sizeof(struct amdgpu_bo_list);
+       size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+       list = kvmalloc(size, GFP_KERNEL);
        if (!list)
                return -ENOMEM;
 
-       /* initialize bo list*/
-       mutex_init(&list->lock);
        kref_init(&list->refcount);
-       r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
-       if (r) {
-               kfree(list);
-               return r;
-       }
-
-       /* idr alloc should be called only after initialization of bo list. */
-       mutex_lock(&fpriv->bo_list_lock);
-       r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
-       mutex_unlock(&fpriv->bo_list_lock);
-       if (r < 0) {
-               amdgpu_bo_list_free(list);
-               return r;
-       }
-       *id = r;
-
-       return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
-       struct amdgpu_bo_list *list;
-
-       mutex_lock(&fpriv->bo_list_lock);
-       list = idr_remove(&fpriv->bo_list_handles, id);
-       mutex_unlock(&fpriv->bo_list_lock);
-       if (list)
-               kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
-                                    struct drm_file *filp,
-                                    struct amdgpu_bo_list *list,
-                                    struct drm_amdgpu_bo_list_entry *info,
-                                    unsigned num_entries)
-{
-       struct amdgpu_bo_list_entry *array;
-       struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
-       struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
-       struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
-       unsigned last_entry = 0, first_userptr = num_entries;
-       unsigned i;
-       int r;
-       unsigned long total_size = 0;
+       list->gds_obj = adev->gds.gds_gfx_bo;
+       list->gws_obj = adev->gds.gws_gfx_bo;
+       list->oa_obj = adev->gds.oa_gfx_bo;
 
-       array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
-       if (!array)
-               return -ENOMEM;
+       array = amdgpu_bo_list_array_entry(list, 0);
        memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
 
        for (i = 0; i < num_entries; ++i) {
@@ -157,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                entry->tv.shared = !entry->robj->prime_shared_count;
 
                if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
-                       gds_obj = entry->robj;
+                       list->gds_obj = entry->robj;
                if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
-                       gws_obj = entry->robj;
+                       list->gws_obj = entry->robj;
                if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
-                       oa_obj = entry->robj;
+                       list->oa_obj = entry->robj;
 
                total_size += amdgpu_bo_size(entry->robj);
                trace_amdgpu_bo_list_set(list, entry->robj);
        }
 
-       for (i = 0; i < list->num_entries; ++i)
-               amdgpu_bo_unref(&list->array[i].robj);
-
-       kvfree(list->array);
-
-       list->gds_obj = gds_obj;
-       list->gws_obj = gws_obj;
-       list->oa_obj = oa_obj;
        list->first_userptr = first_userptr;
-       list->array = array;
        list->num_entries = num_entries;
 
        trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+
+       *result = list;
        return 0;
 
 error_free:
        while (i--)
                amdgpu_bo_unref(&array[i].robj);
-       kvfree(array);
+       kvfree(list);
        return r;
+
 }
 
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 {
-       struct amdgpu_bo_list *result;
+       struct amdgpu_bo_list *list;
+
+       mutex_lock(&fpriv->bo_list_lock);
+       list = idr_remove(&fpriv->bo_list_handles, id);
+       mutex_unlock(&fpriv->bo_list_lock);
+       if (list)
+               kref_put(&list->refcount, amdgpu_bo_list_free);
+}
 
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+                      struct amdgpu_bo_list **result)
+{
        rcu_read_lock();
-       result = idr_find(&fpriv->bo_list_handles, id);
+       *result = idr_find(&fpriv->bo_list_handles, id);
 
-       if (result) {
-               if (kref_get_unless_zero(&result->refcount)) {
-                       rcu_read_unlock();
-                       mutex_lock(&result->lock);
-               } else {
-                       rcu_read_unlock();
-                       result = NULL;
-               }
-       } else {
+       if (*result && kref_get_unless_zero(&(*result)->refcount)) {
                rcu_read_unlock();
+               return 0;
        }
 
-       return result;
+       rcu_read_unlock();
+       return -ENOENT;
 }
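Lookup now hands back a counted reference instead of a locked list, and freeing is deferred through RCU. A sketch of the resulting lookup/use/release pattern (the handle and the work done with the list are illustrative):

    static int example_use_bo_list(struct amdgpu_fpriv *fpriv, int handle)
    {
            struct amdgpu_bo_list *list;
            int r;

            r = amdgpu_bo_list_get(fpriv, handle, &list);
            if (r)
                    return r;               /* -ENOENT: stale handle */

            /* ... read list->num_entries, iterate entries ... */

            amdgpu_bo_list_put(list);       /* drop ref; frees via call_rcu */
            return 0;
    }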
 
 void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
@@ -220,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
         * concatenated in descending order.
         */
        struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+       struct amdgpu_bo_list_entry *e;
        unsigned i;
 
        for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -230,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
         * in the list, the sort mustn't change the ordering of buffers
         * with the same priority, i.e. it must be stable.
         */
-       for (i = 0; i < list->num_entries; i++) {
-               unsigned priority = list->array[i].priority;
+       amdgpu_bo_list_for_each_entry(e, list) {
+               unsigned priority = e->priority;
 
-               if (!list->array[i].robj->parent)
-                       list_add_tail(&list->array[i].tv.head,
-                                     &bucket[priority]);
+               if (!e->robj->parent)
+                       list_add_tail(&e->tv.head, &bucket[priority]);
 
-               list->array[i].user_pages = NULL;
+               e->user_pages = NULL;
        }
 
        /* Connect the sorted buckets in the output list. */
@@ -247,71 +205,82 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
 {
-       mutex_unlock(&list->lock);
-       kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
-       unsigned i;
-
-       for (i = 0; i < list->num_entries; ++i)
-               amdgpu_bo_unref(&list->array[i].robj);
-
-       mutex_destroy(&list->lock);
-       kvfree(list->array);
-       kfree(list);
+       kref_put(&list->refcount, amdgpu_bo_list_free);
 }
 
-int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *filp)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+                                     struct drm_amdgpu_bo_list_entry **info_param)
 {
+       const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
        const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
-       struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
-       union drm_amdgpu_bo_list *args = data;
-       uint32_t handle = args->in.list_handle;
-       const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);
-
        struct drm_amdgpu_bo_list_entry *info;
-       struct amdgpu_bo_list *list;
-
        int r;
 
-       info = kvmalloc_array(args->in.bo_number,
-                            sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
+       info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
        /* copy the handle array from userspace to a kernel buffer */
        r = -EFAULT;
-       if (likely(info_size == args->in.bo_info_size)) {
-               unsigned long bytes = args->in.bo_number *
-                       args->in.bo_info_size;
+       if (likely(info_size == in->bo_info_size)) {
+               unsigned long bytes = in->bo_number *
+                       in->bo_info_size;
 
                if (copy_from_user(info, uptr, bytes))
                        goto error_free;
 
        } else {
-               unsigned long bytes = min(args->in.bo_info_size, info_size);
+               unsigned long bytes = min(in->bo_info_size, info_size);
                unsigned i;
 
-               memset(info, 0, args->in.bo_number * info_size);
-               for (i = 0; i < args->in.bo_number; ++i) {
+               memset(info, 0, in->bo_number * info_size);
+               for (i = 0; i < in->bo_number; ++i) {
                        if (copy_from_user(&info[i], uptr, bytes))
                                goto error_free;
 
-                       uptr += args->in.bo_info_size;
+                       uptr += in->bo_info_size;
                }
        }
 
+       *info_param = info;
+       return 0;
+
+error_free:
+       kvfree(info);
+       return r;
+}
+
+int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *filp)
+{
+       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       union drm_amdgpu_bo_list *args = data;
+       uint32_t handle = args->in.list_handle;
+       struct drm_amdgpu_bo_list_entry *info = NULL;
+       struct amdgpu_bo_list *list, *old;
+       int r;
+
+       r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+       if (r)
+               goto error_free;
+
        switch (args->in.operation) {
        case AMDGPU_BO_LIST_OP_CREATE:
                r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
-                                         &handle);
+                                         &list);
                if (r)
                        goto error_free;
+
+               mutex_lock(&fpriv->bo_list_lock);
+               r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+               mutex_unlock(&fpriv->bo_list_lock);
+               if (r < 0) {
+                       amdgpu_bo_list_put(list);
+                       return r;
+               }
+
+               handle = r;
                break;
 
        case AMDGPU_BO_LIST_OP_DESTROY:
@@ -320,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                break;
 
        case AMDGPU_BO_LIST_OP_UPDATE:
-               r = -ENOENT;
-               list = amdgpu_bo_list_get(fpriv, handle);
-               if (!list)
+               r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+                                         &list);
+               if (r)
                        goto error_free;
 
-               r = amdgpu_bo_list_set(adev, filp, list, info,
-                                             args->in.bo_number);
-               amdgpu_bo_list_put(list);
-               if (r)
+               mutex_lock(&fpriv->bo_list_lock);
+               old = idr_replace(&fpriv->bo_list_handles, list, handle);
+               mutex_unlock(&fpriv->bo_list_lock);
+
+               if (IS_ERR(old)) {
+                       amdgpu_bo_list_put(list);
+                       r = PTR_ERR(old);
                        goto error_free;
+               }
 
+               amdgpu_bo_list_put(old);
                break;
 
        default:
@@ -345,6 +319,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
        return 0;
 
 error_free:
-       kvfree(info);
+       if (info)
+               kvfree(info);
        return r;
 }
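For reference, a hedged userspace-side sketch of driving this ioctl to create a one-entry list; field names follow the amdgpu UAPI header, and error handling is minimal:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    static int example_create_bo_list(int fd, uint32_t bo_handle,
                                      uint32_t *list_handle)
    {
            struct drm_amdgpu_bo_list_entry info = {
                    .bo_handle = bo_handle,
                    .bo_priority = 0,
            };
            union drm_amdgpu_bo_list args;

            memset(&args, 0, sizeof(args));
            args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
            args.in.bo_number = 1;
            args.in.bo_info_size = sizeof(info);
            args.in.bo_info_ptr = (uintptr_t)&info;

            if (ioctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
                    return -1;

            *list_handle = args.out.list_handle;
            return 0;
    }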
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644 (file)
index 0000000..61b0897
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+       struct amdgpu_bo                *robj;
+       struct ttm_validate_buffer      tv;
+       struct amdgpu_bo_va             *bo_va;
+       uint32_t                        priority;
+       struct page                     **user_pages;
+       int                             user_invalidated;
+};
+
+struct amdgpu_bo_list {
+       struct rcu_head rhead;
+       struct kref refcount;
+       struct amdgpu_bo *gds_obj;
+       struct amdgpu_bo *gws_obj;
+       struct amdgpu_bo *oa_obj;
+       unsigned first_userptr;
+       unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+                      struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+                            struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+                                     struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+                                struct drm_file *filp,
+                                struct drm_amdgpu_bo_list_entry *info,
+                                unsigned num_entries,
+                                struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+       struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+       return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+       for (e = amdgpu_bo_list_array_entry(list, 0); \
+            e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+            ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+       for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+            e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+            ++e)
+
+#endif
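A minimal sketch of the new embedded-array iteration; amdgpu_bo_size() is the existing helper, while the summing function itself is illustrative:

    static u64 example_total_list_size(struct amdgpu_bo_list *list)
    {
            struct amdgpu_bo_list_entry *e;
            u64 total = 0;

            amdgpu_bo_list_for_each_entry(e, list)
                    total += amdgpu_bo_size(e->robj);

            return total;
    }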
index e950730f1933b66f1bffc62eb4fc24fd10b8b903..693ec5ea4950a8a76653df1085472078314def56 100644 (file)
@@ -314,17 +314,17 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                        (adev->pdev->revision == 0x81) ||
                                        (adev->pdev->device == 0x665f)) {
                                        info->is_kicker = true;
-                                       strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+                                       strcpy(fw_name, "amdgpu/bonaire_k_smc.bin");
                                } else {
-                                       strcpy(fw_name, "radeon/bonaire_smc.bin");
+                                       strcpy(fw_name, "amdgpu/bonaire_smc.bin");
                                }
                                break;
                        case CHIP_HAWAII:
                                if (adev->pdev->revision == 0x80) {
                                        info->is_kicker = true;
-                                       strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+                                       strcpy(fw_name, "amdgpu/hawaii_k_smc.bin");
                                } else {
-                                       strcpy(fw_name, "radeon/hawaii_smc.bin");
+                                       strcpy(fw_name, "amdgpu/hawaii_smc.bin");
                                }
                                break;
                        case CHIP_TOPAZ:
index 8e66851eb427b0f00b83ae5b74a68113fe79f0c1..c770d73352a793fc7c91d871b100a503870ca701 100644 (file)
@@ -212,30 +212,21 @@ static void
 amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
                                      enum drm_connector_status status)
 {
-       struct drm_encoder *best_encoder = NULL;
-       struct drm_encoder *encoder = NULL;
+       struct drm_encoder *best_encoder;
+       struct drm_encoder *encoder;
        const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
        bool connected;
        int i;
 
        best_encoder = connector_funcs->best_encoder(connector);
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL,
-                                       connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if ((encoder == best_encoder) && (status == connector_status_connected))
                        connected = true;
                else
                        connected = false;
 
                amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
-
        }
 }
 
@@ -246,17 +237,11 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
        struct drm_encoder *encoder;
        int i;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-               encoder = drm_encoder_find(connector->dev, NULL,
-                                       connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if (encoder->encoder_type == encoder_type)
                        return encoder;
        }
+
        return NULL;
 }
 
@@ -349,22 +334,24 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
        int ret;
 
        if (amdgpu_connector->edid) {
-               drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+               drm_connector_update_edid_property(connector, amdgpu_connector->edid);
                ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
                return ret;
        }
-       drm_mode_connector_update_edid_property(connector, NULL);
+       drm_connector_update_edid_property(connector, NULL);
        return 0;
 }
 
 static struct drm_encoder *
 amdgpu_connector_best_single_encoder(struct drm_connector *connector)
 {
-       int enc_id = connector->encoder_ids[0];
+       struct drm_encoder *encoder;
+       int i;
+
+       /* pick the first one */
+       drm_connector_for_each_possible_encoder(connector, encoder, i)
+               return encoder;
 
-       /* pick the encoder ids */
-       if (enc_id)
-               return drm_encoder_find(connector->dev, NULL, enc_id);
        return NULL;
 }
 
@@ -985,9 +972,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-       struct drm_encoder *encoder = NULL;
        const struct drm_encoder_helper_funcs *encoder_funcs;
-       int i, r;
+       int r;
        enum drm_connector_status ret = connector_status_disconnected;
        bool dret = false, broken_edid = false;
 
@@ -1077,14 +1063,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 
        /* find analog encoder */
        if (amdgpu_connector->dac_load_detect) {
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-                       if (connector->encoder_ids[i] == 0)
-                               break;
-
-                       encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-                       if (!encoder)
-                               continue;
+               struct drm_encoder *encoder;
+               int i;
 
+               drm_connector_for_each_possible_encoder(connector, encoder, i) {
                        if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
                            encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
                                continue;
@@ -1132,18 +1114,11 @@ exit:
 static struct drm_encoder *
 amdgpu_connector_dvi_encoder(struct drm_connector *connector)
 {
-       int enc_id = connector->encoder_ids[0];
        struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
        struct drm_encoder *encoder;
        int i;
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
 
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if (amdgpu_connector->use_digital == true) {
                        if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
                                return encoder;
@@ -1158,8 +1133,9 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
 
        /* then check use digital */
        /* pick the first one */
-       if (enc_id)
-               return drm_encoder_find(connector->dev, NULL, enc_id);
+       drm_connector_for_each_possible_encoder(connector, encoder, i)
+               return encoder;
+
        return NULL;
 }
 
@@ -1296,15 +1272,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
        struct amdgpu_encoder *amdgpu_encoder;
        int i;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL,
-                                       connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                amdgpu_encoder = to_amdgpu_encoder(encoder);
 
                switch (amdgpu_encoder->encoder_id) {
@@ -1326,14 +1294,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
        int i;
        bool found = false;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-               encoder = drm_encoder_find(connector->dev, NULL,
-                                       connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                amdgpu_encoder = to_amdgpu_encoder(encoder);
                if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
                        found = true;
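
All of the deleted lookups in this file follow one pattern, which the new drm_connector_for_each_possible_encoder() helper encapsulates. A rough open-coded equivalent, reconstructed from the loops removed above (illustrative sketch, not the header's exact macro definition):

	struct drm_encoder *encoder;
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0)
			break;		/* unused slots are zero */

		encoder = drm_encoder_find(connector->dev, NULL,
					   connector->encoder_ids[i]);
		if (!encoder)		/* stale id, skip it */
			continue;

		/* per-encoder loop body runs here */
	}
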
index 82312a7bc6ad5b5a01e232b350f23ac889f47ed7..502b94fb116a7070af89ce51da182c3a936e48a0 100644 (file)
@@ -31,6 +31,7 @@
 #include <drm/drm_syncobj.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "amdgpu_gmc.h"
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
@@ -65,11 +66,35 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        return 0;
 }
 
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
+                                     struct drm_amdgpu_bo_list_in *data)
+{
+       int r;
+       struct drm_amdgpu_bo_list_entry *info = NULL;
+
+       r = amdgpu_bo_create_list_entry_array(data, &info);
+       if (r)
+               return r;
+
+       r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+                                 &p->bo_list);
+       if (r)
+               goto error_free;
+
+       kvfree(info);
+       return 0;
+
+error_free:
+       if (info)
+               kvfree(info);
+
+       return r;
+}
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
-       union drm_amdgpu_cs *cs = data;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        unsigned size, num_ibs = 0;
@@ -163,6 +188,19 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
                        break;
 
+               case AMDGPU_CHUNK_ID_BO_HANDLES:
+                       size = sizeof(struct drm_amdgpu_bo_list_in);
+                       if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
+                               ret = -EINVAL;
+                               goto free_partial_kdata;
+                       }
+
+                       ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+                       if (ret)
+                               goto free_partial_kdata;
+
+                       break;
+
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
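
The new AMDGPU_CHUNK_ID_BO_HANDLES chunk lets userspace pass its BO list inline with the submission instead of creating one beforehand with the bo_list ioctl. A hedged sketch of the userspace side; the structs come from the uapi header updated alongside this series, and the bo_info_size/bo_info_ptr field names here should be treated as assumptions:

	/* bo0/bo1: GEM handles from earlier allocations (sketch only) */
	struct drm_amdgpu_bo_list_entry entries[2] = {
		{ .bo_handle = bo0, .bo_priority = 0 },
		{ .bo_handle = bo1, .bo_priority = 0 },
	};
	struct drm_amdgpu_bo_list_in list_in = {
		.bo_number    = 2,
		.bo_info_size = sizeof(entries[0]),	/* assumed field */
		.bo_info_ptr  = (__u64)(uintptr_t)entries,	/* assumed field */
	};
	struct drm_amdgpu_cs_chunk chunk = {
		.chunk_id   = AMDGPU_CHUNK_ID_BO_HANDLES,
		.length_dw  = sizeof(list_in) / 4,	/* length_dw * 4 must cover
							   the struct, as validated above */
		.chunk_data = (__u64)(uintptr_t)&list_in,
	};
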
@@ -186,6 +224,10 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        if (p->uf_entry.robj)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);
+
+       /* Use this opportunity to fill in task info for the vm */
+       amdgpu_vm_set_task_info(vm);
+
        return 0;
 
 free_all_kdata:
@@ -257,7 +299,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                return;
        }
 
-       total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
+       total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
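
vram_pin_size becomes an atomic64_t elsewhere in this series, so the read above has to go through atomic64_read() to stay tear-free on 32-bit builds. The matching update on the pin/unpin side presumably has this shape (a sketch, not quoted from the driver):

	atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);	/* pin */
	atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);	/* unpin */
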
 
@@ -302,7 +344,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 
        /* Do the same for visible VRAM if half of it is free */
-       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+       if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@@ -359,7 +401,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold) {
-               if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+               if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
@@ -377,11 +419,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
        }
 
 retry:
-       amdgpu_ttm_placement_from_domain(bo, domain);
+       amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
        p->bytes_moved += ctx.bytes_moved;
-       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+       if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;
 
@@ -434,9 +476,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 
                /* Good we can try to move this BO somewhere else */
                update_bytes_moved_vis =
-                       adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
-                       amdgpu_bo_in_cpu_visible_vram(bo);
-               amdgpu_ttm_placement_from_domain(bo, other);
+                               !amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+                               amdgpu_bo_in_cpu_visible_vram(bo);
+               amdgpu_bo_placement_from_domain(bo, other);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                p->bytes_moved += ctx.bytes_moved;
                if (update_bytes_moved_vis)
@@ -490,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                /* Check if we have user pages and nobody bound the BO already */
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
                    lobj->user_pages) {
-                       amdgpu_ttm_placement_from_domain(bo,
-                                                        AMDGPU_GEM_DOMAIN_CPU);
+                       amdgpu_bo_placement_from_domain(bo,
+                                                       AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;
@@ -519,23 +561,38 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;
-       unsigned i, tries = 10;
        struct amdgpu_bo *gds;
        struct amdgpu_bo *gws;
        struct amdgpu_bo *oa;
+       unsigned tries = 10;
        int r;
 
        INIT_LIST_HEAD(&p->validated);
 
-       p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
-       if (p->bo_list) {
-               amdgpu_bo_list_get_list(p->bo_list, &p->validated);
-               if (p->bo_list->first_userptr != p->bo_list->num_entries)
-                       p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+       /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+       if (cs->in.bo_list_handle) {
+               if (p->bo_list)
+                       return -EINVAL;
+
+               r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+                                      &p->bo_list);
+               if (r)
+                       return r;
+       } else if (!p->bo_list) {
+       /* Create an empty bo_list when no handle is provided */
+               r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+                                         &p->bo_list);
+               if (r)
+                       return r;
        }
 
+       amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+       if (p->bo_list->first_userptr != p->bo_list->num_entries)
+               p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
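
Net effect of the bo_list handling above: p->bo_list is guaranteed non-NULL from here on, which is what lets the later `if (p->bo_list)` guards in this file disappear. The decision distilled (mirroring the code above, error handling elided):

	if (cs->in.bo_list_handle) {		/* explicit handle... */
		if (p->bo_list)			/* ...plus a chunk: reject */
			return -EINVAL;
		amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle, &p->bo_list);
	} else if (!p->bo_list) {		/* neither source given */
		amdgpu_bo_list_create(p->adev, p->filp, NULL, 0, &p->bo_list);
	}					/* else: chunk already made it */
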
 
@@ -544,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
        while (1) {
                struct list_head need_pages;
-               unsigned i;
 
                r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                           &duplicates);
@@ -554,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        goto error_free_pages;
                }
 
-               /* Without a BO list we don't have userptr BOs */
-               if (!p->bo_list)
-                       break;
-
                INIT_LIST_HEAD(&need_pages);
-               for (i = p->bo_list->first_userptr;
-                    i < p->bo_list->num_entries; ++i) {
-                       struct amdgpu_bo *bo;
-
-                       e = &p->bo_list->array[i];
-                       bo = e->robj;
+               amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+                       struct amdgpu_bo *bo = e->robj;
 
                        if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
                                 &e->user_invalidated) && e->user_pages) {
@@ -656,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);
 
-       if (p->bo_list) {
-               struct amdgpu_vm *vm = &fpriv->vm;
-               unsigned i;
-
-               gds = p->bo_list->gds_obj;
-               gws = p->bo_list->gws_obj;
-               oa = p->bo_list->oa_obj;
-               for (i = 0; i < p->bo_list->num_entries; i++) {
-                       struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+       gds = p->bo_list->gds_obj;
+       gws = p->bo_list->gws_obj;
+       oa = p->bo_list->oa_obj;
 
-                       p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
-               }
-       } else {
-               gds = p->adev->gds.gds_gfx_bo;
-               gws = p->adev->gds.gws_gfx_bo;
-               oa = p->adev->gds.oa_gfx_bo;
-       }
+       amdgpu_bo_list_for_each_entry(e, p->bo_list)
+               e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
 
        if (gds) {
                p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -700,18 +737,13 @@ error_validate:
 
 error_free_pages:
 
-       if (p->bo_list) {
-               for (i = p->bo_list->first_userptr;
-                    i < p->bo_list->num_entries; ++i) {
-                       e = &p->bo_list->array[i];
-
-                       if (!e->user_pages)
-                               continue;
+       amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+               if (!e->user_pages)
+                       continue;
 
-                       release_pages(e->user_pages,
-                                     e->robj->tbo.ttm->num_pages);
-                       kvfree(e->user_pages);
-               }
+               release_pages(e->user_pages,
+                             e->robj->tbo.ttm->num_pages);
+               kvfree(e->user_pages);
        }
 
        return r;
@@ -773,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 {
-       struct amdgpu_device *adev = p->adev;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
+       struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
-       int i, r;
+       int r;
 
        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
@@ -808,29 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
                        return r;
        }
 
-       if (p->bo_list) {
-               for (i = 0; i < p->bo_list->num_entries; i++) {
-                       struct dma_fence *f;
-
-                       /* ignore duplicates */
-                       bo = p->bo_list->array[i].robj;
-                       if (!bo)
-                               continue;
+       amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+               struct dma_fence *f;
 
-                       bo_va = p->bo_list->array[i].bo_va;
-                       if (bo_va == NULL)
-                               continue;
+               /* ignore duplicates */
+               bo = e->robj;
+               if (!bo)
+                       continue;
 
-                       r = amdgpu_vm_bo_update(adev, bo_va, false);
-                       if (r)
-                               return r;
+               bo_va = e->bo_va;
+               if (bo_va == NULL)
+                       continue;
 
-                       f = bo_va->last_pt_update;
-                       r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
-                       if (r)
-                               return r;
-               }
+               r = amdgpu_vm_bo_update(adev, bo_va, false);
+               if (r)
+                       return r;
 
+               f = bo_va->last_pt_update;
+               r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+               if (r)
+                       return r;
        }
 
        r = amdgpu_vm_handle_moved(adev, vm);
@@ -845,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
-       if (amdgpu_vm_debug && p->bo_list) {
+       if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
-               for (i = 0; i < p->bo_list->num_entries; i++) {
+               amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        /* ignore duplicates */
-                       bo = p->bo_list->array[i].robj;
-                       if (!bo)
+                       if (!e->robj)
                                continue;
 
-                       amdgpu_vm_bo_invalidate(adev, bo, false);
+                       amdgpu_vm_bo_invalidate(adev, e->robj, false);
                }
        }
 
@@ -865,11 +894,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
-       struct amdgpu_ring *ring = p->job->ring;
+       struct amdgpu_ring *ring = p->ring;
        int r;
 
        /* Only for UVD/VCE VM emulation */
-       if (p->job->ring->funcs->parse_cs) {
+       if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
                unsigned i, j;
 
                for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -910,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                        offset = m->start * AMDGPU_GPU_PAGE_SIZE;
                        kptr += va_start - offset;
 
-                       memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
-                       amdgpu_bo_kunmap(aobj);
-
-                       r = amdgpu_ring_parse_cs(ring, p, j);
-                       if (r)
-                               return r;
+                       if (p->ring->funcs->parse_cs) {
+                               memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+                               amdgpu_bo_kunmap(aobj);
+
+                               r = amdgpu_ring_parse_cs(ring, p, j);
+                               if (r)
+                                       return r;
+                       } else {
+                               ib->ptr = (uint32_t *)kptr;
+                               r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+                               amdgpu_bo_kunmap(aobj);
+                               if (r)
+                                       return r;
+                       }
 
                        j++;
                }
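
The two branches differ in where the command stream ends up: parse_cs validates a copy of the user IB in the job's shadow buffer, while the new patch_cs_in_place path points the IB at the kernel mapping and patches the user buffer directly, saving the memcpy. Condensed, with the distinction annotated:

	if (ring->funcs->parse_cs) {
		memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);	/* shadow copy */
		amdgpu_bo_kunmap(aobj);
		r = amdgpu_ring_parse_cs(ring, p, j);		/* validate the copy */
	} else {
		ib->ptr = (uint32_t *)kptr;			/* no copy */
		r = amdgpu_ring_patch_cs_in_place(ring, p, j);	/* patch user IB */
		amdgpu_bo_kunmap(aobj);
	}
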
@@ -927,6 +964,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                r = amdgpu_bo_vm_update_pte(p);
                if (r)
                        return r;
+
+               r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+               if (r)
+                       return r;
        }
 
        return amdgpu_cs_sync_rings(p);
@@ -979,10 +1020,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                        }
                }
 
-               if (parser->job->ring && parser->job->ring != ring)
+               if (parser->ring && parser->ring != ring)
                        return -EINVAL;
 
-               parser->job->ring = ring;
+               parser->ring = ring;
 
                r =  amdgpu_ib_get(adev, vm,
                                        ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
@@ -1001,11 +1042,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
        /* UVD & VCE fw doesn't support user fences */
        if (parser->job->uf_addr && (
-           parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
-           parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+           parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+           parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;
 
-       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
+       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1156,31 +1197,30 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
 {
-       struct amdgpu_ring *ring = p->job->ring;
+       struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct amdgpu_ring *ring = p->ring;
        struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+       enum drm_sched_priority priority;
+       struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
-       unsigned i;
        uint64_t seq;
 
        int r;
 
        amdgpu_mn_lock(p->mn);
-       if (p->bo_list) {
-               for (i = p->bo_list->first_userptr;
-                    i < p->bo_list->num_entries; ++i) {
-                       struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
-                       if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-                               amdgpu_mn_unlock(p->mn);
-                               return -ERESTARTSYS;
-                       }
+       amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+               struct amdgpu_bo *bo = e->robj;
+
+               if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+                       amdgpu_mn_unlock(p->mn);
+                       return -ERESTARTSYS;
                }
        }
 
        job = p->job;
        p->job = NULL;
 
-       r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+       r = drm_sched_job_init(&job->base, entity, p->filp);
        if (r) {
                amdgpu_job_free(job);
                amdgpu_mn_unlock(p->mn);
@@ -1188,7 +1228,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        }
 
        job->owner = p->filp;
-       job->fence_ctx = entity->fence_context;
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
        r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
@@ -1206,11 +1245,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        job->uf_sequence = seq;
 
        amdgpu_job_free_resources(job);
-       amdgpu_ring_priority_get(job->ring, job->base.s_priority);
 
        trace_amdgpu_cs_ioctl(job);
+       amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
+       priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
+       ring = to_amdgpu_ring(entity->rq->sched);
+       amdgpu_ring_priority_get(ring, priority);
+
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        amdgpu_mn_unlock(p->mn);
 
@@ -1601,7 +1644,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-               amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+               amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                if (r)
                        return r;
index c5bb36275e9379e2ecf36318233b7fc98f44483a..df69657610460a81104a343fff489a3c1695856e 100644 (file)
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                if (ring == &adev->gfx.kiq.ring)
                        continue;
 
-               r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-                                         rq, &ctx->guilty);
+               r = drm_sched_entity_init(&ctx->rings[i].entity,
+                                         &rq, 1, &ctx->guilty);
                if (r)
                        goto failed;
        }
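
The scheduler entity API changed in this series: an entity is no longer bound to one ring's scheduler at init time but to a list of candidate run queues, here a single-entry list (&rq, 1). The new signature, reconstructed from the call site above and the scheduler rework this series depends on:

	int drm_sched_entity_init(struct drm_sched_entity *entity,
				  struct drm_sched_rq **rq_list,
				  unsigned int num_rq_list,
				  atomic_t *guilty);
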
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
        for (j = 0; j < i; j++)
-               drm_sched_entity_fini(&adev->rings[j]->sched,
-                                     &ctx->rings[j].entity);
+               drm_sched_entity_destroy(&ctx->rings[j].entity);
        kfree(ctx->fences);
        ctx->fences = NULL;
        return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
                if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                        continue;
 
-               drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-                       &ctx->rings[i].entity);
+               drm_sched_entity_destroy(&ctx->rings[i].entity);
        }
 
        amdgpu_ctx_fini(ref);
@@ -444,34 +442,36 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
        idr_init(&mgr->ctx_handles);
 }
 
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 {
        struct amdgpu_ctx *ctx;
        struct idr *idp;
        uint32_t id, i;
+       long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
        idp = &mgr->ctx_handles;
 
+       mutex_lock(&mgr->lock);
        idr_for_each_entry(idp, ctx, id) {
 
-               if (!ctx->adev)
+               if (!ctx->adev) {
+                       mutex_unlock(&mgr->lock);
                        return;
+               }
 
                for (i = 0; i < ctx->adev->num_rings; i++) {
 
                        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
                                continue;
 
-                       if (kref_read(&ctx->refcount) == 1)
-                               drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
-                                                 &ctx->rings[i].entity);
-                       else
-                               DRM_ERROR("ctx %p is still alive\n", ctx);
+                       max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+                                                         max_wait);
                }
        }
+       mutex_unlock(&mgr->lock);
 }
 
-void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
        struct amdgpu_ctx *ctx;
        struct idr *idp;
@@ -490,8 +490,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
                                continue;
 
                        if (kref_read(&ctx->refcount) == 1)
-                               drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
-                                       &ctx->rings[i].entity);
+                               drm_sched_entity_fini(&ctx->rings[i].entity);
                        else
                                DRM_ERROR("ctx %p is still alive\n", ctx);
                }
@@ -504,7 +503,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
        struct idr *idp;
        uint32_t id;
 
-       amdgpu_ctx_mgr_entity_cleanup(mgr);
+       amdgpu_ctx_mgr_entity_fini(mgr);
 
        idp = &mgr->ctx_handles;
 
index 3317d1536f4fc352247756e3c650d72c9236916b..e839470880d7f20c038eda1d2577435080911daa 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/power_supply.h>
 #include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
@@ -675,17 +676,15 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_device_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GART location
  *
  * @adev: amdgpu device structure holding all necessary information
  * @mc: memory controller structure holding memory information
  *
- * Function will place try to place GTT before or after VRAM.
+ * Function will try to place GART before or after VRAM.
  *
- * If GTT size is bigger than space left then we ajust GTT size.
+ * If GART size is bigger than space left then we adjust GART size.
  * Thus the function will never fail.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
  */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
                                 struct amdgpu_gmc *mc)
@@ -698,13 +697,13 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
        size_bf = mc->vram_start;
        if (size_bf > size_af) {
                if (mc->gart_size > size_bf) {
-                       dev_warn(adev->dev, "limiting GTT\n");
+                       dev_warn(adev->dev, "limiting GART\n");
                        mc->gart_size = size_bf;
                }
                mc->gart_start = 0;
        } else {
                if (mc->gart_size > size_af) {
-                       dev_warn(adev->dev, "limiting GTT\n");
+                       dev_warn(adev->dev, "limiting GART\n");
                        mc->gart_size = size_af;
                }
                /* VCE doesn't like it when BOs cross a 4GB segment, so align
@@ -713,7 +712,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
                mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
        }
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
-       dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+       dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
 }
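
A runnable illustration of the 4 GB alignment above, with made-up sizes (6 GB of VRAM); the ALIGN macro mirrors the kernel's power-of-two round-up:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t vram_end   = 0x17FFFFFFFULL;	/* last byte of 6 GB VRAM */
		uint64_t gart_start = ALIGN(vram_end + 1, 0x100000000ULL);

		/* 0x180000000 rounds up to 0x200000000, the next 4 GB boundary,
		 * so no BO in the GART aperture straddles a 4 GB segment. */
		printf("GART start: 0x%" PRIx64 "\n", gart_start);
		return 0;
	}
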
 
@@ -1077,7 +1076,7 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
 /**
  * amdgpu_device_ip_set_clockgating_state - set the CG state
  *
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
  * @state: clockgating state (gate or ungate)
  *
@@ -1111,7 +1110,7 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
 /**
  * amdgpu_device_ip_set_powergating_state - set the PG state
  *
- * @adev: amdgpu_device pointer
+ * @dev: amdgpu_device pointer
  * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
  * @state: powergating state (gate or ungate)
  *
@@ -1222,7 +1221,7 @@ bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
  * amdgpu_device_ip_get_ip_block - get a hw IP pointer
  *
  * @adev: amdgpu_device pointer
- * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
  *
  * Returns a pointer to the hardware IP block structure
  * if it exists for the asic, otherwise NULL.
@@ -1708,10 +1707,6 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
        if (amdgpu_emu_mode == 1)
                return 0;
 
-       r = amdgpu_ib_ring_tests(adev);
-       if (r)
-               DRM_ERROR("ib ring test failed (%d).\n", r);
-
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -1731,17 +1726,34 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
                }
        }
 
-       if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
-               /* enable gfx powergating */
-               amdgpu_device_ip_set_powergating_state(adev,
-                                                      AMD_IP_BLOCK_TYPE_GFX,
-                                                      AMD_PG_STATE_GATE);
-               /* enable gfxoff */
-               amdgpu_device_ip_set_powergating_state(adev,
-                                                      AMD_IP_BLOCK_TYPE_SMC,
-                                                      AMD_PG_STATE_GATE);
-       }
+       return 0;
+}
+
+static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
+{
+       int i = 0, r;
+
+       if (amdgpu_emu_mode == 1)
+               return 0;
 
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_blocks[i].status.valid)
+                       continue;
+               /* skip CG for VCE/UVD, it's handled specially */
+               if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+                   adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+                   adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+                   adev->ip_blocks[i].version->funcs->set_powergating_state) {
+                       /* enable powergating to save power */
+                       r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+                                                                                    AMD_PG_STATE_GATE);
+                       if (r) {
+                               DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
+                                         adev->ip_blocks[i].version->funcs->name, r);
+                               return r;
+                       }
+               }
+       }
        return 0;
 }
 
@@ -1775,6 +1787,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
                }
        }
 
+       amdgpu_device_ip_late_set_cg_state(adev);
+       amdgpu_device_ip_late_set_pg_state(adev);
+
        queue_delayed_work(system_wq, &adev->late_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
@@ -1813,6 +1828,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
                                          adev->ip_blocks[i].version->funcs->name, r);
                                return r;
                        }
+                       if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+                               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
                        r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
                        /* XXX handle errors */
                        if (r) {
@@ -1901,11 +1918,15 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, late_init_work.work);
-       amdgpu_device_ip_late_set_cg_state(adev);
+       int r;
+
+       r = amdgpu_ib_ring_tests(adev);
+       if (r)
+               DRM_ERROR("ib ring test failed (%d).\n", r);
 }
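
Clock- and powergating setup now happens synchronously in late init, while only the slow IB ring tests are deferred to the delayed worker above. The resulting order, summarized from the hunks in this file:

	/* amdgpu_device_ip_late_init():
	 *   per-IP late_init callbacks
	 *   amdgpu_device_ip_late_set_cg_state()    enable clockgating
	 *   amdgpu_device_ip_late_set_pg_state()    enable powergating
	 *   queue late_init_work after AMDGPU_RESUME_MS
	 * late_init_work handler:
	 *   amdgpu_ib_ring_tests()                  moved out of the CG path
	 */
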
 
 /**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
  *
  * @adev: amdgpu_device pointer
  *
@@ -1915,18 +1936,60 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
  * in each IP into a state suitable for suspend.
  * Returns 0 on success, negative error code on failure.
  */
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
        int i, r;
 
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_request_full_gpu(adev, false);
 
-       /* ungate SMC block powergating */
-       if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
-               amdgpu_device_ip_set_powergating_state(adev,
-                                                      AMD_IP_BLOCK_TYPE_SMC,
-                                                      AMD_CG_STATE_UNGATE);
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (!adev->ip_blocks[i].status.valid)
+                       continue;
+               /* displays are handled separately */
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+                       /* ungate blocks so that suspend can properly shut them down */
+                       if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+                               r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+                                                                                            AMD_CG_STATE_UNGATE);
+                               if (r) {
+                                       DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+                                                 adev->ip_blocks[i].version->funcs->name, r);
+                               }
+                       }
+                       /* XXX handle errors */
+                       r = adev->ip_blocks[i].version->funcs->suspend(adev);
+                       /* XXX handle errors */
+                       if (r) {
+                               DRM_ERROR("suspend of IP block <%s> failed %d\n",
+                                         adev->ip_blocks[i].version->funcs->name, r);
+                       }
+               }
+       }
+
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_virt_release_full_gpu(adev, false);
+
+       return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs.  The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run.  suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+{
+       int i, r;
+
+       if (amdgpu_sriov_vf(adev))
+               amdgpu_virt_request_full_gpu(adev, false);
 
        /* ungate SMC block first */
        r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
@@ -1935,9 +1998,16 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
                DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
        }
 
+       /* call SMU to disable the gfx off feature first when suspending */
+       if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
+               /* displays are handled in phase1 */
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+                       continue;
                /* ungate blocks so that suspend can properly shut them down */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
                        adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1963,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
        return 0;
 }
 
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs.  The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run.  suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_device_ip_suspend_phase1(adev);
+       if (r)
+               return r;
+       r = amdgpu_device_ip_suspend_phase2(adev);
+
+       return r;
+}
+
 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
 {
        int i, r;
@@ -1985,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
                                continue;
 
                        r = block->version->funcs->hw_init(adev);
-                       DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+                       DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
                        if (r)
                                return r;
                }
@@ -2020,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
                                continue;
 
                        r = block->version->funcs->hw_init(adev);
-                       DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+                       DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
                        if (r)
                                return r;
                }
@@ -2158,10 +2251,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
        case CHIP_BONAIRE:
-       case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
+               /*
+                * We have systems in the wild with these ASICs that require
+                * LVDS and VGA support, which DC does not provide.
+                *
+                * Fall back to the non-DC driver here by default so as not to
+                * cause regressions.
+                */
+               return amdgpu_dc > 0;
+       case CHIP_HAWAII:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS10:
@@ -2173,7 +2274,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case CHIP_RAVEN:
 #endif
                return amdgpu_dc != 0;
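
The two return expressions differ only for the auto setting of the amdgpu.dc module parameter (per the driver's module-parameter description: -1 = auto, the default; 0 = off; 1 = on), so the LVDS/VGA-era ASICs above pick up DC solely on explicit opt-in:

	/* amdgpu_dc | "amdgpu_dc > 0" | "amdgpu_dc != 0"
	 * ----------+-----------------+-----------------
	 * -1 (auto) | legacy display  | DC
	 *  0 (off)  | legacy display  | legacy display
	 *  1 (on)   | DC              | DC
	 */
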
@@ -2202,7 +2303,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
  * amdgpu_device_init - initialize the driver
  *
  * @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
+ * @ddev: drm dev pointer
  * @pdev: pci dev pointer
  * @flags: driver flags
  *
@@ -2293,6 +2394,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_DELAYED_WORK(&adev->late_init_work,
                          amdgpu_device_ip_late_init_func_handler);
 
+       adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        if (adev->asic_type >= CHIP_BONAIRE) {
@@ -2573,8 +2676,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 /**
  * amdgpu_device_suspend - initiate device suspend
  *
- * @pdev: drm dev pointer
- * @state: suspend state
+ * @dev: drm dev pointer
+ * @suspend: suspend state
+ * @fbcon: notify the fbdev of suspend
  *
  * Puts the hw in the suspend state (all asics).
  * Returns 0 for success or an error on failure.
@@ -2598,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
        drm_kms_helper_poll_disable(dev);
 
+       if (fbcon)
+               amdgpu_fbdev_set_suspend(adev, 1);
+
        if (!amdgpu_device_has_dc_support(adev)) {
                /* turn off display hw */
                drm_modeset_lock_all(dev);
@@ -2605,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
                }
                drm_modeset_unlock_all(dev);
-       }
-
-       amdgpu_amdkfd_suspend(adev);
-
-       /* unpin the front buffers and cursors */
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-               struct drm_framebuffer *fb = crtc->primary->fb;
-               struct amdgpu_bo *robj;
-
-               if (amdgpu_crtc->cursor_bo) {
-                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                       r = amdgpu_bo_reserve(aobj, true);
-                       if (r == 0) {
-                               amdgpu_bo_unpin(aobj);
-                               amdgpu_bo_unreserve(aobj);
+               /* unpin the front buffers and cursors */
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+                       struct drm_framebuffer *fb = crtc->primary->fb;
+                       struct amdgpu_bo *robj;
+
+                       if (amdgpu_crtc->cursor_bo) {
+                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                               r = amdgpu_bo_reserve(aobj, true);
+                               if (r == 0) {
+                                       amdgpu_bo_unpin(aobj);
+                                       amdgpu_bo_unreserve(aobj);
+                               }
                        }
-               }
 
-               if (fb == NULL || fb->obj[0] == NULL) {
-                       continue;
-               }
-               robj = gem_to_amdgpu_bo(fb->obj[0]);
-               /* don't unpin kernel fb objects */
-               if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-                       r = amdgpu_bo_reserve(robj, true);
-                       if (r == 0) {
-                               amdgpu_bo_unpin(robj);
-                               amdgpu_bo_unreserve(robj);
+                       if (fb == NULL || fb->obj[0] == NULL) {
+                               continue;
+                       }
+                       robj = gem_to_amdgpu_bo(fb->obj[0]);
+                       /* don't unpin kernel fb objects */
+                       if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+                               r = amdgpu_bo_reserve(robj, true);
+                               if (r == 0) {
+                                       amdgpu_bo_unpin(robj);
+                                       amdgpu_bo_unreserve(robj);
+                               }
                        }
                }
        }
+
+       amdgpu_amdkfd_suspend(adev);
+
+       r = amdgpu_device_ip_suspend_phase1(adev);
+
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
        amdgpu_fence_driver_suspend(adev);
 
-       r = amdgpu_device_ip_suspend(adev);
+       r = amdgpu_device_ip_suspend_phase2(adev);
 
        /* evict remaining vram memory
         * This second call to evict vram is to evict the gart page table
@@ -2661,18 +2770,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
                        DRM_ERROR("amdgpu asic reset failed\n");
        }
 
-       if (fbcon) {
-               console_lock();
-               amdgpu_fbdev_set_suspend(adev, 1);
-               console_unlock();
-       }
        return 0;
 }
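
With the suspend path split in two, system suspend now interleaves the phases with memory management as follows (order taken from the hunks above):

	/* amdgpu_device_suspend():
	 *   amdgpu_fbdev_set_suspend(adev, 1)        notify fbdev early
	 *   DPMS off + unpin fbs/cursors             non-DC paths only
	 *   amdgpu_amdkfd_suspend(adev)
	 *   amdgpu_device_ip_suspend_phase1(adev)    display (DCE) blocks
	 *   amdgpu_bo_evict_vram(adev)
	 *   amdgpu_fence_driver_suspend(adev)
	 *   amdgpu_device_ip_suspend_phase2(adev)    all remaining blocks
	 *   amdgpu_bo_evict_vram(adev)               catches the GART table
	 */
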
 
 /**
  * amdgpu_device_resume - initiate device resume
  *
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @resume: resume state
+ * @fbcon: notify the fbdev of resume
  *
  * Bring the hw back to operating state (all asics).
  * Returns 0 for success or an error on failure.
@@ -2688,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       if (fbcon)
-               console_lock();
-
        if (resume) {
                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
                r = pci_enable_device(dev->pdev);
                if (r)
-                       goto unlock;
+                       return r;
        }
 
        /* post card */
@@ -2709,29 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
        r = amdgpu_device_ip_resume(adev);
        if (r) {
                DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
-               goto unlock;
+               return r;
        }
        amdgpu_fence_driver_resume(adev);
 
 
        r = amdgpu_device_ip_late_init(adev);
        if (r)
-               goto unlock;
-
-       /* pin cursors */
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-               if (amdgpu_crtc->cursor_bo) {
-                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                       r = amdgpu_bo_reserve(aobj, true);
-                       if (r == 0) {
-                               r = amdgpu_bo_pin(aobj,
-                                                 AMDGPU_GEM_DOMAIN_VRAM,
-                                                 &amdgpu_crtc->cursor_addr);
-                               if (r != 0)
-                                       DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
-                               amdgpu_bo_unreserve(aobj);
+               return r;
+
+       if (!amdgpu_device_has_dc_support(adev)) {
+               /* pin cursors */
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+                       if (amdgpu_crtc->cursor_bo) {
+                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                               r = amdgpu_bo_reserve(aobj, true);
+                               if (r == 0) {
+                                       r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+                                       if (r != 0)
+                                               DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+                                       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+                                       amdgpu_bo_unreserve(aobj);
+                               }
                        }
                }
        }
@@ -2739,6 +2843,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                return r;
 
+       /* Make sure IB tests flushed */
+       flush_delayed_work(&adev->late_init_work);
+
        /* blat the mode back in */
        if (fbcon) {
                if (!amdgpu_device_has_dc_support(adev)) {
@@ -2752,6 +2859,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
                        }
                        drm_modeset_unlock_all(dev);
                }
+               amdgpu_fbdev_set_suspend(adev, 0);
        }
 
        drm_kms_helper_poll_enable(dev);
@@ -2775,15 +2883,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
        dev->dev->power.disable_depth--;
 #endif
-
-       if (fbcon)
-               amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
-       if (fbcon)
-               console_unlock();
-
-       return r;
+       return 0;
 }
 
 /**
@@ -3060,7 +3160,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
  * @adev: amdgpu device pointer
  *
  * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded, otherwise failed
  */
 static int amdgpu_device_reset(struct amdgpu_device *adev)
 {
@@ -3135,9 +3235,10 @@ out:
  * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
  *
  * @adev: amdgpu device pointer
+ * @from_hypervisor: request from hypervisor
  *
  * do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * return 0 means succeeded, otherwise failed
  */
 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
                                     bool from_hypervisor)
@@ -3182,7 +3283,7 @@ error:
  *
  * @adev: amdgpu device pointer
  * @job: which job trigger hang
- * @force forces reset regardless of amdgpu_gpu_recovery
+ * @force: forces reset regardless of amdgpu_gpu_recovery
  *
  * Attempt to reset the GPU if it has hung (all asics).
  * Returns 0 for success or an error on failure.
@@ -3209,6 +3310,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
        atomic_inc(&adev->gpu_reset_counter);
        adev->in_gpu_reset = 1;
 
+       /* Block kfd */
+       amdgpu_amdkfd_pre_reset(adev);
+
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
@@ -3221,10 +3325,10 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                kthread_park(ring->sched.thread);
 
-               if (job && job->ring->idx != i)
+               if (job && job->base.sched == &ring->sched)
                        continue;
 
-               drm_sched_hw_job_reset(&ring->sched, &job->base);
+               drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
 
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
@@ -3245,7 +3349,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                 * or all rings (in the case @job is NULL)
                 * after above amdgpu_reset accomplished
                 */
-               if ((!job || job->ring->idx == i) && !r)
+               if ((!job || job->base.sched == &ring->sched) && !r)
                        drm_sched_job_recovery(&ring->sched);
 
                kthread_unpark(ring->sched.thread);
@@ -3262,9 +3366,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
        } else {
-               dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+               dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
        }
 
+       /* unlock kfd */
+       amdgpu_amdkfd_post_reset(adev);
        amdgpu_vf_error_trans_all(adev);
        adev->in_gpu_reset = 0;
        mutex_unlock(&adev->lock_reset);
@@ -3282,8 +3388,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
  */
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 {
-       u32 mask;
-       int ret;
+       struct pci_dev *pdev;
+       enum pci_bus_speed speed_cap;
+       enum pcie_link_width link_width;
 
        if (amdgpu_pcie_gen_cap)
                adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -3301,27 +3408,61 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
        }
 
        if (adev->pm.pcie_gen_mask == 0) {
-               ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-               if (!ret) {
-                       adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+               /* asic caps */
+               pdev = adev->pdev;
+               speed_cap = pcie_get_speed_cap(pdev);
+               if (speed_cap == PCI_SPEED_UNKNOWN) {
+                       adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                                  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                                  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
-                       if (mask & DRM_PCIE_SPEED_25)
-                               adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
-                       if (mask & DRM_PCIE_SPEED_50)
-                               adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
-                       if (mask & DRM_PCIE_SPEED_80)
-                               adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
                } else {
-                       adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+                       if (speed_cap == PCIE_SPEED_16_0GT)
+                               adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                         CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+                                                         CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+                                                         CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+                       else if (speed_cap == PCIE_SPEED_8_0GT)
+                               adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                         CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+                                                         CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+                       else if (speed_cap == PCIE_SPEED_5_0GT)
+                               adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                         CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+                       else
+                               adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+               }
+               /* platform caps */
+               pdev = adev->ddev->pdev->bus->self;
+               speed_cap = pcie_get_speed_cap(pdev);
+               if (speed_cap == PCI_SPEED_UNKNOWN) {
+                       adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                  CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+               } else {
+                       if (speed_cap == PCIE_SPEED_16_0GT)
+                               adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                          CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+                                                          CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+                                                          CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+                       else if (speed_cap == PCIE_SPEED_8_0GT)
+                               adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                          CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+                                                          CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+                       else if (speed_cap == PCIE_SPEED_5_0GT)
+                               adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+                                                          CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+                       else
+                               adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+
                }
        }
        if (adev->pm.pcie_mlw_mask == 0) {
-               ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
-               if (!ret) {
-                       switch (mask) {
-                       case 32:
+               pdev = adev->ddev->pdev->bus->self;
+               link_width = pcie_get_width_cap(pdev);
+               if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+                       adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
+               } else {
+                       switch (link_width) {
+                       case PCIE_LNK_X32:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
@@ -3330,7 +3471,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
-                       case 16:
+                       case PCIE_LNK_X16:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
@@ -3338,36 +3479,34 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
-                       case 12:
+                       case PCIE_LNK_X12:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
-                       case 8:
+                       case PCIE_LNK_X8:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
-                       case 4:
+                       case PCIE_LNK_X4:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
-                       case 2:
+                       case PCIE_LNK_X2:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
-                       case 1:
+                       case PCIE_LNK_X1:
                                adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
                                break;
                        default:
                                break;
                        }
-               } else {
-                       adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                }
        }
 }
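
The rework above drops the DRM helpers (drm_pcie_get_speed_cap_mask() and drm_pcie_get_max_link_width()) in favor of the core PCI accessors pcie_get_speed_cap() and pcie_get_width_cap(). A minimal sketch of the speed-to-mask translation applied twice above, once for the ASIC and once for the upstream bridge (the helper name is illustrative, not part of the patch):

    static u32 cail_mask_from_speed(enum pci_bus_speed speed_cap)
    {
    	/* each supported top speed implies all lower gens as well */
    	switch (speed_cap) {
    	case PCIE_SPEED_16_0GT:
    		return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
    		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
    		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
    		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4;
    	case PCIE_SPEED_8_0GT:
    		return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
    		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
    		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
    	case PCIE_SPEED_5_0GT:
    		return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
    		       CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
    	default:
    		/* 2.5 GT/s or anything unrecognized */
    		return CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
    	}
    }
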
index 76ee8e04ff1172b81c18d478b2738f6149661f5d..6748cd7fc129b0e7b83966da865f674c676c04e1 100644 (file)
@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
        struct amdgpu_bo *new_abo;
        unsigned long flags;
        u64 tiling_flags;
-       u64 base;
        int i, r;
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,12 +188,18 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
                goto cleanup;
        }
 
-       r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+       r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to pin new abo buffer before flip\n");
                goto unreserve;
        }
 
+       r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
+       if (unlikely(r != 0)) {
+               DRM_ERROR("%p bind failed\n", new_abo);
+               goto unpin;
+       }
+
        r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
                                              &work->shared_count,
                                              &work->shared);
@@ -206,7 +211,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
        amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
        amdgpu_bo_unreserve(new_abo);
 
-       work->base = base;
+       work->base = amdgpu_bo_gpu_offset(new_abo);
        work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
                amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
index 77ad59ade85ca79b56d352608e96fde4395ece99..1c4595562f8fd29bc682764265e5d2fd472b1f2c 100644 (file)
@@ -28,6 +28,7 @@
 #include "amdgpu_i2c.h"
 #include "amdgpu_dpm.h"
 #include "atom.h"
+#include "amd_pcie.h"
 
 void amdgpu_dpm_print_class_info(u32 class, u32 class2)
 {
@@ -936,9 +937,11 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
        case AMDGPU_PCIE_GEN3:
                return AMDGPU_PCIE_GEN3;
        default:
-               if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
+               if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+                   (default_gen == AMDGPU_PCIE_GEN3))
                        return AMDGPU_PCIE_GEN3;
-               else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
+               else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+                        (default_gen == AMDGPU_PCIE_GEN2))
                        return AMDGPU_PCIE_GEN2;
                else
                        return AMDGPU_PCIE_GEN1;
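
With sys_mask now carrying the CAIL flags directly, a caller that wants Gen3 when the platform mask allows it might look like this (a hedged example; it assumes the remaining parameters are the system mask, the ASIC gen and the default gen, which the hunk header above does not show in full):

    	enum amdgpu_pcie_gen gen =
    		amdgpu_get_pcie_gen_support(adev, adev->pm.pcie_gen_mask,
    					    AMDGPU_PCIE_GEN3, AMDGPU_PCIE_GEN3);
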
index dd6203a0a6b77cd4d1f0e3bdb0b093fb1ccfdbd3..ff24e1cc5b65bd66e4657e833a97e1d18ae4d2b3 100644 (file)
@@ -287,12 +287,6 @@ enum amdgpu_pcie_gen {
 #define amdgpu_dpm_force_performance_level(adev, l) \
                ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
 
-#define amdgpu_dpm_powergate_uvd(adev, g) \
-               ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
-               ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
-
 #define amdgpu_dpm_get_current_power_state(adev) \
                ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
 
@@ -347,6 +341,10 @@ enum amdgpu_pcie_gen {
                ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
                        (adev)->powerplay.pp_handle, msg_id))
 
+#define amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate) \
+               ((adev)->powerplay.pp_funcs->set_powergating_by_smu(\
+                       (adev)->powerplay.pp_handle, block_type, gate))
+
 #define amdgpu_dpm_get_power_profile_mode(adev, buf) \
                ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
                        (adev)->powerplay.pp_handle, buf))
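
The new amdgpu_dpm_set_powergating_by_smu() macro replaces the removed per-block wrappers (powergate_uvd, powergate_vce, set_mmhub_powergating_by_smu) with a single call keyed by IP block type. A hypothetical call site, using a block type from amd_shared.h:

    	/* gate UVD via the SMU */
    	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, true);
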
@@ -359,10 +357,6 @@ enum amdgpu_pcie_gen {
                ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
                        (adev)->powerplay.pp_handle, type, parameter, size))
 
-#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
-               ((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
-               (adev)->powerplay.pp_handle))
-
 struct amdgpu_dpm {
        struct amdgpu_ps        *ps;
        /* number of valid power states */
@@ -402,7 +396,6 @@ struct amdgpu_dpm {
        u32 tdp_adjustment;
        u16 load_line_slope;
        bool power_control;
-       bool ac_power;
        /* special states active */
        bool                    thermal_active;
        bool                    uvd_active;
@@ -439,6 +432,7 @@ struct amdgpu_pm {
        struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
        uint32_t                smu_prv_buffer_size;
        struct amdgpu_bo        *smu_prv_buffer;
+       bool ac_power;
 };
 
 #define R600_SSTU_DFLT                               0
index b0bf2f24da48fb5489a13793c4a819bbef32ae2f..8843a06360fa7bd7e9620c76012b83b21e51f8c7 100644 (file)
@@ -1,10 +1,3 @@
-/**
- * \file amdgpu_drv.c
- * AMD Amdgpu driver
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
 /*
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  * - 3.24.0 - Add high priority compute support for gfx9
  * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
  * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       26
+#define KMS_DRIVER_MINOR       27
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
@@ -110,11 +104,8 @@ int amdgpu_vram_page_split = 512;
 int amdgpu_vm_update_mode = -1;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_dc = -1;
-int amdgpu_dc_log = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
 uint amdgpu_pcie_gen_cap = 0;
 uint amdgpu_pcie_lane_cap = 0;
 uint amdgpu_cg_mask = 0xffffffff;
@@ -122,7 +113,8 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */
+/* OverDrive (bit 14), gfxoff (bit 15), stutter mode (bit 17) disabled by default */
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
@@ -135,163 +127,368 @@ int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
 
+/**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing.  The default is 0 (Use full VRAM).
+ */
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
 
+/**
+ * DOC: vis_vramlimit (int)
+ * Restrict the amount of CPU visible VRAM in MiB for testing.  The default is 0 (Use full CPU visible VRAM).
+ */
 MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
 module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
 
+/**
+ * DOC: gartsize (uint)
+ * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (the size depends on the ASIC).
+ */
 MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
 module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
 
+/**
+ * DOC: gttsize (int)
+ * Restrict the size of GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM,
+ * otherwise 3/4 RAM size).
+ */
 MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
 module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
 
+/**
+ * DOC: moverate (int)
+ * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
+ */
 MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
 module_param_named(moverate, amdgpu_moverate, int, 0600);
 
+/**
+ * DOC: benchmark (int)
+ * Run benchmarks. The default is 0 (Skip benchmarks).
+ */
 MODULE_PARM_DESC(benchmark, "Run benchmark");
 module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
 
+/**
+ * DOC: test (int)
+ * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (skip the test; set 1 to run it).
+ */
 MODULE_PARM_DESC(test, "Run tests");
 module_param_named(test, amdgpu_testing, int, 0444);
 
+/**
+ * DOC: audio (int)
+ * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (enabled); set 0 to disable it.
+ */
 MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, amdgpu_audio, int, 0444);
 
+/**
+ * DOC: disp_priority (int)
+ * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
+ */
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
 module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
 
+/**
+ * DOC: hw_i2c (int)
+ * Enable the hw i2c engine (0 = disable). Only affects non-DC display handling. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
 module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
 
+/**
+ * DOC: pcie_gen2 (int)
+ * Enable or disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
 
+/**
+ * DOC: msi (int)
+ * Enable or disable Message Signaled Interrupts (MSI) (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(msi, amdgpu_msi, int, 0444);
 
+/**
+ * DOC: lockup_timeout (int)
+ * Set the GPU scheduler timeout value in ms. A value of 0 is invalid and will be adjusted to 10000.
+ * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
+ */
 MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
 module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 
+/**
+ * DOC: dpm (int)
+ * Override for dynamic power management setting (1 = enable, 0 = disable). The default is -1 (auto).
+ */
 MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(dpm, amdgpu_dpm, int, 0444);
 
+/**
+ * DOC: fw_load_type (int)
+ * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
+ */
 MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
 module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
 
+/**
+ * DOC: aspm (int)
+ * Enable or disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(aspm, amdgpu_aspm, int, 0444);
 
+/**
+ * DOC: runpm (int)
+ * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
+ * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ */
 MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
+/**
+ * DOC: ip_block_mask (uint)
+ * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
+ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
+ * some IPs or may include multiple instances of an IP so the ordering varies from asic to asic. See the driver output in
+ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
+ */
 MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
 module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
 
+/**
+ * DOC: bapm (int)
+ * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between CPU and GPU. Set 0 to disable it.
+ * The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(bapm, amdgpu_bapm, int, 0444);
 
+/**
+ * DOC: deep_color (int)
+ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
 module_param_named(deep_color, amdgpu_deep_color, int, 0444);
 
+/**
+ * DOC: vm_size (int)
+ * Override the size of the GPU's per client virtual address space in GiB.  The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
 module_param_named(vm_size, amdgpu_vm_size, int, 0444);
 
+/**
+ * DOC: vm_fragment_size (int)
+ * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
 module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
 
+/**
+ * DOC: vm_block_size (int)
+ * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
 module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
 
+/**
+ * DOC: vm_fault_stop (int)
+ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
+ */
 MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
 module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
 
+/**
+ * DOC: vm_debug (int)
+ * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
+ */
 MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
 module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 
+/**
+ * DOC: vm_update_mode (int)
+ * Override the VM update mode, i.e. whether page tables are updated using the CPU (0 = never, 1 = Graphics only, 2 = Compute only,
+ * 3 = Both). The default is -1 (Compute VM tables are updated by the CPU only on large BAR (LB) systems, otherwise never).
+ */
 MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
 module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
 
+/**
+ * DOC: vram_page_split (int)
+ * Override the number of pages after which VRAM allocations are split (-1 = disable splitting). The default is 512.
+ */
 MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 512, -1 = disable)");
 module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
 
+/**
+ * DOC: exp_hw_support (int)
+ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
+/**
+ * DOC: dc (int)
+ * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
+ */
 MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(dc, amdgpu_dc, int, 0444);
 
-MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty");
-module_param_named(dc_log, amdgpu_dc_log, int, 0444);
-
+/**
+ * DOC: sched_jobs (int)
+ * Override the max number of jobs supported in the sw queue. The default is 32.
+ */
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 
+/**
+ * DOC: sched_hw_submission (int)
+ * Override the max number of HW submissions. The default is 2.
+ */
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
+/**
+ * DOC: ppfeaturemask (uint)
+ * Override which power features are enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable power features.
+ */
 MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
 module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
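
As with any of these module parameters, the mask can be set on the kernel command line or at load time; for example (the mask value is illustrative):

    	amdgpu.ppfeaturemask=0xffffffff          # kernel command line
    	modprobe amdgpu ppfeaturemask=0xffffffff # equivalent at load time
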
 
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
-
+/**
+ * DOC: pcie_gen_cap (uint)
+ * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
 module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 
+/**
+ * DOC: pcie_lane_cap (uint)
+ * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
 MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
 module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
 
+/**
+ * DOC: cg_mask (uint)
+ * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
 MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
 module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
 
+/**
+ * DOC: pg_mask (uint)
+ * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
 MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
 module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
 
+/**
+ * DOC: sdma_phase_quantum (uint)
+ * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
+ */
 MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
 module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
 
+/**
+ * DOC: disable_cu (charp)
+ * Disable individual CUs, given as a list of se.sh.cu values. The default is NULL (no CUs disabled).
+ */
 MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
 module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
 
+/**
+ * DOC: virtual_display (charp)
+ * Set to enable virtual display feature. This feature provides a virtual display hardware on headless boards
+ * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
+ * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
+ * device at 26:00.0. The default is NULL.
+ */
 MODULE_PARM_DESC(virtual_display,
                 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
 module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
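
Following the format described above, exposing four virtual CRTCs on the PCI device at 0000:26:00.0 would look like:

    	modprobe amdgpu virtual_display=0000:26:00.0,4
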
 
+/**
+ * DOC: ngg (int)
+ * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
 module_param_named(ngg, amdgpu_ngg, int, 0444);
 
+/**
+ * DOC: prim_buf_per_se (int)
+ * Override the size of the Primitive Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
 module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
 
+/**
+ * DOC: pos_buf_per_se (int)
+ * Override the size of the Position Buffer per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
 module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
 
+/**
+ * DOC: cntl_sb_buf_per_se (int)
+ * Override the size of the Control Sideband per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
 module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
 
+/**
+ * DOC: param_buf_per_se (int)
+ * Override the size of the Off-Chip Parameter Cache per Shader Engine in bytes. The default is 0 (depending on gfx).
+ */
 MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
 module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
 
+/**
+ * DOC: job_hang_limit (int)
+ * Set how long a job is allowed to hang before it is dropped. The default is 0.
+ */
 MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
 module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
 
+/**
+ * DOC: lbpw (int)
+ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
 MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(lbpw, amdgpu_lbpw, int, 0444);
 
 MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
 
+/**
+ * DOC: gpu_recovery (int)
+ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
+ */
 MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
 module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
 
+/**
+ * DOC: emu_mode (int)
+ * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
 module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
 
+/**
+ * DOC: si_support (int)
+ * Select the driver for SI ASICs. This parameter only takes effect when the kernel is built with CONFIG_DRM_AMDGPU_SI.
+ * For SI ASICs, when the radeon driver is also enabled, set 0 to use the radeon driver or 1 to use the amdgpu driver.
+ * The default is to use the radeon driver when it is available, otherwise the amdgpu driver.
+ */
 #ifdef CONFIG_DRM_AMDGPU_SI
 
 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -305,6 +502,12 @@ MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)")
 module_param_named(si_support, amdgpu_si_support, int, 0444);
 #endif
 
+/**
+ * DOC: cik_support (int)
+ * Select the driver for CIK ASICs. This parameter only takes effect when the kernel is built with CONFIG_DRM_AMDGPU_CIK.
+ * For CIK ASICs, when the radeon driver is also enabled, set 0 to use the radeon driver or 1 to use the amdgpu driver.
+ * The default is to use the radeon driver when it is available, otherwise the amdgpu driver.
+ */
 #ifdef CONFIG_DRM_AMDGPU_CIK
 
 #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -318,6 +521,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)
 module_param_named(cik_support, amdgpu_cik_support, int, 0444);
 #endif
 
+/**
+ * DOC: smu_memory_pool_size (uint)
+ * Reserve GTT memory for SMU debug usage; set 0 to disable it. The reserved size is value * 256 MiB.
+ * E.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
+ */
 MODULE_PARM_DESC(smu_memory_pool_size,
        "reserve gtt for smu debug usage, 0 = disable,"
                "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
@@ -664,7 +872,7 @@ retry_init:
 err_pci:
        pci_disable_device(pdev);
 err_free:
-       drm_dev_unref(dev);
+       drm_dev_put(dev);
        return ret;
 }
 
@@ -674,7 +882,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
 
        drm_dev_unregister(dev);
-       drm_dev_unref(dev);
+       drm_dev_put(dev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
 }
@@ -855,9 +1063,21 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .runtime_idle = amdgpu_pmops_runtime_idle,
 };
 
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+       struct drm_file *file_priv = f->private_data;
+       struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+
+       amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+
+       return 0;
+}
+
+
 static const struct file_operations amdgpu_driver_kms_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
+       .flush = amdgpu_flush,
        .release = drm_release,
        .unlocked_ioctl = amdgpu_drm_ioctl,
        .mmap = amdgpu_mmap,
index 94138abe093b1a9415f08ab713557ebe977fc193..ae8fac34f7a59bbe6dbed55bc0f0954e3de301af 100644 (file)
@@ -46,7 +46,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                        amdgpu_encoder = to_amdgpu_encoder(encoder);
                        if (amdgpu_encoder->devices & amdgpu_connector->devices) {
-                               drm_mode_connector_attach_encoder(connector, encoder);
+                               drm_connector_attach_encoder(connector, encoder);
                                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                                        amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
                                        adev->mode_info.bl_encoder = amdgpu_encoder;
index bc5fd8ebab5dd5ac693f47da46be75fecaff8a62..69c5d22f29bdf96fb44aba06111d7a347c91f533 100644 (file)
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
                                       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                                       AMDGPU_GEM_CREATE_VRAM_CLEARED,
-                                      true, NULL, &gobj);
+                                      ttm_bo_type_kernel, NULL, &gobj);
        if (ret) {
                pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
                return -ENOMEM;
@@ -168,11 +168,19 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
        }
 
 
-       ret = amdgpu_bo_pin(abo, domain, NULL);
+       ret = amdgpu_bo_pin(abo, domain);
        if (ret) {
                amdgpu_bo_unreserve(abo);
                goto out_unref;
        }
+
+       ret = amdgpu_ttm_alloc_gart(&abo->tbo);
+       if (ret) {
+               amdgpu_bo_unreserve(abo);
+               dev_err(adev->dev, "%p bind failed\n", abo);
+               goto out_unref;
+       }
+
        ret = amdgpu_bo_kmap(abo, NULL);
        amdgpu_bo_unreserve(abo);
        if (ret) {
@@ -365,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
 {
        if (adev->mode_info.rfbdev)
-               drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
-                       state);
+               drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+                                                  state);
 }
 
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
index 39ec6b8890a1bf200053900b7998e5f33d703a32..7056925eb38606fcd896ea2525643261f8495575 100644 (file)
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;
 
-       if (ring != &adev->uvd.inst[ring->me].ring) {
+       if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
@@ -646,7 +646,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
-       .wait = dma_fence_default_wait,
        .release = amdgpu_fence_release,
 };
 
index dd11b7313ca07b960867bb305f3174513b374e9c..a54d5655a191abac380e58c7d175cd08f055fa5b 100644 (file)
@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
  */
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 {
-       uint64_t gpu_addr;
        int r;
 
        r = amdgpu_bo_reserve(adev->gart.robj, false);
        if (unlikely(r != 0))
                return r;
-       r = amdgpu_bo_pin(adev->gart.robj,
-                               AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+       r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
        if (r) {
                amdgpu_bo_unreserve(adev->gart.robj);
                return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
        if (r)
                amdgpu_bo_unpin(adev->gart.robj);
        amdgpu_bo_unreserve(adev->gart.robj);
-       adev->gart.table_addr = gpu_addr;
+       adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
        return r;
 }
 
@@ -234,7 +232,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
        }
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+       p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++) {
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
                adev->gart.pages[p] = NULL;
@@ -243,7 +241,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                if (!adev->gart.ptr)
                        continue;
 
-               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+               for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
                                               t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -282,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 
        for (i = 0; i < pages; i++) {
                page_base = dma_addr[i];
-               for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+               for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
                        amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
                        page_base += AMDGPU_GPU_PAGE_SIZE;
                }
@@ -319,7 +317,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        t = offset / AMDGPU_GPU_PAGE_SIZE;
-       p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+       p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
        for (i = 0; i < pages; i++, p++)
                adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
 #endif
index 456295c002915d56b9fb57629bcc33275f76aa5a..9f9e9dc87da11c42b2e414fbe88a0251bcf9897d 100644 (file)
@@ -37,6 +37,8 @@ struct amdgpu_bo;
 #define AMDGPU_GPU_PAGE_SHIFT 12
 #define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
 
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
 struct amdgpu_gart {
        u64                             table_addr;
        struct amdgpu_bo                *robj;
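
The new macro only names the PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE ratio the GART code already computed inline. With AMDGPU_GPU_PAGE_SHIFT = 12 the GPU page is 4 KiB, so on a 4 KiB-page kernel AMDGPU_GPU_PAGES_IN_CPU_PAGE is 4096 / 4096 = 1, while on a 64 KiB-page kernel (some arm64/ppc64 configurations) it is 65536 / 4096 = 16 GPU pages per CPU page.
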
index 5fb156a01774ea5d245348f6e4341dfba5ca76aa..71792d820ae0cba1306b721744b0d915cbe4da84 100644 (file)
@@ -265,7 +265,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     (u32)(0xffffffff & args->in.domains),
-                                    flags, false, resv, &gobj);
+                                    flags, ttm_bo_type_device, resv, &gobj);
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -317,7 +317,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
-                                    0, 0, NULL, &gobj);
+                                    0, ttm_bo_type_device, NULL, &gobj);
        if (r)
                return r;
 
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
                if (r)
                        goto free_pages;
 
-               amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
@@ -510,7 +510,6 @@ out:
  * @adev: amdgpu_device pointer
  * @vm: vm to update
  * @bo_va: bo_va to update
- * @list: validation list
  * @operation: map, unmap or clear
  *
  * Update the bo_va directly after setting its address. Errors are not
@@ -519,7 +518,6 @@ out:
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va *bo_va,
-                                   struct list_head *list,
                                    uint32_t operation)
 {
        int r;
@@ -612,7 +610,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
-               tv.shared = false;
+               tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
@@ -673,7 +671,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                break;
        }
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
-               amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
+               amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
                                        args->operation);
 
 error_backoff:
@@ -768,7 +766,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                                amdgpu_display_supported_domains(adev));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain,
                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-                                    false, NULL, &gobj);
+                                    ttm_bo_type_device, NULL, &gobj);
        if (r)
                return -ENOMEM;
 
index 893c2490b7836bc5220ff1f8636a132b12df7922..bb5a47a45790726272ab4d30077498b8c6edfa3d 100644 (file)
@@ -105,8 +105,25 @@ struct amdgpu_gmc {
        /* protects concurrent invalidation */
        spinlock_t              invalidate_lock;
        bool                    translate_further;
+       struct kfd_vm_fault_info *vm_fault_info;
+       atomic_t                vm_fault_info_updated;
 
        const struct amdgpu_gmc_funcs   *gmc_funcs;
 };
 
+/**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+ * @gmc: amdgpu_gmc pointer
+ *
+ * Returns:
+ * True if full VRAM is visible through the BAR
+ */
+static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+{
+       WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
+
+       return (gmc->real_vram_size == gmc->visible_vram_size);
+}
+
 #endif
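
A hedged example of how a caller might use the new helper (the surrounding logic is illustrative, not from this patch):

    	if (amdgpu_gmc_vram_full_visible(&adev->gmc)) {
    		/* all of VRAM is CPU-accessible through the BAR, so
    		 * allocations need not be restricted to the visible window */
    	}
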
index f70eeed9ed76fa893dabe2218c4c85c4b4aec104..5518e623fed21046791c42e026383526f8cf8adb 100644 (file)
@@ -139,7 +139,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
-               fence_ctx = job->fence_ctx;
+               fence_ctx = job->base.s_fence->scheduled.context;
        } else {
                vm = NULL;
                fence_ctx = 0;
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
+       /* wrap the last IB with fence */
+       if (job && job->uf_addr) {
+               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+                                      fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+       }
+
        r = amdgpu_fence_emit(ring, f, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);
 
-       /* wrap the last IB with fence */
-       if (job && job->uf_addr) {
-               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-                                      fence_flags | AMDGPU_FENCE_FLAG_64BIT);
-       }
-
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
@@ -353,7 +353,8 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
                        ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
                        ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
                        ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
-                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        tmo = tmo_mm;
                else
                        tmo = tmo_gfx;
index a1c78f90eadffe6b719b822e0ed19c49bd0b4aa8..3a072a7a39f0faba89936f5735b349e282cf229a 100644 (file)
@@ -578,11 +578,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
                        list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
                }
        }
-
-       adev->vm_manager.fence_context =
-               dma_fence_context_alloc(AMDGPU_MAX_RINGS);
-       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-               adev->vm_manager.seqno[i] = 0;
 }
 
 /**
index 3a5ca462abf093457381bcaeae1a8e7ecb90b33e..1abf5b5bac9e547cda0eb93bcaaf07d407e96007 100644 (file)
  *          Alex Deucher
  *          Jerome Glisse
  */
+
+/**
+ * DOC: Interrupt Handling
+ *
+ * Interrupts generated within GPU hardware raise interrupt requests that are
+ * passed to amdgpu IRQ handler which is responsible for detecting source and
+ * type of the interrupt and dispatching matching handlers. If handling an
+ * interrupt requires calling kernel functions that may sleep processing is
+ * dispatched to work handlers.
+ *
+ * If MSI functionality is not disabled by module parameter then MSI
+ * support will be enabled.
+ *
+ * For GPU interrupt sources that may be driven by another driver, IRQ domain
+ * support is used (with mapping between virtual and hardware IRQs).
+ */
+
 #include <linux/irq.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
 
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
 /**
- * amdgpu_hotplug_work_func - display hotplug work handler
+ * amdgpu_hotplug_work_func - work handler for display hotplug event
  *
- * @work: work struct
+ * @work: work struct pointer
  *
- * This is the hot plug event work handler (all asics).
- * The work gets scheduled from the irq handler if there
- * was a hot plug interrupt.  It walks the connector table
- * and calls the hotplug handler for each one, then sends
- * a drm hotplug event to alert userspace.
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt.  It walks through the connector table
+ * and calls the hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler because the hotplug handler has to
+ * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock may
+ * sleep).
  */
 static void amdgpu_hotplug_work_func(struct work_struct *work)
 {
@@ -74,13 +93,12 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
 }
 
 /**
- * amdgpu_irq_reset_work_func - execute gpu reset
+ * amdgpu_irq_reset_work_func - execute GPU reset
  *
- * @work: work struct
+ * @work: work struct pointer
  *
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
+ * Execute scheduled GPU reset (Cayman+).
+ * This function is called when the IRQ handler thinks we need a GPU reset.
  */
 static void amdgpu_irq_reset_work_func(struct work_struct *work)
 {
@@ -91,7 +109,13 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
                amdgpu_device_gpu_recover(adev, NULL, false);
 }
 
-/* Disable *all* interrupts */
+/**
+ * amdgpu_irq_disable_all - disable *all* interrupts
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Disable all types of interrupts from all sources.
+ */
 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 {
        unsigned long irqflags;
@@ -123,11 +147,15 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_handler - irq handler
+ * amdgpu_irq_handler - IRQ handler
+ *
+ * @irq: IRQ number (unused)
+ * @arg: pointer to DRM device
  *
- * @int irq, void *arg: args
+ * IRQ handler for amdgpu driver (all ASICs).
  *
- * This is the irq handler for the amdgpu driver (all asics).
+ * Returns:
+ * result of handling the IRQ, as defined by &irqreturn_t
  */
 irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 {
@@ -142,18 +170,18 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 }
 
 /**
- * amdgpu_msi_ok - asic specific msi checks
+ * amdgpu_msi_ok - check whether MSI functionality is enabled
  *
- * @adev: amdgpu device pointer
+ * @adev: amdgpu device pointer (unused)
+ *
+ * Checks whether MSI functionality has been disabled via module parameter
+ * (all ASICs).
  *
- * Handles asic specific MSI checks to determine if
- * MSIs should be enabled on a particular chip (all asics).
- * Returns true if MSIs should be enabled, false if MSIs
- * should not be enabled.
+ * Returns:
+ * *true* if MSIs are allowed to be enabled or *false* otherwise
  */
 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 {
-       /* force MSI on */
        if (amdgpu_msi == 1)
                return true;
        else if (amdgpu_msi == 0)
@@ -163,12 +191,15 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_init - init driver interrupt info
+ * amdgpu_irq_init - initialize interrupt handling
  *
  * @adev: amdgpu device pointer
  *
- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
- * Returns 0 for success, error for failure.
+ * Sets up work functions for hotplug and reset interrupts, enables MSI
+ * functionality, initializes vblank, hotplug and reset interrupt handling.
+ *
+ * Returns:
+ * 0 on success or error code on failure
  */
 int amdgpu_irq_init(struct amdgpu_device *adev)
 {
@@ -176,7 +207,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 
        spin_lock_init(&adev->irq.lock);
 
-       /* enable msi */
+       /* Enable MSI if not disabled by module parameter */
        adev->irq.msi_enabled = false;
 
        if (amdgpu_msi_ok(adev)) {
@@ -189,7 +220,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 
        if (!amdgpu_device_has_dc_support(adev)) {
                if (!adev->enable_virtual_display)
-                       /* Disable vblank irqs aggressively for power-saving */
+                       /* Disable vblank IRQs aggressively for power-saving */
                        /* XXX: can this be enabled for DC? */
                        adev->ddev->vblank_disable_immediate = true;
 
@@ -197,7 +228,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
                if (r)
                        return r;
 
-               /* pre DCE11 */
+               /* Pre-DCE11 */
                INIT_WORK(&adev->hotplug_work,
                                amdgpu_hotplug_work_func);
        }
@@ -220,11 +251,13 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_fini - tear down driver interrupt info
+ * amdgpu_irq_fini - shut down interrupt handling
  *
  * @adev: amdgpu device pointer
  *
- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ * Tears down work functions for hotplug and reset interrupts, disables MSI
+ * functionality, shuts down vblank, hotplug and reset interrupt handling,
+ * turns off interrupts from all sources (all ASICs).
  */
 void amdgpu_irq_fini(struct amdgpu_device *adev)
 {
@@ -264,12 +297,17 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_add_id - register irq source
+ * amdgpu_irq_add_id - register IRQ source
  *
  * @adev: amdgpu device pointer
- * @src_id: source id for this source
- * @source: irq source
+ * @client_id: client id
+ * @src_id: source id
+ * @source: IRQ source pointer
+ *
+ * Registers an IRQ source with a client.
  *
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
@@ -312,12 +350,12 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
 }
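
An IP block typically registers its source once during early init; a sketch with illustrative names (the funcs table, the source field and src_id are placeholders, not from this patch):

    	static const struct amdgpu_irq_src_funcs my_irq_funcs = {
    		.set = my_irq_set_state,   /* enable/disable a given type */
    		.process = my_irq_process, /* handle a dispatched IV entry */
    	};

    	adev->my_irq.funcs = &my_irq_funcs;
    	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, src_id,
    			      &adev->my_irq);
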
 
 /**
- * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
  *
  * @adev: amdgpu device pointer
- * @entry: interrupt vector
+ * @entry: interrupt vector pointer
  *
- * Dispatches the irq to the different IP blocks
+ * Dispatches IRQ to IP blocks.
  */
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry)
@@ -361,13 +399,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_irq_update - update hw interrupt state
+ * amdgpu_irq_update - update hardware interrupt state
  *
  * @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to update
+ * @src: interrupt source pointer
+ * @type: type of interrupt
  *
- * Updates the interrupt state for a specific src (all asics).
+ * Updates interrupt state for the specific source (all ASICs).
  */
 int amdgpu_irq_update(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *src, unsigned type)
@@ -378,7 +416,7 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 
        spin_lock_irqsave(&adev->irq.lock, irqflags);
 
-       /* we need to determine after taking the lock, otherwise
+       /* We need to determine the state after taking the lock, otherwise
           we might disable just enabled interrupts again */
        if (amdgpu_irq_enabled(adev, src, type))
                state = AMDGPU_IRQ_STATE_ENABLE;
@@ -390,6 +428,14 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Updates state of all types of interrupts on all sources on resume after
+ * reset.
+ */
 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
        int i, j, k;
@@ -413,10 +459,13 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
  * amdgpu_irq_get - enable interrupt
  *
  * @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to enable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
  *
- * Enables the interrupt type for a specific src (all asics).
+ * Enables specified type of interrupt on the specified source (all ASICs).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
@@ -440,10 +489,13 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
  * amdgpu_irq_put - disable interrupt
  *
  * @adev: amdgpu device pointer
- * @src: interrupt src you want to disable
- * @type: type of interrupt you want to disable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
+ *
+ * Disables specified type of interrupt on the specified source (all ASICs).
  *
- * Disables the interrupt type for a specific src (all asics).
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
@@ -464,12 +516,17 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 }
 
 /**
- * amdgpu_irq_enabled - test if irq is enabled or not
+ * amdgpu_irq_enabled - check whether interrupt is enabled or not
  *
  * @adev: amdgpu device pointer
- * @idx: interrupt src you want to test
+ * @src: interrupt source pointer
+ * @type: type of interrupt
  *
- * Tests if the given interrupt source is enabled or not
+ * Checks whether the given type of interrupt is enabled on the given source.
+ *
+ * Returns:
+ * *true* if interrupt is enabled, *false* if interrupt is disabled or on
+ * invalid parameters
  */
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
@@ -486,7 +543,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
        return !!atomic_read(&src->enabled_types[type]);
 }
 
-/* gen irq */
+/* XXX: Generic IRQ handling */
 static void amdgpu_irq_mask(struct irq_data *irqd)
 {
        /* XXX */
@@ -497,12 +554,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
        /* XXX */
 }
 
+/* amdgpu hardware interrupt chip descriptor */
 static struct irq_chip amdgpu_irq_chip = {
        .name = "amdgpu-ih",
        .irq_mask = amdgpu_irq_mask,
        .irq_unmask = amdgpu_irq_unmask,
 };
 
+/**
+ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
+ *
+ * @d: amdgpu IRQ domain pointer (unused)
+ * @irq: virtual IRQ number
+ * @hwirq: hardware IRQ number
+ *
+ * The current implementation assigns a simple interrupt handler to the given
+ * virtual IRQ.
+ *
+ * Returns:
+ * 0 on success or error code otherwise
+ */
 static int amdgpu_irqdomain_map(struct irq_domain *d,
                                unsigned int irq, irq_hw_number_t hwirq)
 {
@@ -514,17 +585,21 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
        return 0;
 }
 
+/* Implementation of methods for amdgpu IRQ domain */
 static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
        .map = amdgpu_irqdomain_map,
 };
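
amdgpu_irqdomain_map() above ties a hardware IRQ number to a virtual (Linux) IRQ, and amdgpu_irq_create_mapping() below hands such Linux IRQs to client drivers such as ACP. A toy model of a linear domain with on-demand mappings; virq_map, next_virq and create_mapping are made-up names for illustration, not the kernel API:

#include <stdio.h>

#define NUM_HWIRQ 8

/* Hypothetical linear-domain model: hwirq -> Linux IRQ, created on demand. */
static int virq_map[NUM_HWIRQ];
static int next_virq = 32;      /* pretend Linux IRQ numbers start here */

static int create_mapping(unsigned int hwirq)
{
        if (hwirq >= NUM_HWIRQ)
                return -1;
        if (!virq_map[hwirq])
                virq_map[hwirq] = next_virq++;  /* first use creates it */
        return virq_map[hwirq];
}

int main(void)
{
        printf("IH src 5 -> Linux IRQ %d\n", create_mapping(5));
        printf("IH src 5 -> Linux IRQ %d\n", create_mapping(5)); /* same */
        return 0;
}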
 
 /**
- * amdgpu_irq_add_domain - create a linear irq domain
+ * amdgpu_irq_add_domain - create a linear IRQ domain
  *
  * @adev: amdgpu device pointer
  *
- * Create an irq domain for GPU interrupt sources
+ * Creates an IRQ domain for GPU interrupt sources
  * that may be driven by another driver (e.g., ACP).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
  */
 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
 {
@@ -539,11 +614,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_remove_domain - remove the irq domain
+ * amdgpu_irq_remove_domain - remove the IRQ domain
  *
  * @adev: amdgpu device pointer
  *
- * Remove the irq domain for GPU interrupt sources
+ * Removes the IRQ domain for GPU interrupt sources
  * that may be driven by another driver (e.g., ACP).
  */
 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
@@ -555,16 +630,17 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
- *                             Linux irq
+ * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
  *
  * @adev: amdgpu device pointer
  * @src_id: IH source id
  *
- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
+ * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
  * Use this for components that generate a GPU interrupt, but are driven
  * by a different driver (e.g., ACP).
- * Returns the Linux irq.
+ *
+ * Returns:
+ * Linux IRQ
  */
 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
 {
index 2bd56760c7441fdc720d3a469fef424ae39741ea..391e2f7c03aacdfae679057204e02e10e756cb8a 100644 (file)
 
 static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+       struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-       DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-                 job->base.sched->name,
-                 atomic_read(&job->ring->fence_drv.last_seq),
-                 job->ring->fence_drv.sync_seq);
+       DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+                 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+                 ring->fence_drv.sync_seq);
 
-       amdgpu_device_gpu_recover(job->adev, job, false);
+       amdgpu_device_gpu_recover(ring->adev, job, false);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -54,7 +54,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        if (!*job)
                return -ENOMEM;
 
-       (*job)->adev = adev;
+       /*
+        * Initialize the scheduler to at least some ring so that we always
+        * have a pointer to adev.
+        */
+       (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
@@ -86,6 +90,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;
 
@@ -93,14 +98,15 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
        for (i = 0; i < job->num_ibs; ++i)
-               amdgpu_ib_free(job->adev, &job->ibs[i], f);
+               amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+       struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-       amdgpu_ring_priority_put(job->ring, s_job->s_priority);
+       amdgpu_ring_priority_put(ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
@@ -117,50 +123,68 @@ void amdgpu_job_free(struct amdgpu_job *job)
        kfree(job);
 }
 
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     struct drm_sched_entity *entity, void *owner,
-                     struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+                     void *owner, struct dma_fence **f)
 {
+       enum drm_sched_priority priority;
+       struct amdgpu_ring *ring;
        int r;
-       job->ring = ring;
 
        if (!f)
                return -EINVAL;
 
-       r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+       r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;
 
        job->owner = owner;
-       job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
-       amdgpu_ring_priority_get(job->ring, job->base.s_priority);
+       priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);
 
+       ring = to_amdgpu_ring(entity->rq->sched);
+       amdgpu_ring_priority_get(ring, priority);
+
+       return 0;
+}
+
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+                            struct dma_fence **fence)
+{
+       int r;
+
+       job->base.sched = &ring->sched;
+       r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
+       if (r)
+               return r;
+       job->fence = dma_fence_get(*fence);
+
+       amdgpu_job_free(job);
        return 0;
 }
 
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
+       struct dma_fence *fence;
        bool explicit = false;
        int r;
-       struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
 
+       fence = amdgpu_sync_get_fence(&job->sync, &explicit);
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
-                       r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
+                       r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
+                                             fence, false);
                        if (r)
-                               DRM_ERROR("Error adding fence to sync (%d)\n", r);
+                               DRM_ERROR("Error adding fence (%d)\n", r);
                }
        }
 
        while (fence == NULL && vm && !job->vmid) {
-               struct amdgpu_ring *ring = job->ring;
-
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
@@ -175,30 +199,25 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 
 static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 {
+       struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
-       struct amdgpu_device *adev;
        struct amdgpu_job *job;
        int r;
 
-       if (!sched_job) {
-               DRM_ERROR("job is null\n");
-               return NULL;
-       }
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
-       adev = job->adev;
 
        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
 
-       if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+       if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);/* skip IB as well if VRAM lost */
 
        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
-               r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
+               r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
new file mode 100644 (file)
index 0000000..57cfe78
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_JOB_H__
+#define __AMDGPU_JOB_H__
+
+/* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0)
+/* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1)
+/* bit set means context switch occurred */
+#define AMDGPU_HAVE_CTX_SWITCH              (1 << 2)
+
+#define to_amdgpu_job(sched_job)               \
+               container_of((sched_job), struct amdgpu_job, base)
+
+struct amdgpu_fence;
+
+struct amdgpu_job {
+       struct drm_sched_job    base;
+       struct amdgpu_vm        *vm;
+       struct amdgpu_sync      sync;
+       struct amdgpu_sync      sched_sync;
+       struct amdgpu_ib        *ibs;
+       struct dma_fence        *fence; /* the hw fence */
+       uint32_t                preamble_status;
+       uint32_t                num_ibs;
+       void                    *owner;
+       bool                    vm_needs_flush;
+       uint64_t                vm_pd_addr;
+       unsigned                vmid;
+       unsigned                pasid;
+       uint32_t                gds_base, gds_size;
+       uint32_t                gws_base, gws_size;
+       uint32_t                oa_base, oa_size;
+       uint32_t                vram_lost_counter;
+
+       /* user fence handling */
+       uint64_t                uf_addr;
+       uint64_t                uf_sequence;
+
+};
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
+                    struct amdgpu_job **job, struct amdgpu_vm *vm);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+                            struct amdgpu_job **job);
+
+void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_free(struct amdgpu_job *job);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+                     void *owner, struct dma_fence **f);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+                            struct dma_fence **fence);
+#endif
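
The to_amdgpu_job() macro above is the usual container_of() pattern: given the embedded drm_sched_job the scheduler hands back, recover the enclosing amdgpu_job. Together with to_amdgpu_ring() this is how the series replaces the removed job->ring and job->adev pointers. A self-contained sketch of the pattern, with hypothetical sched_job/my_job types standing in for the scheduler structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical analogues of drm_sched_job and amdgpu_job. */
struct sched_job { int id; };

struct my_job {
        struct sched_job base;  /* embedded scheduler bookkeeping */
        int payload;
};

int main(void)
{
        struct my_job job = { .base = { .id = 7 }, .payload = 42 };
        struct sched_job *s = &job.base;        /* what callbacks receive */
        struct my_job *j = container_of(s, struct my_job, base);

        printf("id=%d payload=%d\n", j->base.id, j->payload);
        return 0;
}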
index 91517b166a3b8f504930586a97f173eadc7b47f6..bd98cc5fb97bcab725c18f240fa19658da961354 100644 (file)
@@ -328,61 +328,71 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                case AMDGPU_HW_IP_GFX:
                        type = AMD_IP_BLOCK_TYPE_GFX;
                        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-                               ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 8;
+                               ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+                       ib_start_alignment = 32;
+                       ib_size_alignment = 32;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        type = AMD_IP_BLOCK_TYPE_GFX;
                        for (i = 0; i < adev->gfx.num_compute_rings; i++)
-                               ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 8;
+                               ring_mask |= adev->gfx.compute_ring[i].ready << i;
+                       ib_start_alignment = 32;
+                       ib_size_alignment = 32;
                        break;
                case AMDGPU_HW_IP_DMA:
                        type = AMD_IP_BLOCK_TYPE_SDMA;
                        for (i = 0; i < adev->sdma.num_instances; i++)
-                               ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 1;
+                               ring_mask |= adev->sdma.instance[i].ring.ready << i;
+                       ib_start_alignment = 256;
+                       ib_size_alignment = 4;
                        break;
                case AMDGPU_HW_IP_UVD:
                        type = AMD_IP_BLOCK_TYPE_UVD;
-                       for (i = 0; i < adev->uvd.num_uvd_inst; i++)
-                               ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 16;
+                       for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                               if (adev->uvd.harvest_config & (1 << i))
+                                       continue;
+                               ring_mask |= adev->uvd.inst[i].ring.ready;
+                       }
+                       ib_start_alignment = 64;
+                       ib_size_alignment = 64;
                        break;
                case AMDGPU_HW_IP_VCE:
                        type = AMD_IP_BLOCK_TYPE_VCE;
                        for (i = 0; i < adev->vce.num_rings; i++)
-                               ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+                               ring_mask |= adev->vce.ring[i].ready << i;
+                       ib_start_alignment = 4;
                        ib_size_alignment = 1;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
                        type = AMD_IP_BLOCK_TYPE_UVD;
-                       for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+                       for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                               if (adev->uvd.harvest_config & (1 << i))
+                                       continue;
                                for (j = 0; j < adev->uvd.num_enc_rings; j++)
-                                       ring_mask |=
-                                       ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
-                                       (j + i * adev->uvd.num_enc_rings));
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
-                       ib_size_alignment = 1;
+                                       ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+                       }
+                       ib_start_alignment = 64;
+                       ib_size_alignment = 64;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                        type = AMD_IP_BLOCK_TYPE_VCN;
-                       ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+                       ring_mask = adev->vcn.ring_dec.ready;
+                       ib_start_alignment = 16;
                        ib_size_alignment = 16;
                        break;
                case AMDGPU_HW_IP_VCN_ENC:
                        type = AMD_IP_BLOCK_TYPE_VCN;
                        for (i = 0; i < adev->vcn.num_enc_rings; i++)
-                               ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
-                       ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
+                               ring_mask |= adev->vcn.ring_enc[i].ready << i;
+                       ib_start_alignment = 64;
                        ib_size_alignment = 1;
                        break;
+               case AMDGPU_HW_IP_VCN_JPEG:
+                       type = AMD_IP_BLOCK_TYPE_VCN;
+                       ring_mask = adev->vcn.ring_jpeg.ready;
+                       ib_start_alignment = 16;
+                       ib_size_alignment = 16;
+                       break;
                default:
                        return -EINVAL;
                }
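
The rewritten cases above build ring_mask by shifting each ring's boolean ready flag directly into its bit position (and, for UVD, skipping harvested instances first). A tiny standalone illustration of that bitmask construction, with made-up ready values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        bool ready[4] = { true, false, true, true };    /* assumed flags */
        unsigned int ring_mask = 0;
        int i;

        for (i = 0; i < 4; i++)
                ring_mask |= (unsigned int)ready[i] << i;

        printf("ring_mask = 0x%x\n", ring_mask);        /* prints 0xd */
        return 0;
}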
@@ -427,6 +437,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                case AMDGPU_HW_IP_VCN_ENC:
+               case AMDGPU_HW_IP_VCN_JPEG:
                        type = AMD_IP_BLOCK_TYPE_VCN;
                        break;
                default:
@@ -494,13 +505,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
        case AMDGPU_INFO_VRAM_GTT: {
                struct drm_amdgpu_info_vram_gtt vram_gtt;
 
-               vram_gtt.vram_size = adev->gmc.real_vram_size;
-               vram_gtt.vram_size -= adev->vram_pin_size;
-               vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
-               vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
+               vram_gtt.vram_size = adev->gmc.real_vram_size -
+                       atomic64_read(&adev->vram_pin_size);
+               vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
+                       atomic64_read(&adev->visible_pin_size);
                vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
                vram_gtt.gtt_size *= PAGE_SIZE;
-               vram_gtt.gtt_size -= adev->gart_pin_size;
+               vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
                return copy_to_user(out, &vram_gtt,
                                    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
        }
@@ -509,17 +520,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                memset(&mem, 0, sizeof(mem));
                mem.vram.total_heap_size = adev->gmc.real_vram_size;
-               mem.vram.usable_heap_size =
-                       adev->gmc.real_vram_size - adev->vram_pin_size;
+               mem.vram.usable_heap_size = adev->gmc.real_vram_size -
+                       atomic64_read(&adev->vram_pin_size);
                mem.vram.heap_usage =
                        amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
                mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
                mem.cpu_accessible_vram.total_heap_size =
                        adev->gmc.visible_vram_size;
-               mem.cpu_accessible_vram.usable_heap_size =
-                       adev->gmc.visible_vram_size -
-                       (adev->vram_pin_size - adev->invisible_pin_size);
+               mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
+                       atomic64_read(&adev->visible_pin_size);
                mem.cpu_accessible_vram.heap_usage =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
                mem.cpu_accessible_vram.max_allocation =
@@ -527,8 +537,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
                mem.gtt.total_heap_size *= PAGE_SIZE;
-               mem.gtt.usable_heap_size = mem.gtt.total_heap_size
-                       - adev->gart_pin_size;
+               mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
+                       atomic64_read(&adev->gart_pin_size);
                mem.gtt.heap_usage =
                        amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
                mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
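
The pin sizes reported above switch from plain integer fields to atomic64_t counters, so this ioctl can read them consistently with a single atomic64_read() while the pin/unpin paths update them without extra locking. A rough userspace analogue using C11 atomics; the 8 GiB board size is an assumption for the example:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_least64_t vram_pin_size;     /* like adev->vram_pin_size */

static void pin(uint64_t size)   { atomic_fetch_add(&vram_pin_size, size); }
static void unpin(uint64_t size) { atomic_fetch_sub(&vram_pin_size, size); }

int main(void)
{
        uint64_t real_vram_size = 8ULL << 30;   /* assumed 8 GiB board */

        pin(256ULL << 20);
        /* readable from any context without taking a lock */
        printf("usable VRAM: %llu MiB\n", (unsigned long long)
               ((real_vram_size - atomic_load(&vram_pin_size)) >> 20));
        unpin(256ULL << 20);
        return 0;
}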
@@ -930,7 +940,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
                return;
 
        pm_runtime_get_sync(dev->dev);
-       amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr);
 
        if (adev->asic_type != CHIP_RAVEN) {
                amdgpu_uvd_free_handles(adev, file_priv);
@@ -958,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
        amdgpu_bo_unref(&pd);
 
        idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
-               amdgpu_bo_list_free(list);
+               amdgpu_bo_list_put(list);
 
        idr_destroy(&fpriv->bo_list_handles);
        mutex_destroy(&fpriv->bo_list_lock);
index 83e344fbb50a2e86f1c73f0a1d674c8717ddeb2d..a365ea2383d18c137df14d1fa116fad954d9ca45 100644 (file)
  *    Christian König <christian.koenig@amd.com>
  */
 
+/**
+ * DOC: MMU Notifier
+ *
+ * For coherent userptr handling, this registers an MMU notifier to inform
+ * the driver about updates to the page tables of a process.
+ *
+ * When somebody tries to invalidate the page tables, we block the update
+ * until all operations on the pages in question are completed; then those
+ * pages are marked as accessed and also as dirty if the access wasn't
+ * read-only.
+ *
+ * New command submissions using the userptrs in question are delayed until
+ * all page table invalidations are completed and we once more see a coherent
+ * process address space.
+ */
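
A toy model of the scheme this DOC comment describes, under the assumption (consistent with the amdgpu_mn_lock()/amdgpu_mn_read_lock() helpers in this file) that invalidations hold the read side of a semaphore while command submission takes the write side and therefore waits for every invalidation in flight:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mn_lock = PTHREAD_RWLOCK_INITIALIZER;

static void invalidate_range(void)
{
        pthread_rwlock_rdlock(&mn_lock);        /* invalidate_range_start */
        printf("unmap BOs, mark pages accessed/dirty\n");
        pthread_rwlock_unlock(&mn_lock);        /* invalidate_range_end */
}

static void command_submission(void)
{
        pthread_rwlock_wrlock(&mn_lock);        /* waits for invalidations */
        printf("submit against a coherent address space\n");
        pthread_rwlock_unlock(&mn_lock);
}

int main(void)
{
        invalidate_range();
        command_submission();
        return 0;
}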
+
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/mmu_notifier.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
 
+/**
+ * struct amdgpu_mn
+ *
+ * @adev: amdgpu device pointer
+ * @mm: process address space
+ * @mn: MMU notifier structure
+ * @type: type of MMU notifier
+ * @work: destruction work item
+ * @node: hash table node to find structure by adev and mn
+ * @lock: rw semaphore protecting the notifier nodes
+ * @objects: interval tree containing amdgpu_mn_nodes
+ * @read_lock: mutex for recursive locking of @lock
+ * @recursion: depth of recursion
+ *
+ * Data for each amdgpu device and process address space.
+ */
 struct amdgpu_mn {
        /* constant after initialisation */
        struct amdgpu_device    *adev;
@@ -58,13 +89,21 @@ struct amdgpu_mn {
        atomic_t                recursion;
 };
 
+/**
+ * struct amdgpu_mn_node
+ *
+ * @it: interval node defining start-last of the affected address range
+ * @bos: list of all BOs in the affected address range
+ *
+ * Manages all BOs which are affected by a certain range of address space.
+ */
 struct amdgpu_mn_node {
        struct interval_tree_node       it;
        struct list_head                bos;
 };
 
 /**
- * amdgpu_mn_destroy - destroy the rmn
+ * amdgpu_mn_destroy - destroy the MMU notifier
  *
 * @work: previously scheduled work item
  *
@@ -72,47 +111,50 @@ struct amdgpu_mn_node {
  */
 static void amdgpu_mn_destroy(struct work_struct *work)
 {
-       struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
-       struct amdgpu_device *adev = rmn->adev;
+       struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
+       struct amdgpu_device *adev = amn->adev;
        struct amdgpu_mn_node *node, *next_node;
        struct amdgpu_bo *bo, *next_bo;
 
        mutex_lock(&adev->mn_lock);
-       down_write(&rmn->lock);
-       hash_del(&rmn->node);
+       down_write(&amn->lock);
+       hash_del(&amn->node);
        rbtree_postorder_for_each_entry_safe(node, next_node,
-                                            &rmn->objects.rb_root, it.rb) {
+                                            &amn->objects.rb_root, it.rb) {
                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
                        bo->mn = NULL;
                        list_del_init(&bo->mn_list);
                }
                kfree(node);
        }
-       up_write(&rmn->lock);
+       up_write(&amn->lock);
        mutex_unlock(&adev->mn_lock);
-       mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
-       kfree(rmn);
+       mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+       kfree(amn);
 }
 
 /**
  * amdgpu_mn_release - callback to notify about mm destruction
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  *
 * Schedule a work item to lazily destroy our notifier.
  */
 static void amdgpu_mn_release(struct mmu_notifier *mn,
                              struct mm_struct *mm)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-       INIT_WORK(&rmn->work, amdgpu_mn_destroy);
-       schedule_work(&rmn->work);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+       INIT_WORK(&amn->work, amdgpu_mn_destroy);
+       schedule_work(&amn->work);
 }
 
 
 /**
- * amdgpu_mn_lock - take the write side lock for this mn
+ * amdgpu_mn_lock - take the write side lock for this notifier
+ *
+ * @mn: our notifier
  */
 void amdgpu_mn_lock(struct amdgpu_mn *mn)
 {
@@ -121,7 +163,9 @@ void amdgpu_mn_lock(struct amdgpu_mn *mn)
 }
 
 /**
- * amdgpu_mn_unlock - drop the write side lock for this mn
+ * amdgpu_mn_unlock - drop the write side lock for this notifier
+ *
+ * @mn: our notifier
  */
 void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 {
@@ -130,40 +174,38 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
- * amdgpu_mn_read_lock - take the rmn read lock
- *
- * @rmn: our notifier
+ * amdgpu_mn_read_lock - take the read side lock for this notifier
  *
- * Take the rmn read side lock.
+ * @amn: our notifier
  */
-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
 {
-       mutex_lock(&rmn->read_lock);
-       if (atomic_inc_return(&rmn->recursion) == 1)
-               down_read_non_owner(&rmn->lock);
-       mutex_unlock(&rmn->read_lock);
+       mutex_lock(&amn->read_lock);
+       if (atomic_inc_return(&amn->recursion) == 1)
+               down_read_non_owner(&amn->lock);
+       mutex_unlock(&amn->read_lock);
 }
 
 /**
- * amdgpu_mn_read_unlock - drop the rmn read lock
+ * amdgpu_mn_read_unlock - drop the read side lock for this notifier
  *
- * @rmn: our notifier
- *
- * Drop the rmn read side lock.
+ * @amn: our notifier
  */
-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-       if (atomic_dec_return(&rmn->recursion) == 0)
-               up_read_non_owner(&rmn->lock);
+       if (atomic_dec_return(&amn->recursion) == 0)
+               up_read_non_owner(&amn->lock);
 }
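
invalidate_range_start callbacks can nest, so only the outermost start may take the semaphore and only the matching last end may release it; that is what the recursion counter above implements. A compilable pthread analogue of the same idea (link with -pthread):

#include <pthread.h>
#include <stdatomic.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t read_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int recursion;

static void mn_read_lock(void)
{
        pthread_mutex_lock(&read_lock);
        if (atomic_fetch_add(&recursion, 1) == 0)       /* outermost start */
                pthread_rwlock_rdlock(&lock);
        pthread_mutex_unlock(&read_lock);
}

static void mn_read_unlock(void)
{
        if (atomic_fetch_sub(&recursion, 1) == 1)       /* matching last end */
                pthread_rwlock_unlock(&lock);
}

int main(void)
{
        mn_read_lock();
        mn_read_lock();         /* nested: refcount only */
        mn_read_unlock();
        mn_read_unlock();       /* releases the rwlock */
        return 0;
}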
 
 /**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
  * @node: the node with the BOs to unmap
+ * @start: start of address range affected
+ * @end: end of address range affected
  *
- * We block for all BOs and unmap them by move them
- * into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
  */
 static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
                                      unsigned long start,
@@ -190,27 +232,27 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
  * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  * @start: start of updated range
  * @end: end of updated range
  *
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
  */
 static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
                                                 unsigned long end)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
 
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       amdgpu_mn_read_lock(rmn);
+       amdgpu_mn_read_lock(amn);
 
-       it = interval_tree_iter_first(&rmn->objects, start, end);
+       it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
 
@@ -225,7 +267,7 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
  * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  * @start: start of updated range
  * @end: end of updated range
  *
@@ -238,15 +280,15 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
                                                 unsigned long start,
                                                 unsigned long end)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;
 
        /* notification is exclusive, but interval is inclusive */
        end -= 1;
 
-       amdgpu_mn_read_lock(rmn);
+       amdgpu_mn_read_lock(amn);
 
-       it = interval_tree_iter_first(&rmn->objects, start, end);
+       it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
                struct amdgpu_bo *bo;
@@ -268,7 +310,7 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
  * amdgpu_mn_invalidate_range_end - callback to notify about mm change
  *
  * @mn: our notifier
- * @mn: the mm this callback is about
+ * @mm: the mm this callback is about
  * @start: start of updated range
  * @end: end of updated range
  *
@@ -279,9 +321,9 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
                                           unsigned long start,
                                           unsigned long end)
 {
-       struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+       struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 
-       amdgpu_mn_read_unlock(rmn);
+       amdgpu_mn_read_unlock(amn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
@@ -315,7 +357,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
                                enum amdgpu_mn_type type)
 {
        struct mm_struct *mm = current->mm;
-       struct amdgpu_mn *rmn;
+       struct amdgpu_mn *amn;
        unsigned long key = AMDGPU_MN_KEY(mm, type);
        int r;
 
@@ -325,41 +367,41 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
                return ERR_PTR(-EINTR);
        }
 
-       hash_for_each_possible(adev->mn_hash, rmn, node, key)
-               if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
+       hash_for_each_possible(adev->mn_hash, amn, node, key)
+               if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
                        goto release_locks;
 
-       rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
-       if (!rmn) {
-               rmn = ERR_PTR(-ENOMEM);
+       amn = kzalloc(sizeof(*amn), GFP_KERNEL);
+       if (!amn) {
+               amn = ERR_PTR(-ENOMEM);
                goto release_locks;
        }
 
-       rmn->adev = adev;
-       rmn->mm = mm;
-       init_rwsem(&rmn->lock);
-       rmn->type = type;
-       rmn->mn.ops = &amdgpu_mn_ops[type];
-       rmn->objects = RB_ROOT_CACHED;
-       mutex_init(&rmn->read_lock);
-       atomic_set(&rmn->recursion, 0);
+       amn->adev = adev;
+       amn->mm = mm;
+       init_rwsem(&amn->lock);
+       amn->type = type;
+       amn->mn.ops = &amdgpu_mn_ops[type];
+       amn->objects = RB_ROOT_CACHED;
+       mutex_init(&amn->read_lock);
+       atomic_set(&amn->recursion, 0);
 
-       r = __mmu_notifier_register(&rmn->mn, mm);
+       r = __mmu_notifier_register(&amn->mn, mm);
        if (r)
-               goto free_rmn;
+               goto free_amn;
 
-       hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
+       hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
 
 release_locks:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);
 
-       return rmn;
+       return amn;
 
-free_rmn:
+free_amn:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);
-       kfree(rmn);
+       kfree(amn);
 
        return ERR_PTR(r);
 }
@@ -379,14 +421,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        enum amdgpu_mn_type type =
                bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
-       struct amdgpu_mn *rmn;
+       struct amdgpu_mn *amn;
        struct amdgpu_mn_node *node = NULL, *new_node;
        struct list_head bos;
        struct interval_tree_node *it;
 
-       rmn = amdgpu_mn_get(adev, type);
-       if (IS_ERR(rmn))
-               return PTR_ERR(rmn);
+       amn = amdgpu_mn_get(adev, type);
+       if (IS_ERR(amn))
+               return PTR_ERR(amn);
 
        new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
        if (!new_node)
@@ -394,12 +436,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
        INIT_LIST_HEAD(&bos);
 
-       down_write(&rmn->lock);
+       down_write(&amn->lock);
 
-       while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
+       while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
                kfree(node);
                node = container_of(it, struct amdgpu_mn_node, it);
-               interval_tree_remove(&node->it, &rmn->objects);
+               interval_tree_remove(&node->it, &amn->objects);
                addr = min(it->start, addr);
                end = max(it->last, end);
                list_splice(&node->bos, &bos);
@@ -410,7 +452,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
        else
                kfree(new_node);
 
-       bo->mn = rmn;
+       bo->mn = amn;
 
        node->it.start = addr;
        node->it.last = end;
@@ -418,9 +460,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
        list_splice(&bos, &node->bos);
        list_add(&bo->mn_list, &node->bos);
 
-       interval_tree_insert(&node->it, &rmn->objects);
+       interval_tree_insert(&node->it, &amn->objects);
 
-       up_write(&rmn->lock);
+       up_write(&amn->lock);
 
        return 0;
 }
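
The while loop above coalesces every tracked interval that overlaps the new BO's range into a single node covering the union. A flat-array stand-in for the interval tree that shows just that merge step; the range struct and merge() are hypothetical names for illustration:

#include <stdio.h>

struct range { unsigned long start, last; int used; };

/* Widen [start, last] until no stored range overlaps it, removing the
 * ranges that were absorbed, like amdgpu_mn_register does above. */
static void merge(struct range *tab, int n,
                  unsigned long start, unsigned long last)
{
        int i, merged;

        do {
                merged = 0;
                for (i = 0; i < n; i++) {
                        if (!tab[i].used || tab[i].last < start ||
                            tab[i].start > last)
                                continue;
                        if (tab[i].start < start)
                                start = tab[i].start;
                        if (tab[i].last > last)
                                last = tab[i].last;
                        tab[i].used = 0;
                        merged = 1;
                }
        } while (merged);
        printf("tracking [%lu, %lu]\n", start, last);
}

int main(void)
{
        struct range tab[2] = { { 0, 10, 1 }, { 20, 30, 1 } };

        merge(tab, 2, 5, 25);   /* overlaps both -> [0, 30] */
        return 0;
}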
@@ -435,18 +477,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       struct amdgpu_mn *rmn;
+       struct amdgpu_mn *amn;
        struct list_head *head;
 
        mutex_lock(&adev->mn_lock);
 
-       rmn = bo->mn;
-       if (rmn == NULL) {
+       amn = bo->mn;
+       if (amn == NULL) {
                mutex_unlock(&adev->mn_lock);
                return;
        }
 
-       down_write(&rmn->lock);
+       down_write(&amn->lock);
 
        /* save the next list entry for later */
        head = bo->mn_list.next;
@@ -456,12 +498,13 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 
        if (list_empty(head)) {
                struct amdgpu_mn_node *node;
+
                node = container_of(head, struct amdgpu_mn_node, bos);
-               interval_tree_remove(&node->it, &rmn->objects);
+               interval_tree_remove(&node->it, &amn->objects);
                kfree(node);
        }
 
-       up_write(&rmn->lock);
+       up_write(&amn->lock);
        mutex_unlock(&adev->mn_lock);
 }
 
index 5e4e1bd9038379fe62666e44318162adfc544fea..b0e14a3d54efd44e86580f816f41f05f16368b08 100644 (file)
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
 
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+/**
+ * DOC: amdgpu_object
+ *
+ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
+ * represents memory used by the driver (VRAM, system memory, etc.). The
+ * driver provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these
+ * interfaces to create/destroy/set buffer objects, which are then managed by
+ * the kernel TTM memory manager.
+ * The interfaces are also used internally by kernel clients, including gfx,
+ * uvd, etc., for kernel-managed allocations used by the GPU.
+ *
+ */
+
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
 {
        if (adev->flags & AMD_IS_APU)
                return false;
@@ -50,11 +63,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
        return true;
 }
 
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+/**
+ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
+ *
+ * @bo: &amdgpu_bo buffer object
+ *
+ * This function is called when a BO stops being pinned, and updates the
+ * &amdgpu_device pin_size values accordingly.
+ */
+static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+               atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+               atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+                            &adev->visible_pin_size);
+       } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+               atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+       }
+}
+
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 
+       if (bo->pin_count > 0)
+               amdgpu_bo_subtract_pin_size(bo);
+
        if (bo->kfd_bo)
                amdgpu_amdkfd_unreserve_system_memory_limit(bo);
 
@@ -73,14 +110,32 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        kfree(bo);
 }
 
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+/**
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+ * Returns:
+ * true if the object belongs to &amdgpu_bo, false if not.
+ */
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 {
-       if (bo->destroy == &amdgpu_ttm_bo_destroy)
+       if (bo->destroy == &amdgpu_bo_destroy)
                return true;
        return false;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+/**
+ * amdgpu_bo_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
        struct ttm_placement *placement = &abo->placement;
@@ -161,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
                c++;
        }
 
+       BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
        placement->num_placement = c;
        placement->placement = places;
 
@@ -184,7 +241,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
  *
  * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
  *
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
@@ -220,22 +278,33 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                goto error_free;
        }
 
-       r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+       r = amdgpu_bo_pin(*bo_ptr, domain);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }
 
+       r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
+       if (r) {
+               dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
+               goto error_unpin;
+       }
+
+       if (gpu_addr)
+               *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
-                       goto error_unreserve;
+                       goto error_unpin;
                }
        }
 
        return 0;
 
+error_unpin:
+       amdgpu_bo_unpin(*bo_ptr);
 error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);
 
@@ -261,7 +330,8 @@ error_free:
  *
  * Note: For bo_ptr new BO is only created if bo_ptr points to NULL.
  *
- * Returns 0 on success, negative error code otherwise.
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
@@ -285,6 +355,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
  * amdgpu_bo_free_kernel - free BO for kernel use
  *
  * @bo: amdgpu BO to free
+ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
+ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
  *
 * Unmaps and unpins a BO for kernel internal use.
  */
@@ -418,17 +490,17 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 #endif
 
        bo->tbo.bdev = &adev->mman.bdev;
-       amdgpu_ttm_placement_from_domain(bo, bp->domain);
+       amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
                bo->tbo.priority = 1;
 
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
                                 &bo->placement, page_align, &ctx, acc_size,
-                                NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+                                NULL, bp->resv, &amdgpu_bo_destroy);
        if (unlikely(r != 0))
                return r;
 
-       if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+       if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            bo->tbo.mem.mem_type == TTM_PL_VRAM &&
            bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
                amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
@@ -498,6 +570,20 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object; and if requested, also creates a
+ * shadow object.
+ * Shadow object is used to backup the original buffer object, and is always
+ * in GTT.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr)
@@ -510,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        if (r)
                return r;
 
-       if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+       if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
                if (!bp->resv)
                        WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
                                                        NULL));
@@ -527,6 +613,21 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be backed up
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies an &amdgpu_bo buffer object to its shadow object.
+ * Not used for now.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
@@ -559,6 +660,18 @@ err:
        return r;
 }
 
+/**
+ * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
+ * @bo: pointer to the buffer object
+ *
+ * Sets placement according to domain; and changes placement and caching
+ * policy of the buffer object according to the placement.
+ * This is used for validating shadow bos.  It calls ttm_bo_validate() to
+ * make sure the buffer is resident where it needs to be.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
        struct ttm_operation_ctx ctx = { false, false };
@@ -571,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
        domain = bo->preferred_domains;
 
 retry:
-       amdgpu_ttm_placement_from_domain(bo, domain);
+       amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
@@ -581,6 +694,22 @@ retry:
        return r;
 }
 
+/**
+ * amdgpu_bo_restore_from_shadow - restore an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @ring: amdgpu_ring for the engine handling the buffer operations
+ * @bo: &amdgpu_bo buffer to be restored
+ * @resv: reservation object with embedded fence
+ * @fence: dma_fence associated with the operation
+ * @direct: whether to submit the job directly
+ *
+ * Copies a buffer object's shadow content back to the object.
+ * This is used for recovering a buffer from its shadow in case of a gpu
+ * reset where vram context may be lost.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
@@ -613,6 +742,17 @@ err:
        return r;
 }
 
+/**
+ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be mapped
+ * @ptr: kernel virtual address to be returned
+ *
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
        void *kptr;
@@ -643,6 +783,15 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
        return 0;
 }
 
+/**
+ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
+ *
+ * Returns:
+ * the virtual address of a buffer object area.
+ */
 void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
 {
        bool is_iomem;
@@ -650,21 +799,42 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
        return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 }
 
+/**
+ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unmapped
+ *
+ * Unmaps a kernel map set up by amdgpu_bo_kmap().
+ */
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
 {
        if (bo->kmap.bo)
                ttm_bo_kunmap(&bo->kmap);
 }
 
+/**
+ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * References the contained &ttm_buffer_object.
+ *
+ * Returns:
+ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
 {
        if (bo == NULL)
                return NULL;
 
-       ttm_bo_reference(&bo->tbo);
+       ttm_bo_get(&bo->tbo);
        return bo;
 }
 
+/**
+ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Unreferences the contained &ttm_buffer_object and clears the pointer.
+ */
 void amdgpu_bo_unref(struct amdgpu_bo **bo)
 {
        struct ttm_buffer_object *tbo;
@@ -673,14 +843,34 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
                return;
 
        tbo = &((*bo)->tbo);
-       ttm_bo_unref(&tbo);
-       if (tbo == NULL)
-               *bo = NULL;
+       ttm_bo_put(tbo);
+       *bo = NULL;
 }
 
+/**
+ * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ * @min_offset: the start of requested address range
+ * @max_offset: the end of requested address range
+ *
+ * Pins the buffer object according to requested domain and address range. If
+ * the memory is unbound gart memory, binds the pages into gart table. Adjusts
+ * pin_count and pin_size accordingly.
+ *
+ * Pinning means to lock pages in memory along with keeping them at a fixed
+ * offset. It is required when a buffer cannot be moved, for example, when
+ * a display buffer is being scanned out.
+ *
+ * Compared with amdgpu_bo_pin(), this function gives more flexibility on
+ * where to pin a buffer if there are specific restrictions on where a buffer
+ * must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-                            u64 min_offset, u64 max_offset,
-                            u64 *gpu_addr)
+                            u64 min_offset, u64 max_offset)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = { false, false };
@@ -712,8 +902,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                        return -EINVAL;
 
                bo->pin_count++;
-               if (gpu_addr)
-                       *gpu_addr = amdgpu_bo_gpu_offset(bo);
 
                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -728,7 +916,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        /* force to pin into visible video ram */
        if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
                bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       amdgpu_ttm_placement_from_domain(bo, domain);
+       amdgpu_bo_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                unsigned fpfn, lpfn;
 
@@ -749,34 +937,48 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                goto error;
        }
 
-       r = amdgpu_ttm_alloc_gart(&bo->tbo);
-       if (unlikely(r)) {
-               dev_err(adev->dev, "%p bind failed\n", bo);
-               goto error;
-       }
-
        bo->pin_count = 1;
-       if (gpu_addr != NULL)
-               *gpu_addr = amdgpu_bo_gpu_offset(bo);
 
        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
-               adev->vram_pin_size += amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size += amdgpu_bo_size(bo);
+               atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
+               atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
+                            &adev->visible_pin_size);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
-               adev->gart_pin_size += amdgpu_bo_size(bo);
+               atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
        }
 
 error:
        return r;
 }
 
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+/**
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ *
+ * A simple wrapper to amdgpu_bo_pin_restricted().
+ * Provides a simpler API for buffers that do not have any strict restrictions
+ * on where a buffer must be located.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
-       return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+       return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }
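
As the amdgpu_bo_pin_restricted() documentation above notes, pinning is reference counted: only the first pin places and accounts the buffer, and (per amdgpu_bo_unpin() below) only the last unpin undoes it. A minimal model of those 0 <-> 1 transitions with a hypothetical bo type:

#include <stdio.h>

struct bo { int pin_count; unsigned long size; };       /* hypothetical */

static unsigned long vram_pin_size;

static void bo_pin(struct bo *bo)
{
        if (bo->pin_count++ == 0)
                vram_pin_size += bo->size;      /* first pin accounts */
}

static void bo_unpin(struct bo *bo)
{
        if (--bo->pin_count == 0)
                vram_pin_size -= bo->size;      /* last unpin releases */
}

int main(void)
{
        struct bo bo = { 0, 1UL << 20 };

        bo_pin(&bo);
        bo_pin(&bo);            /* nested pin: refcount only */
        bo_unpin(&bo);
        printf("pinned bytes: %lu\n", vram_pin_size);   /* still 1 MiB */
        bo_unpin(&bo);
        return 0;
}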
 
+/**
+ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unpinned
+ *
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -790,28 +992,30 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
+
+       amdgpu_bo_subtract_pin_size(bo);
+
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (unlikely(r)) {
+       if (unlikely(r))
                dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-               goto error;
-       }
 
-       if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-               adev->vram_pin_size -= amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size -= amdgpu_bo_size(bo);
-       } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-               adev->gart_pin_size -= amdgpu_bo_size(bo);
-       }
-
-error:
        return r;
 }
 
+/**
+ * amdgpu_bo_evict_vram - evict VRAM buffers
+ * @adev: amdgpu device object
+ *
+ * Evicts all VRAM buffers on the LRU list of the memory type.
+ * Mainly used for evicting VRAM at suspend time.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        /* late 2.6.33 fix for IGP hibernation - we need pm ops to do this correctly */
@@ -834,6 +1038,15 @@ static const char *amdgpu_vram_names[] = {
        "DDR4",
 };
 
+/**
+ * amdgpu_bo_init - initialize memory manager
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_init() to initialize the amdgpu memory manager.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
        /* reserve PAT memory space to WC for VRAM */
@@ -851,6 +1064,16 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
        return amdgpu_ttm_init(adev);
 }
 
+/**
+ * amdgpu_bo_late_init - late init
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_late_init() to free resources used earlier during
+ * initialization.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_late_init(struct amdgpu_device *adev)
 {
        amdgpu_ttm_late_init(adev);
@@ -858,6 +1081,12 @@ int amdgpu_bo_late_init(struct amdgpu_device *adev)
        return 0;
 }
 
+/**
+ * amdgpu_bo_fini - tear down memory manager
+ * @adev: amdgpu device object
+ *
+ * Reverses amdgpu_bo_init() to tear down the memory manager.
+ */
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
        amdgpu_ttm_fini(adev);
@@ -865,12 +1094,33 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
        arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
 }
 
+/**
+ * amdgpu_bo_fbdev_mmap - mmap fbdev memory
+ * @bo: &amdgpu_bo buffer object
+ * @vma: vma as input from the fbdev mmap method
+ *
+ * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                             struct vm_area_struct *vma)
 {
        return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
+/**
+ * amdgpu_bo_set_tiling_flags - set tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: new flags
+ *
+ * Sets the buffer object's tiling flags to the new value. Used by the GEM
+ * ioctl or kernel driver to set the tiling flags on a buffer.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -883,6 +1133,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
        return 0;
 }
 
+/**
+ * amdgpu_bo_get_tiling_flags - get tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: returned flags
+ *
+ * Gets the buffer object's tiling flags. Used by the GEM ioctl or kernel
+ * driver to query the tiling flags on a buffer.
+ */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
        lockdep_assert_held(&bo->tbo.resv->lock.base);
@@ -891,6 +1149,19 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
                *tiling_flags = bo->tiling_flags;
 }
 
+/**
+ * amdgpu_bo_set_metadata - set metadata
+ * @bo: &amdgpu_bo buffer object
+ * @metadata: new metadata
+ * @metadata_size: size of the new metadata
+ * @flags: flags of the new metadata
+ *
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
                            uint32_t metadata_size, uint64_t flags)
 {
@@ -920,6 +1191,21 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        return 0;
 }
 
+/**
+ * amdgpu_bo_get_metadata - get metadata
+ * @bo: &amdgpu_bo buffer object
+ * @buffer: returned metadata
+ * @buffer_size: size of the buffer
+ * @metadata_size: size of the returned metadata
+ * @flags: flags of the returned metadata
+ *
+ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
@@ -943,6 +1229,16 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
        return 0;
 }
 
+/**
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new information of the buffer object
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
+ * bookkeeping.
+ * TTM driver callback which is called when TTM moves a buffer.
+ */
 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_mem_reg *new_mem)
@@ -951,7 +1247,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
        struct amdgpu_bo *abo;
        struct ttm_mem_reg *old_mem = &bo->mem;
 
-       if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+       if (!amdgpu_bo_is_amdgpu_bo(bo))
                return;
 
        abo = ttm_to_amdgpu_bo(bo);
@@ -968,9 +1264,20 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                return;
 
        /* move_notify is called before move happens */
-       trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+       trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
+/**
+ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
+ * @bo: pointer to a buffer object
+ *
+ * Notifies the driver we are taking a fault on this BO and have reserved it,
+ * also performs bookkeeping.
+ * TTM driver callback for dealing with vm faults.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
@@ -979,7 +1286,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
        unsigned long offset, size;
        int r;
 
-       if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+       if (!amdgpu_bo_is_amdgpu_bo(bo))
                return 0;
 
        abo = ttm_to_amdgpu_bo(bo);
@@ -1001,8 +1308,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
        /* hurrah the memory is not visible ! */
        atomic64_inc(&adev->num_vram_cpu_page_faults);
-       amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
-                                        AMDGPU_GEM_DOMAIN_GTT);
+       amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+                                       AMDGPU_GEM_DOMAIN_GTT);
 
        /* Avoid costly evictions; only set GTT as a busy placement */
        abo->placement.num_busy_placement = 1;
@@ -1044,10 +1351,11 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
  * amdgpu_bo_gpu_offset - return GPU offset of bo
  * @bo:        amdgpu object for which we query the offset
  *
- * Returns current GPU offset of the object.
- *
  * Note: object should either be pinned or reserved when calling this
  * function, it might be useful to add check for this for debugging.
+ *
+ * Returns:
+ * current GPU offset of the object.
  */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
@@ -1063,6 +1371,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
        return bo->tbo.offset;
 }
 
+/**
+ * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * @adev: amdgpu device object
+ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
+ *
+ * Returns:
+ * Which of the allowed domains is preferred for pinning the BO for scanout.
+ */
 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
                                            uint32_t domain)
 {
index 731748033878b35a32f70a6fe4eda9d0d44835cb..18945dd6982db005c04c6654f41f09a6bb12b1cc 100644 (file)
@@ -32,6 +32,7 @@
 #include "amdgpu.h"
 
 #define AMDGPU_BO_INVALID_OFFSET       LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS       3
 
 struct amdgpu_bo_param {
        unsigned long                   size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
        /* Protected by tbo.reserved */
        u32                             preferred_domains;
        u32                             allowed_domains;
-       struct ttm_place                placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+       struct ttm_place                placements[AMDGPU_BO_MAX_PLACEMENTS];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 }
 
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
 int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr);
@@ -252,10 +256,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
 void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-                            u64 min_offset, u64 max_offset,
-                            u64 *gpu_addr);
+                            u64 min_offset, u64 max_offset);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
index b455da4877829e57b76178ed2300959c4dade7f4..8f98629fbe5936858a3c77b3546fd106577f7254 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
 
 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
 
@@ -68,11 +68,11 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                if (power_supply_is_system_supplied() > 0)
-                       adev->pm.dpm.ac_power = true;
+                       adev->pm.ac_power = true;
                else
-                       adev->pm.dpm.ac_power = false;
+                       adev->pm.ac_power = false;
                if (adev->powerplay.pp_funcs->enable_bapm)
-                       amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
+                       amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
                mutex_unlock(&adev->pm.mutex);
        }
 }
@@ -80,12 +80,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 /**
  * DOC: power_dpm_state
  *
- * This is a legacy interface and is only provided for backwards compatibility.
- * The amdgpu driver provides a sysfs API for adjusting certain power
- * related parameters.  The file power_dpm_state is used for this.
+ * The power_dpm_state file is a legacy interface and is only provided for
+ * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
+ * certain power related parameters.  The file power_dpm_state is used for this.
  * It accepts the following arguments:
+ *
  * - battery
+ *
  * - balanced
+ *
  * - performance
  *
  * battery
@@ -169,14 +172,21 @@ fail:
  * The amdgpu driver provides a sysfs API for adjusting certain power
  * related parameters.  The file power_dpm_force_performance_level is
  * used for this.  It accepts the following arguments:
+ *
  * - auto
+ *
  * - low
+ *
  * - high
+ *
  * - manual
- * - GPU fan
+ *
  * - profile_standard
+ *
  * - profile_min_sclk
+ *
  * - profile_min_mclk
+ *
  * - profile_peak
  *
  * auto
@@ -393,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
                        count = -EINVAL;
                        goto fail;
                }
+               idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
 
                amdgpu_dpm_get_pp_num_states(adev, &data);
                state = data.states[idx];
@@ -463,8 +474,11 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * this.
  *
  * Reading the file will display:
+ *
  * - a list of engine clock levels and voltages labeled OD_SCLK
+ *
  * - a list of memory clock levels and voltages labeled OD_MCLK
+ *
  * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
  *
  * To manually adjust these settings, first select manual using
@@ -593,40 +607,59 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                return snprintf(buf, PAGE_SIZE, "\n");
 }
 
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
-               struct device_attribute *attr,
-               const char *buf,
-               size_t count)
+/*
+ * Worst case: 32 bits individually specified, in octal at 12 characters
+ * per line (+1 for \n).
+ */
+#define AMDGPU_MASK_BUF_MAX    (32 * 13)
+
+static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = ddev->dev_private;
        int ret;
        long level;
-       uint32_t mask = 0;
        char *sub_str = NULL;
        char *tmp;
-       char buf_cpy[count];
+       char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
        const char delimiter[3] = {' ', '\n', '\0'};
+       size_t bytes;
 
-       memcpy(buf_cpy, buf, count+1);
+       *mask = 0;
+
+       bytes = min(count, sizeof(buf_cpy) - 1);
+       memcpy(buf_cpy, buf, bytes);
+       buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
        while (tmp[0]) {
-               sub_str =  strsep(&tmp, delimiter);
+               sub_str = strsep(&tmp, delimiter);
                if (strlen(sub_str)) {
                        ret = kstrtol(sub_str, 0, &level);
-
-                       if (ret) {
-                               count = -EINVAL;
-                               goto fail;
-                       }
-                       mask |= 1 << level;
+                       if (ret)
+                               return -EINVAL;
+                       *mask |= 1 << level;
                } else
                        break;
        }
+
+       return 0;
+}
+
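
For illustration only, since amdgpu_read_mask() is static to this file: parsing the string "0 2\n" sets bits 0 and 2, giving a mask of 0x5:

	uint32_t mask;
	int ret;

	ret = amdgpu_read_mask("0 2\n", 4, &mask);
	/* ret == 0, mask == (1 << 0) | (1 << 2) == 0x5 */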
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf,
+               size_t count)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       int ret;
+       uint32_t mask = 0;
+
+       ret = amdgpu_read_mask(buf, count, &mask);
+       if (ret)
+               return ret;
+
        if (adev->powerplay.pp_funcs->force_clock_level)
                amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
 
-fail:
        return count;
 }
 
@@ -651,32 +684,15 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
-       long level;
        uint32_t mask = 0;
-       char *sub_str = NULL;
-       char *tmp;
-       char buf_cpy[count];
-       const char delimiter[3] = {' ', '\n', '\0'};
 
-       memcpy(buf_cpy, buf, count+1);
-       tmp = buf_cpy;
-       while (tmp[0]) {
-               sub_str =  strsep(&tmp, delimiter);
-               if (strlen(sub_str)) {
-                       ret = kstrtol(sub_str, 0, &level);
+       ret = amdgpu_read_mask(buf, count, &mask);
+       if (ret)
+               return ret;
 
-                       if (ret) {
-                               count = -EINVAL;
-                               goto fail;
-                       }
-                       mask |= 1 << level;
-               } else
-                       break;
-       }
        if (adev->powerplay.pp_funcs->force_clock_level)
                amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
 
-fail:
        return count;
 }
 
@@ -701,33 +717,15 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        int ret;
-       long level;
        uint32_t mask = 0;
-       char *sub_str = NULL;
-       char *tmp;
-       char buf_cpy[count];
-       const char delimiter[3] = {' ', '\n', '\0'};
 
-       memcpy(buf_cpy, buf, count+1);
-       tmp = buf_cpy;
-
-       while (tmp[0]) {
-               sub_str =  strsep(&tmp, delimiter);
-               if (strlen(sub_str)) {
-                       ret = kstrtol(sub_str, 0, &level);
+       ret = amdgpu_read_mask(buf, count, &mask);
+       if (ret)
+               return ret;
 
-                       if (ret) {
-                               count = -EINVAL;
-                               goto fail;
-                       }
-                       mask |= 1 << level;
-               } else
-                       break;
-       }
        if (adev->powerplay.pp_funcs->force_clock_level)
                amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
 
-fail:
        return count;
 }
 
@@ -905,6 +903,36 @@ fail:
        return -EINVAL;
 }
 
+/**
+ * DOC: busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the GPU
+ * is as a percentage.  The file gpu_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_busy_percent(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       int r, value, size = sizeof(value);
+
+       /* sanity check PP is enabled */
+       if (!(adev->powerplay.pp_funcs &&
+             adev->powerplay.pp_funcs->read_sensor))
+               return -EINVAL;
+
+       /* read the IP busy sensor */
+       r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
+                                  (void *)&value, &size);
+       if (r)
+               return r;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
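
A minimal userspace sketch for reading the new attribute; the card0 path is an assumption, as the DRM card index varies per system:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/drm/card0/device/gpu_busy_percent", "r");
		int busy;

		if (!f)
			return 1;
		if (fscanf(f, "%d", &busy) == 1)
			printf("GPU busy: %d%%\n", busy);
		fclose(f);
		return 0;
	}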
 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
                   amdgpu_get_dpm_forced_performance_level,
@@ -938,6 +966,8 @@ static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
                amdgpu_get_pp_od_clk_voltage,
                amdgpu_set_pp_od_clk_voltage);
+static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
+               amdgpu_get_busy_percent, NULL);
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
@@ -1156,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
        int r, size = sizeof(vddnb);
 
        /* only APUs have vddnb */
-       if  (adev->flags & AMD_IS_APU)
+       if  (!(adev->flags & AMD_IS_APU))
                return -EINVAL;
 
        /* Can't get voltage when the card is off */
@@ -1285,35 +1315,51 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
  * DOC: hwmon
  *
  * The amdgpu driver exposes the following sensor interfaces:
+ *
  * - GPU temperature (via the on-die sensor)
+ *
  * - GPU voltage
+ *
  * - Northbridge voltage (APUs only)
+ *
  * - GPU power
+ *
  * - GPU fan
  *
  * hwmon interfaces for GPU temperature:
+ *
  * - temp1_input: the on die GPU temperature in millidegrees Celsius
+ *
  * - temp1_crit: temperature critical max value in millidegrees Celsius
+ *
  * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
  *
  * hwmon interfaces for GPU voltage:
+ *
  * - in0_input: the voltage on the GPU in millivolts
+ *
  * - in1_input: the voltage on the Northbridge in millivolts
  *
  * hwmon interfaces for GPU power:
+ *
  * - power1_average: average power used by the GPU in microWatts
+ *
  * - power1_cap_min: minimum cap supported in microWatts
+ *
  * - power1_cap_max: maximum cap supported in microWatts
+ *
  * - power1_cap: selected power cap in microWatts
  *
  * hwmon interfaces for GPU fan:
+ *
  * - pwm1: pulse width modulation fan level (0-255)
- * - pwm1_enable: pulse width modulation fan control method
- *                0: no fan speed control
- *                1: manual fan speed control using pwm interface
- *                2: automatic fan speed control
+ *
+ * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
+ *
  * - pwm1_min: pulse width modulation fan control minimum level (0)
+ *
  * - pwm1_max: pulse width modulation fan control maximum level (255)
+ *
  * - fan1_input: fan speed in RPM
  *
  * You can use hwmon tools like sensors to view this information on your system.
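
For example, temp1_input reports millidegrees Celsius, so consumers divide by 1000. A sketch assuming the sensor is exposed as hwmon0 (the index varies per system):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
		long mdeg;

		if (!f)
			return 1;
		if (fscanf(f, "%ld", &mdeg) == 1)
			printf("GPU temperature: %.1f degC\n", mdeg / 1000.0);
		fclose(f);
		return 0;
	}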
@@ -1668,10 +1714,10 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-       if (adev->powerplay.pp_funcs->powergate_uvd) {
+       if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
                /* enable/disable UVD */
                mutex_lock(&adev->pm.mutex);
-               amdgpu_dpm_powergate_uvd(adev, !enable);
+               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
                mutex_unlock(&adev->pm.mutex);
        } else {
                if (enable) {
@@ -1690,10 +1736,10 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-       if (adev->powerplay.pp_funcs->powergate_vce) {
+       if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
                /* enable/disable VCE */
                mutex_lock(&adev->pm.mutex);
-               amdgpu_dpm_powergate_vce(adev, !enable);
+               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
                mutex_unlock(&adev->pm.mutex);
        } else {
                if (enable) {
@@ -1825,6 +1871,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                                "pp_od_clk_voltage\n");
                return ret;
        }
+       ret = device_create_file(adev->dev,
+                       &dev_attr_gpu_busy_percent);
+       if (ret) {
+               DRM_ERROR("failed to create device file "
+                               "gpu_busy_percent\n");
+               return ret;
+       }
        ret = amdgpu_debugfs_pm_init(adev);
        if (ret) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1860,6 +1913,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
                        &dev_attr_pp_power_profile_mode);
        device_remove_file(adev->dev,
                        &dev_attr_pp_od_clk_voltage);
+       device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -1878,11 +1932,19 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                        amdgpu_fence_wait_empty(ring);
        }
 
+       mutex_lock(&adev->pm.mutex);
+       /* update battery/ac status */
+       if (power_supply_is_system_supplied() > 0)
+               adev->pm.ac_power = true;
+       else
+               adev->pm.ac_power = false;
+       mutex_unlock(&adev->pm.mutex);
+
        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_get_active_displays(adev);
-                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
                        adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
                        adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
                        /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
@@ -1898,14 +1960,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
        } else {
                mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_get_active_displays(adev);
-               /* update battery/ac status */
-               if (power_supply_is_system_supplied() > 0)
-                       adev->pm.dpm.ac_power = true;
-               else
-                       adev->pm.dpm.ac_power = false;
-
                amdgpu_dpm_change_power_state_locked(adev);
-
                mutex_unlock(&adev->pm.mutex);
        }
 }
index 4683626b065ff2cb06473ad8349bbaa6148cd68f..1c5d97f4b4dde4e9a7b53c7ea70fc5746327640e 100644 (file)
  *
  * Authors: Alex Deucher
  */
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
 
 static const struct dma_buf_ops amdgpu_dmabuf_ops;
 
+/**
+ * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
+ * implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * A scatter/gather table for the pinned pages of the buffer object's memory.
+ */
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -40,6 +56,15 @@ struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
        return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
 }
 
+/**
+ * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
+ * @obj: GEM buffer object
+ *
+ * Sets up an in-kernel virtual mapping of the buffer object's memory.
+ *
+ * Returns:
+ * The virtual address of the mapping or an error pointer.
+ */
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -53,6 +78,13 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
        return bo->dma_buf_vmap.virtual;
 }
 
+/**
+ * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
+ * @obj: GEM buffer object
+ * @vaddr: virtual address (unused)
+ *
+ * Tears down the in-kernel virtual mapping of the buffer object's memory.
+ */
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -60,6 +92,17 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
        ttm_bo_kunmap(&bo->dma_buf_vmap);
 }
 
+/**
+ * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
+ * @obj: GEM buffer object
+ * @vma: virtual memory area
+ *
+ * Sets up a userspace mapping of the buffer object's memory in the given
+ * virtual memory area.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -94,6 +137,19 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma
        return ret;
 }
 
+/**
+ * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
+ * implementation
+ * @dev: DRM device
+ * @attach: DMA-buf attachment
+ * @sg: Scatter/gather table
+ *
+ * Imports shared DMA buffer memory exported by another device.
+ *
+ * Returns:
+ * A new GEM buffer object of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
 struct drm_gem_object *
 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
@@ -132,8 +188,19 @@ error:
        return ERR_PTR(ret);
 }
 
+/**
+ * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
-                                struct device *target_dev,
                                 struct dma_buf_attachment *attach)
 {
        struct drm_gem_object *obj = dma_buf->priv;
@@ -141,7 +208,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        long r;
 
-       r = drm_gem_map_attach(dma_buf, target_dev, attach);
+       r = drm_gem_map_attach(dma_buf, attach);
        if (r)
                return r;
 
@@ -165,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
        }
 
        /* pin buffer into GTT */
-       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+       r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (r)
                goto error_unreserve;
 
@@ -181,6 +248,14 @@ error_detach:
        return r;
 }
 
+/**
+ * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
+ * @dma_buf: shared DMA buffer
+ * @attach: DMA-buf attachment
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * the other device. For now, simply unpins the buffer from GTT.
+ */
 static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
                                  struct dma_buf_attachment *attach)
 {
@@ -202,6 +277,13 @@ error:
        drm_gem_map_detach(dma_buf, attach);
 }
 
+/**
+ * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
+ * @obj: GEM buffer object
+ *
+ * Returns:
+ * The buffer object's reservation object.
+ */
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -209,6 +291,18 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
        return bo->tbo.resv;
 }
 
+/**
+ * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: shared DMA buffer
+ * @direction: direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or negative error code.
+ */
 static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction direction)
 {
@@ -229,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
                return ret;
 
        if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
-               amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        }
 
@@ -245,14 +339,24 @@ static const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = amdgpu_gem_begin_cpu_access,
        .map = drm_gem_dmabuf_kmap,
-       .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
-       .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
 };
 
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @dev: DRM device
+ * @gobj: GEM buffer object
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * The main work is done by the &drm_gem_prime_export helper, which in turn
+ * uses &amdgpu_gem_prime_res_obj.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM buffer object from the given device.
+ */
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags)
@@ -273,6 +377,17 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
        return buf;
 }
 
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: shared DMA buffer
+ *
+ * The main work is done by the &drm_gem_prime_import helper, which in turn
+ * uses &amdgpu_gem_prime_import_sg_table.
+ *
+ * Returns:
+ * GEM buffer object representing the shared DMA buffer for the given device.
+ */
 struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
 {
index 8af16e81c7d447dc1ee13b5b0227bdfc9100f387..a172bba32b45c9ef279a742be94af68672881442 100644 (file)
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
                               u32 ring,
                               struct amdgpu_ring **out_ring)
 {
-       u32 instance;
-
        switch (mapper->hw_ip) {
        case AMDGPU_HW_IP_GFX:
                *out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
                *out_ring = &adev->sdma.instance[ring].ring;
                break;
        case AMDGPU_HW_IP_UVD:
-               instance = ring;
-               *out_ring = &adev->uvd.inst[instance].ring;
+               *out_ring = &adev->uvd.inst[0].ring;
                break;
        case AMDGPU_HW_IP_VCE:
                *out_ring = &adev->vce.ring[ring];
                break;
        case AMDGPU_HW_IP_UVD_ENC:
-               instance = ring / adev->uvd.num_enc_rings;
-               *out_ring =
-               &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+               *out_ring = &adev->uvd.inst[0].ring_enc[ring];
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                *out_ring = &adev->vcn.ring_dec;
@@ -96,6 +91,9 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                *out_ring = &adev->vcn.ring_enc[ring];
                break;
+       case AMDGPU_HW_IP_VCN_JPEG:
+               *out_ring = &adev->vcn.ring_jpeg;
+               break;
        default:
                *out_ring = NULL;
                DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
@@ -216,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                         u32 hw_ip, u32 instance, u32 ring,
                         struct amdgpu_ring **out_ring)
 {
-       int r, ip_num_rings;
+       int i, r, ip_num_rings = 0;
        struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
 
        if (!adev || !mgr || !out_ring)
@@ -245,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
                ip_num_rings = adev->sdma.num_instances;
                break;
        case AMDGPU_HW_IP_UVD:
-               ip_num_rings = adev->uvd.num_uvd_inst;
+               for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                       if (!(adev->uvd.harvest_config & (1 << i)))
+                               ip_num_rings++;
+               }
                break;
        case AMDGPU_HW_IP_VCE:
                ip_num_rings = adev->vce.num_rings;
                break;
        case AMDGPU_HW_IP_UVD_ENC:
+               for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                       if (!(adev->uvd.harvest_config & (1 << i)))
+                               ip_num_rings++;
+               }
                ip_num_rings =
-                       adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+                       adev->uvd.num_enc_rings * ip_num_rings;
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                ip_num_rings = 1;
@@ -260,6 +265,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                ip_num_rings = adev->vcn.num_enc_rings;
                break;
+       case AMDGPU_HW_IP_VCN_JPEG:
+               ip_num_rings = 1;
+               break;
        default:
                DRM_DEBUG("unknown ip type: %d\n", hw_ip);
                return -EINVAL;
@@ -287,6 +295,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_UVD_ENC:
        case AMDGPU_HW_IP_VCN_DEC:
        case AMDGPU_HW_IP_VCN_ENC:
+       case AMDGPU_HW_IP_VCN_JPEG:
                r = amdgpu_identity_map(adev, mapper, ring, out_ring);
                break;
        case AMDGPU_HW_IP_DMA:
index c6850b629d0e6b2594c7422ecd697c3a920829eb..93794a85f83d80e868870133e387467c2b8de4a9 100644 (file)
@@ -211,7 +211,8 @@ void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
        if (!ring->funcs->set_priority)
                return;
 
-       atomic_inc(&ring->num_jobs[priority]);
+       if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
+               return;
 
        mutex_lock(&ring->priority_mutex);
        if (priority <= ring->priority)
@@ -304,7 +305,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                0xffffffffffffffff : ring->buf_mask;
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
-               r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+               r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_GTT,
                                            &ring->ring_obj,
                                            &ring->gpu_addr,
index 1513124c5659db118f1cd6dd9a49d082445abf29..d242b9a51e90f35618aff89c97a8ef9a1165adce 100644 (file)
@@ -44,6 +44,8 @@
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
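
The new helper recovers the &amdgpu_ring that embeds a given GPU scheduler; the reworked trace events below rely on it now that jobs no longer carry a ring pointer. A sketch of the pattern, assuming a valid struct amdgpu_job *job:

	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);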
 enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_GFX,
        AMDGPU_RING_TYPE_COMPUTE,
@@ -53,7 +55,8 @@ enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_KIQ,
        AMDGPU_RING_TYPE_UVD_ENC,
        AMDGPU_RING_TYPE_VCN_DEC,
-       AMDGPU_RING_TYPE_VCN_ENC
+       AMDGPU_RING_TYPE_VCN_ENC,
+       AMDGPU_RING_TYPE_VCN_JPEG
 };
 
 struct amdgpu_device;
@@ -112,6 +115,7 @@ struct amdgpu_ring_funcs {
        u32                     nop;
        bool                    support_64bit_ptrs;
        unsigned                vmhub;
+       unsigned                extra_dw;
 
        /* ring read/write ptr handling */
        u64 (*get_rptr)(struct amdgpu_ring *ring);
@@ -119,6 +123,7 @@ struct amdgpu_ring_funcs {
        void (*set_wptr)(struct amdgpu_ring *ring);
        /* validating and patching of IBs */
        int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+       int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
        /* constants to calculate how many DW are needed for an emit */
        unsigned emit_frame_size;
        unsigned emit_ib_size;
index e3878256743a22281a7d11b9b9ec21369aeaea09..8904e62dca7ae277143f4a3cbdffaa28ef120bf9 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * Copyright 2009 VMware, Inc.
  *
@@ -75,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        r = amdgpu_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_unref;
-       r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+       r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_unres;
        }
+       vram_addr = amdgpu_bo_gpu_offset(vram_obj);
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gart_start, **gart_end;
@@ -96,11 +98,17 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
                r = amdgpu_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_lclean_unref;
-               r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+               r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_lclean_unres;
                }
+               r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
+               if (r) {
+                       DRM_ERROR("%p bind failed\n", gtt_obj[i]);
+                       goto out_lclean_unpin;
+               }
+               gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
 
                r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
index e96e26d3f3b085f38986f5cd6112e0e7325bd56c..7206a0025b17a1ded6fbe7003dc72396653e62d3 100644 (file)
@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
 
            TP_fast_assign(
                           __entry->bo_list = p->bo_list;
-                          __entry->ring = p->job->ring->idx;
+                          __entry->ring = p->ring->idx;
                           __entry->dw = p->job->ibs[i].length_dw;
                           __entry->fences = amdgpu_fence_count_emitted(
-                               p->job->ring);
+                               p->ring);
                           ),
            TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
                      __entry->bo_list, __entry->ring, __entry->dw,
@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
                           __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
                           __entry->context = job->base.s_fence->finished.context;
                           __entry->seqno = job->base.s_fence->finished.seqno;
-                          __entry->ring_name = job->ring->name;
+                          __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
            TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
                           __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
                           __entry->context = job->base.s_fence->finished.context;
                           __entry->seqno = job->base.s_fence->finished.seqno;
-                          __entry->ring_name = job->ring->name;
+                          __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
            TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
            TP_ARGS(mapping)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+           TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+           TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_ptes,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint64_t flags),
@@ -436,7 +441,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
                        __entry->total_bo, __entry->total_size)
 );
 
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
            TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
            TP_ARGS(bo, new_placement, old_placement),
            TP_STRUCT__entry(
index e93a0a237dc3eb8c3271c2fa3cc754c02c3ff4c7..fcf421263fd9689226de77738da98f09a6d4b280 100644 (file)
@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 }
 
 /**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- *                                                     structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
  *
- * @adev:      AMDGPU device for which the global structures need to be
- *                     registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
  *
  * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
  * during bring up.
@@ -104,8 +102,6 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 {
        struct drm_global_reference *global_ref;
-       struct amdgpu_ring *ring;
-       struct drm_sched_rq *rq;
        int r;
 
        /* ensure reference is false in case init fails */
@@ -138,21 +134,10 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 
        mutex_init(&adev->mman.gtt_window_lock);
 
-       ring = adev->mman.buffer_funcs_ring;
-       rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-       r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-                                 rq, NULL);
-       if (r) {
-               DRM_ERROR("Failed setting up TTM BO move run queue.\n");
-               goto error_entity;
-       }
-
        adev->mman.mem_global_referenced = true;
 
        return 0;
 
-error_entity:
-       drm_global_item_unref(&adev->mman.bo_global_ref.ref);
 error_bo:
        drm_global_item_unref(&adev->mman.mem_global_ref);
 error_mem:
@@ -162,8 +147,6 @@ error_mem:
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 {
        if (adev->mman.mem_global_referenced) {
-               drm_sched_entity_fini(adev->mman.entity.sched,
-                                     &adev->mman.entity);
                mutex_destroy(&adev->mman.gtt_window_lock);
                drm_global_item_unref(&adev->mman.bo_global_ref.ref);
                drm_global_item_unref(&adev->mman.mem_global_ref);
@@ -177,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 }
 
 /**
- * amdgpu_init_mem_type -      Initialize a memory manager for a specific
- *                                                     type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
  *
- * @bdev:      The TTM BO device object (contains a reference to
- *                     amdgpu_device)
- * @type:      The type of memory requested
- * @man:
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
+ * @type: The type of memory requested
+ * @man: The memory type manager for each domain
  *
  * This is called by ttm_bo_init_mm() when a buffer object is being
  * initialized.
@@ -263,7 +245,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
        }
 
        /* Object isn't an AMDGPU object so ignore */
-       if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+       if (!amdgpu_bo_is_amdgpu_bo(bo)) {
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
@@ -276,8 +258,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
        case TTM_PL_VRAM:
                if (!adev->mman.buffer_funcs_enabled) {
                        /* Move to system memory */
-                       amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
-               } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
+                       amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+               } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                           !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
                           amdgpu_bo_in_cpu_visible_vram(abo)) {
 
@@ -286,7 +268,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                         * BO will be evicted to GTT rather than causing other
                         * BOs to be evicted from VRAM
                         */
-                       amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+                       amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
                                                         AMDGPU_GEM_DOMAIN_GTT);
                        abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
                        abo->placements[0].lpfn = 0;
@@ -294,12 +276,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                        abo->placement.num_busy_placement = 1;
                } else {
                        /* Move to GTT memory */
-                       amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+                       amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
                }
                break;
        case TTM_PL_TT:
        default:
-               amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+               amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
        }
        *placement = abo->placement;
 }
@@ -307,8 +289,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 /**
  * amdgpu_verify_access - Verify access for a mmap call
  *
- * @bo:                The buffer object to map
- * @filp:      The file pointer from the process performing the mmap
+ * @bo:        The buffer object to map
+ * @filp: The file pointer from the process performing the mmap
  *
  * This is called by ttm_bo_mmap() to verify whether a process
  * has the right to mmap a BO to their process space.
@@ -333,11 +315,10 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 /**
  * amdgpu_move_null - Register memory for a buffer object
  *
- * @bo:                        The bo to assign the memory to
- * @new_mem:   The memory to be assigned.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
  *
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
  */
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
@@ -350,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_mm_node_addr -       Compute the GPU relative offset of a GTT
- *                                                     buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The buffer object whose address is computed.
+ * @mm_node: Memory manager node for drm allocator.
+ * @mem: The region where the bo resides.
+ *
  */
 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
                                    struct drm_mm_node *mm_node,
@@ -367,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_find_mm_node -       Helper function finds the drm_mm_node
- *                                             corresponding to @offset. It also modifies
- *                                                     the offset to be within the drm_mm_node
- *                                                     returned
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
+ * @offset. It also modifies the offset to be within the drm_mm_node returned
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset to look up; adjusted on return to be relative to the
+ * returned drm_mm_node.
+ *
  */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
                                               unsigned long *offset)
@@ -512,8 +499,8 @@ error:
 /**
  * amdgpu_move_blit - Copy an entire buffer to another buffer
  *
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
  */
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                            bool evict, bool no_wait_gpu,
@@ -595,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
        }
 
        /* blit VRAM to GTT */
-       r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+       r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -647,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
        }
 
        /* copy to VRAM */
-       r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+       r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -809,8 +796,8 @@ struct amdgpu_ttm_tt {
 };
 
 /**
- * amdgpu_ttm_tt_get_user_pages -      Pin pages of memory pointed to
- *                                                                     by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin the pages of memory pointed to by a
+ * USERPTR
  *
  * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
  * This provides a wrapper around the get_user_pages() call to provide
@@ -833,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
        down_read(&mm->mmap_sem);
 
        if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-               /* check that we only use anonymous memory
-                  to prevent problems with writeback */
+               /*
+                * check that we only use anonymous memory to prevent problems
+                * with writeback
+                */
                unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
 
@@ -885,10 +874,9 @@ release_pages:
 }
 
 /**
- * amdgpu_ttm_tt_set_user_pages -      Copy pages in, putting old pages
- *                                                                     as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting (releasing) old pages
+ * as necessary.
  *
- * Called by amdgpu_cs_list_validate().  This creates the page list
+ * Called by amdgpu_cs_list_validate(). This creates the page list
  * that backs user memory and will ultimately be mapped into the device
  * address space.
  */
@@ -930,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_pin_userptr -         prepare the sg table with the
- *                                                             user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
  *
  * Called by amdgpu_ttm_backend_bind()
  **/
@@ -1310,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- *                                                             for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
  *
  * @ttm: The ttm_tt object to bind this userptr object to
  * @addr:  The address in the current tasks VM space to use
@@ -1361,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_affect_userptr -      Determine if a ttm_tt object lays
- *                                                                     inside an address range for the
- *                                                                     current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
+ * address range for the current task.
  *
  */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1401,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 }
 
 /**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- *                                                                             invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
  */
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
                                       int *last_invalidated)
@@ -1415,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- *                                                                             ttm_tt object been invalidated
- *                                                                             since the last time they've
- *                                                                             been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
  */
 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
 {
@@ -1474,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_bo_eviction_valuable -   Check to see if we can evict
- *                                                                             a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
  *
- * Return true if eviction is sensible.  Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
  * used to clean out a memory space.
  */
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1530,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_ttm_access_memory -  Read or Write memory that backs a
- *                                                             buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
  *
  * @bo:  The buffer object to read/write
  * @offset:  Offset into buffer object
@@ -1695,7 +1676,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
                        AMDGPU_GEM_DOMAIN_VRAM,
                        adev->fw_vram_usage.start_offset,
                        (adev->fw_vram_usage.start_offset +
-                       adev->fw_vram_usage.size), NULL);
+                       adev->fw_vram_usage.size));
                if (r)
                        goto error_pin;
                r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1719,8 +1700,8 @@ error_create:
        return r;
 }
 /**
- * amdgpu_ttm_init -   Init the memory management (ttm) as well as
- *                                             various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
  *
  * This initializes all of the memory space pools that the TTM layer
  * will need such as the GTT space (system memory mapped to the device),
@@ -1871,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_ttm_late_init -      Handle any late initialization for
- *                                                     amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
  */
 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
 {
@@ -1921,10 +1901,30 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 {
        struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
        uint64_t size;
+       int r;
 
-       if (!adev->mman.initialized || adev->in_gpu_reset)
+       if (!adev->mman.initialized || adev->in_gpu_reset ||
+           adev->mman.buffer_funcs_enabled == enable)
                return;
 
+       if (enable) {
+               struct amdgpu_ring *ring;
+               struct drm_sched_rq *rq;
+
+               ring = adev->mman.buffer_funcs_ring;
+               rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+               r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+               if (r) {
+                       DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
+                                 r);
+                       return;
+               }
+       } else {
+               drm_sched_entity_destroy(&adev->mman.entity);
+               dma_fence_put(man->move);
+               man->move = NULL;
+       }
+
        /* this just adjusts TTM size idea, which sets lpfn to the correct value */
        if (enable)
                size = adev->gmc.real_vram_size;
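
The hunk above makes the toggle idempotent (early return when buffer_funcs_enabled already matches the request) and ties the lifetime of the TTM BO-move scheduler entity to it, using the new drm_sched_entity_init(&entity, &rq, 1, NULL) form that takes an array of run queues plus a count instead of a scheduler pointer. A minimal userspace sketch of the same enable/disable idiom; the names here are hypothetical stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mgr {
	bool initialized;
	bool funcs_enabled;
	void *entity;			/* stands in for adev->mman.entity */
};

static void mgr_set_enabled(struct mgr *m, bool enable)
{
	/* mirror the patch: bail out early when nothing would change */
	if (!m->initialized || m->funcs_enabled == enable)
		return;

	if (enable) {
		m->entity = malloc(1);	/* stands in for drm_sched_entity_init() */
		if (!m->entity) {
			fprintf(stderr, "Failed setting up TTM BO move entity\n");
			return;		/* state unchanged on failure */
		}
	} else {
		free(m->entity);	/* stands in for drm_sched_entity_destroy() */
		m->entity = NULL;
	}
	m->funcs_enabled = enable;
}

int main(void)
{
	struct mgr m = { .initialized = true };

	mgr_set_enabled(&m, true);
	mgr_set_enabled(&m, true);	/* second call is now a no-op */
	mgr_set_enabled(&m, false);
	return 0;
}
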
@@ -2002,7 +2002,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
        if (r)
                goto error_free;
 
-       r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+       r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
                goto error_free;
@@ -2071,24 +2071,19 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-       if (direct_submit) {
-               r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
-                                      NULL, fence);
-               job->fence = dma_fence_get(*fence);
-               if (r)
-                       DRM_ERROR("Error scheduling IBs (%d)\n", r);
-               amdgpu_job_free(job);
-       } else {
-               r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+       if (direct_submit)
+               r = amdgpu_job_submit_direct(job, ring, fence);
+       else
+               r = amdgpu_job_submit(job, &adev->mman.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-               if (r)
-                       goto error_free;
-       }
+       if (r)
+               goto error_free;
 
        return r;
 
 error_free:
        amdgpu_job_free(job);
+       DRM_ERROR("Error scheduling IBs (%d)\n", r);
        return r;
 }
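
With direct and queued submission reduced to picking one call, a single error exit can free the job and log the failure for both paths. A compilable sketch of the consolidated control flow, with stub functions standing in for amdgpu_job_submit_direct() and amdgpu_job_submit():

#include <stdio.h>

/* stubs standing in for the two submission paths */
static int submit_direct(void) { return 0; }
static int submit_queued(void) { return -1; }

static int copy_buffer(int direct_submit)
{
	int r;

	if (direct_submit)
		r = submit_direct();
	else
		r = submit_queued();
	if (r)
		goto error_free;

	return r;

error_free:
	/* one place frees the job and logs, instead of two near-copies */
	fprintf(stderr, "Error scheduling IBs (%d)\n", r);
	return r;
}

int main(void) { return copy_buffer(1); }
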
 
@@ -2171,7 +2166,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-       r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+       r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
index e969c879d87e66c686c0345839da07b39391e3e2..8b3cc6687769eef8b24fb4d11b40cd518d7f442e 100644 (file)
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
index bcf68f80bbf058b9cfb8f7a1239f82259f95774b..632fa5980ff44ab73519a9c8fe8e21baca6fbecc 100644 (file)
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE       "radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI        "radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI        "radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII        "radeon/hawaii_uvd.bin"
-#define FIRMWARE_MULLINS       "radeon/mullins_uvd.bin"
+#define FIRMWARE_BONAIRE       "amdgpu/bonaire_uvd.bin"
+#define FIRMWARE_KABINI        "amdgpu/kabini_uvd.bin"
+#define FIRMWARE_KAVERI        "amdgpu/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII        "amdgpu/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS       "amdgpu/mullins_uvd.bin"
 #endif
 #define FIRMWARE_TONGA         "amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO       "amdgpu/carrizo_uvd.bin"
@@ -127,10 +127,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
-       unsigned version_major, version_minor, family_id;
+       unsigned family_id;
        int i, j, r;
 
-       INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+       INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
        switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -208,29 +208,46 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
-       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
-       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
-       DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
-               version_major, version_minor, family_id);
-
-       /*
-        * Limit the number of UVD handles depending on microcode major
-        * and minor versions. The firmware version which has 40 UVD
-        * instances support is 1.80. So all subsequent versions should
-        * also have the same support.
-        */
-       if ((version_major > 0x01) ||
-           ((version_major == 0x01) && (version_minor >= 0x50)))
-               adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
 
-       adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
-                               (family_id << 8));
+       if (adev->asic_type < CHIP_VEGA20) {
+               unsigned version_major, version_minor;
 
-       if ((adev->asic_type == CHIP_POLARIS10 ||
-            adev->asic_type == CHIP_POLARIS11) &&
-           (adev->uvd.fw_version < FW_1_66_16))
-               DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
-                         version_major, version_minor);
+               version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+               version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+                       version_major, version_minor, family_id);
+
+               /*
+                * Limit the number of UVD handles depending on microcode major
+                * and minor versions. The first firmware version to support
+                * 40 UVD instances is 1.80, so all subsequent versions should
+                * provide the same support.
+                */
+               if ((version_major > 0x01) ||
+                   ((version_major == 0x01) && (version_minor >= 0x50)))
+                       adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+               adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+                                       (family_id << 8));
+
+               if ((adev->asic_type == CHIP_POLARIS10 ||
+                    adev->asic_type == CHIP_POLARIS11) &&
+                   (adev->uvd.fw_version < FW_1_66_16))
+                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+                                 version_major, version_minor);
+       } else {
+               unsigned int enc_major, enc_minor, dec_minor;
+
+               dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
+               enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+               DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+                       enc_major, enc_minor, dec_minor, family_id);
+
+               adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+               adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
+       }
 
        bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
                  +  AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
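
On Vega20 and later the UVD ucode_version word is decoded with the ENC/DEC split shown above: ENC major in bits 30-31, ENC minor in bits 24-29, DEC minor in bits 8-15, family ID in bits 0-7. A standalone decode of a made-up sample value using the same shifts and masks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ucode_version = 0x4A002B11;	/* made-up sample word */
	unsigned enc_major = (ucode_version >> 30) & 0x3;
	unsigned enc_minor = (ucode_version >> 24) & 0x3f;
	unsigned dec_minor = (ucode_version >> 8) & 0xff;
	unsigned family_id = ucode_version & 0xff;

	/* prints "ENC: 1.10 DEC: .43 Family ID: 17" */
	printf("ENC: %u.%u DEC: .%u Family ID: %u\n",
	       enc_major, enc_minor, dec_minor, family_id);
	return 0;
}
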
@@ -238,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
                                            &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -246,21 +264,20 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                        dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
                        return r;
                }
+       }
 
-               ring = &adev->uvd.inst[j].ring;
-               rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-                                         rq, NULL);
-               if (r != 0) {
-                       DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
-                       return r;
-               }
-
-               for (i = 0; i < adev->uvd.max_handles; ++i) {
-                       atomic_set(&adev->uvd.inst[j].handles[i], 0);
-                       adev->uvd.inst[j].filp[i] = NULL;
-               }
+       ring = &adev->uvd.inst[0].ring;
+       rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+       r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+       if (r) {
+               DRM_ERROR("Failed setting up UVD kernel entity.\n");
+               return r;
        }
+       for (i = 0; i < adev->uvd.max_handles; ++i) {
+               atomic_set(&adev->uvd.handles[i], 0);
+               adev->uvd.filp[i] = NULL;
+       }
+
        /* from uvd v5.0 HW addressing capacity increased to 64 bits */
        if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
                adev->uvd.address_64_bit = true;
@@ -289,11 +306,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
        int i, j;
 
+       drm_sched_entity_destroy(&adev->uvd.entity);
+
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                kfree(adev->uvd.inst[j].saved_bo);
 
-               drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
-
                amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
                                      &adev->uvd.inst[j].gpu_addr,
                                      (void **)&adev->uvd.inst[j].cpu_addr);
@@ -314,21 +333,23 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        void *ptr;
        int i, j;
 
-       for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
-               if (adev->uvd.inst[j].vcpu_bo == NULL)
-                       continue;
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
 
-               cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+       /* only valid for physical mode */
+       if (adev->asic_type < CHIP_POLARIS10) {
+               for (i = 0; i < adev->uvd.max_handles; ++i)
+                       if (atomic_read(&adev->uvd.handles[i]))
+                               break;
 
-               /* only valid for physical mode */
-               if (adev->asic_type < CHIP_POLARIS10) {
-                       for (i = 0; i < adev->uvd.max_handles; ++i)
-                               if (atomic_read(&adev->uvd.inst[j].handles[i]))
-                                       break;
+               if (i == adev->uvd.max_handles)
+                       return 0;
+       }
 
-                       if (i == adev->uvd.max_handles)
-                               continue;
-               }
+       for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
+               if (adev->uvd.inst[j].vcpu_bo == NULL)
+                       continue;
 
                size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
                ptr = adev->uvd.inst[j].cpu_addr;
@@ -349,6 +370,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                if (adev->uvd.inst[i].vcpu_bo == NULL)
                        return -EINVAL;
 
@@ -381,30 +404,27 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-       struct amdgpu_ring *ring;
-       int i, j, r;
-
-       for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-               ring = &adev->uvd.inst[j].ring;
+       struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
+       int i, r;
 
-               for (i = 0; i < adev->uvd.max_handles; ++i) {
-                       uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
-                       if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
-                               struct dma_fence *fence;
-
-                               r = amdgpu_uvd_get_destroy_msg(ring, handle,
-                                                              false, &fence);
-                               if (r) {
-                                       DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
-                                       continue;
-                               }
+       for (i = 0; i < adev->uvd.max_handles; ++i) {
+               uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 
-                               dma_fence_wait(fence, false);
-                               dma_fence_put(fence);
+               if (handle != 0 && adev->uvd.filp[i] == filp) {
+                       struct dma_fence *fence;
 
-                               adev->uvd.inst[j].filp[i] = NULL;
-                               atomic_set(&adev->uvd.inst[j].handles[i], 0);
+                       r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+                                                      &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD %d!\n", r);
+                               continue;
                        }
+
+                       dma_fence_wait(fence, false);
+                       dma_fence_put(fence);
+
+                       adev->uvd.filp[i] = NULL;
+                       atomic_set(&adev->uvd.handles[i], 0);
                }
        }
 }
@@ -459,7 +479,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
                if (cmd == 0x0 || cmd == 0x3) {
                        /* yes, force it into VRAM */
                        uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
-                       amdgpu_ttm_placement_from_domain(bo, domain);
+                       amdgpu_bo_placement_from_domain(bo, domain);
                }
                amdgpu_uvd_force_into_uvd_segment(bo);
 
@@ -679,16 +699,15 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
        void *ptr;
        long r;
        int i;
-       uint32_t ip_instance = ctx->parser->job->ring->me;
 
        if (offset & 0x3F) {
-               DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+               DRM_ERROR("UVD messages must be 64 byte aligned!\n");
                return -EINVAL;
        }
 
        r = amdgpu_bo_kmap(bo, &ptr);
        if (r) {
-               DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
+               DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
                return r;
        }
 
@@ -698,7 +717,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
        handle = msg[2];
 
        if (handle == 0) {
-               DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
+               DRM_ERROR("Invalid UVD handle!\n");
                return -EINVAL;
        }
 
@@ -709,18 +728,19 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
                /* try to alloc a new handle */
                for (i = 0; i < adev->uvd.max_handles; ++i) {
-                       if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-                               DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
+                       if (atomic_read(&adev->uvd.handles[i]) == handle) {
+                               DRM_ERROR(")Handle 0x%x already in use!\n",
+                                         handle);
                                return -EINVAL;
                        }
 
-                       if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
-                               adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
+                       if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+                               adev->uvd.filp[i] = ctx->parser->filp;
                                return 0;
                        }
                }
 
-               DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
+               DRM_ERROR("No more free UVD handles!\n");
                return -ENOSPC;
 
        case 1:
@@ -732,27 +752,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
                /* validate the handle */
                for (i = 0; i < adev->uvd.max_handles; ++i) {
-                       if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
-                               if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
-                                       DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
+                       if (atomic_read(&adev->uvd.handles[i]) == handle) {
+                               if (adev->uvd.filp[i] != ctx->parser->filp) {
+                                       DRM_ERROR("UVD handle collision detected!\n");
                                        return -EINVAL;
                                }
                                return 0;
                        }
                }
 
-               DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
+               DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
                return -ENOENT;
 
        case 2:
                /* it's a destroy msg, free the handle */
                for (i = 0; i < adev->uvd.max_handles; ++i)
-                       atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
+                       atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
                amdgpu_bo_kunmap(bo);
                return 0;
 
        default:
-               DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
+               DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
                return -EINVAL;
        }
        BUG();
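
The handle bookkeeping moves from per-instance arrays to one device-wide array, but the lock-free slot-claim idiom is unchanged: scan for a duplicate first, then cmpxchg a zero slot to claim it. The same pattern in plain C11 atomics, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HANDLES 8

static atomic_uint handles[MAX_HANDLES];

/* claim the first free slot for `handle`; returns slot index or -1 */
static int alloc_handle(unsigned handle)
{
	unsigned expected;
	int i;

	for (i = 0; i < MAX_HANDLES; i++) {
		if (atomic_load(&handles[i]) == handle)
			return -1;	/* handle already in use */
		expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle))
			return i;	/* claimed an empty slot */
	}
	return -1;			/* no more free handles */
}

int main(void)
{
	printf("slot %d\n", alloc_handle(0x1234));
	return 0;
}
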
@@ -1000,7 +1020,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
        if (!ring->adev->uvd.address_64_bit) {
                struct ttm_operation_ctx ctx = { true, false };
 
-               amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+               amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
                amdgpu_uvd_force_into_uvd_segment(bo);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (r)
@@ -1045,19 +1065,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                if (r < 0)
                        goto err_free;
 
-               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-               job->fence = dma_fence_get(f);
+               r = amdgpu_job_submit_direct(job, ring, &f);
                if (r)
                        goto err_free;
-
-               amdgpu_job_free(job);
        } else {
                r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
                                     AMDGPU_FENCE_OWNER_UNDEFINED, false);
                if (r)
                        goto err_free;
 
-               r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+               r = amdgpu_job_submit(job, &adev->uvd.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
@@ -1145,10 +1162,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
-               container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+               container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = 0, i, j;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
                for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
                        fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
@@ -1167,7 +1186,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                                                               AMD_CG_STATE_GATE);
                }
        } else {
-               schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
 }
 
@@ -1179,7 +1198,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
        if (amdgpu_sriov_vf(adev))
                return;
 
-       set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+       set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1215,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
        if (!amdgpu_sriov_vf(ring->adev))
-               schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
@@ -1259,7 +1278,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
                 * necessarily linear. So we need to count
                 * all non-zero handles.
                 */
-               if (atomic_read(&adev->uvd.inst->handles[i]))
+               if (atomic_read(&adev->uvd.handles[i]))
                        used_handles++;
        }
 
index b1579fba134c189777d59242d4f9e2dcd97a8378..33c5f806f9256a004235833268ad4d5a281ca644 100644 (file)
@@ -42,26 +42,29 @@ struct amdgpu_uvd_inst {
        void                    *cpu_addr;
        uint64_t                gpu_addr;
        void                    *saved_bo;
-       atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
-       struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
-       struct delayed_work     idle_work;
        struct amdgpu_ring      ring;
        struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
        struct amdgpu_irq_src   irq;
-       struct drm_sched_entity entity;
-       struct drm_sched_entity entity_enc;
        uint32_t                srbm_soft_reset;
 };
 
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
 struct amdgpu_uvd {
        const struct firmware   *fw;    /* UVD firmware */
        unsigned                fw_version;
        unsigned                max_handles;
        unsigned                num_enc_rings;
-       uint8_t         num_uvd_inst;
+       uint8_t                 num_uvd_inst;
        bool                    address_64_bit;
        bool                    use_ctx_buf;
-       struct amdgpu_uvd_inst          inst[AMDGPU_MAX_UVD_INSTANCES];
+       struct amdgpu_uvd_inst  inst[AMDGPU_MAX_UVD_INSTANCES];
+       struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
+       atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
+       struct drm_sched_entity entity;
+       struct delayed_work     idle_work;
+       unsigned                harvest_config;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
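
harvest_config is consumed as a per-instance bitmask: every loop over num_uvd_inst now skips instances whose bit is set, treating them as fused off. A trivial standalone illustration of that skip pattern, with made-up values:

#include <stdio.h>

#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)

int main(void)
{
	unsigned harvest_config = AMDGPU_UVD_HARVEST_UVD1; /* UVD1 fused off */
	int num_uvd_inst = 2, j;

	for (j = 0; j < num_uvd_inst; j++) {
		if (harvest_config & (1 << j))
			continue;	/* same skip the per-instance loops do */
		printf("setting up UVD instance %d\n", j);
	}
	return 0;
}
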
index 23d960ec1cf27947d8f5ccea419a92224b9ad112..b6ab4f5350c8098836c5f6758f2ef2387efea596 100644 (file)
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE       "radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI        "radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI        "radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII        "radeon/hawaii_vce.bin"
-#define FIRMWARE_MULLINS       "radeon/mullins_vce.bin"
+#define FIRMWARE_BONAIRE       "amdgpu/bonaire_vce.bin"
+#define FIRMWARE_KABINI        "amdgpu/kabini_vce.bin"
+#define FIRMWARE_KAVERI        "amdgpu/kaveri_vce.bin"
+#define FIRMWARE_HAWAII        "amdgpu/hawaii_vce.bin"
+#define FIRMWARE_MULLINS       "amdgpu/mullins_vce.bin"
 #endif
 #define FIRMWARE_TONGA         "amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO       "amdgpu/carrizo_vce.bin"
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
        ring = &adev->vce.ring[0];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-       r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-                                 rq, NULL);
+       r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCE run queue.\n");
                return r;
@@ -222,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
-       drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+       drm_sched_entity_destroy(&adev->vce.entity);
 
        amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
                (void **)&adev->vce.cpu_addr);
@@ -470,12 +469,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-       job->fence = dma_fence_get(f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;
 
-       amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
@@ -532,19 +529,13 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       if (direct) {
-               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-               job->fence = dma_fence_get(f);
-               if (r)
-                       goto err;
-
-               amdgpu_job_free(job);
-       } else {
-               r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+       if (direct)
+               r = amdgpu_job_submit_direct(job, ring, &f);
+       else
+               r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-               if (r)
-                       goto err;
-       }
+       if (r)
+               goto err;
 
        if (fence)
                *fence = dma_fence_get(f);
index 127e87b470ff4da368c8c1feb0576f0b6cb0c62c..798648a1971080040e2dc5140f1a7349440c6cc2 100644 (file)
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
-       unsigned version_major, version_minor, family_id;
+       unsigned char fw_check;
        int r;
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
-       family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
-       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
-       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
-       DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
-               version_major, version_minor, family_id);
 
+       /* Bits 20-23 hold the encode major version and are non-zero in the new
+        * naming convention. In the old convention this field is part of the
+        * version minor and DRM_DISABLED_FLAG. Since the latest version minor is
+        * 0x5B and DRM_DISABLED_FLAG is zero there, this field has always been
+        * zero so far. These four bits tell which naming convention is present.
+        */
+       fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+       if (fw_check) {
+               unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+               fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+               enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+               enc_major = fw_check;
+               dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+               vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+               DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+                       enc_major, enc_minor, dec_ver, vep, fw_rev);
+       } else {
+               unsigned int version_major, version_minor, family_id;
+
+               family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+               version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+               version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+                       version_major, version_minor, family_id);
+       }
 
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
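
The fw_check nibble in bits 20-23 disambiguates the two VCN version layouts: non-zero means the new ENC/DEC/VEP/revision encoding, zero means the old major/minor/family one. A standalone decode of a made-up sample word, following the same shifts and masks as the hunk:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ucode_version = 0x21105007;	/* made-up sample word */
	unsigned fw_check = (ucode_version >> 20) & 0xf;

	if (fw_check) {				/* new naming convention */
		unsigned fw_rev    = ucode_version & 0xfff;
		unsigned enc_minor = (ucode_version >> 12) & 0xff;
		unsigned enc_major = fw_check;
		unsigned dec_ver   = (ucode_version >> 24) & 0xf;
		unsigned vep       = (ucode_version >> 28) & 0xf;

		/* prints "ENC: 1.5 DEC: 1 VEP: 2 Revision: 7" */
		printf("ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
		       enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		/* fall back to the old major.minor/family decode */
		printf("old naming convention\n");
	}
	return 0;
}
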
@@ -119,6 +140,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
 
+       amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+
        release_firmware(adev->vcn.fw);
 
        return 0;
@@ -188,6 +211,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
                fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
        }
 
+       fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
+
        if (fences == 0) {
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, false);
@@ -204,7 +229,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
-       if (set_clocks && adev->pm.dpm_enabled) {
+       if (set_clocks) {
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, true);
                else
@@ -283,13 +308,10 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
        }
        ib->length_dw = 16;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-       job->fence = dma_fence_get(f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;
 
-       amdgpu_job_free(job);
-
        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
@@ -474,12 +496,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-       job->fence = dma_fence_get(f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;
 
-       amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
@@ -528,12 +548,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-       job->fence = dma_fence_get(f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;
 
-       amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
@@ -576,3 +594,127 @@ error:
        dma_fence_put(fence);
        return r;
 }
+
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       int r;
+
+       WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
+       r = amdgpu_ring_alloc(ring, 3);
+
+       if (r) {
+               DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
+                                 ring->idx, r);
+               return r;
+       }
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
+       amdgpu_ring_write(ring, 0xDEADBEEF);
+       amdgpu_ring_commit(ring);
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout) {
+               DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
+                                 ring->idx, i);
+       } else {
+               DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
+                                 ring->idx, tmp);
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+               struct dma_fence **fence)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
+       struct amdgpu_ib *ib;
+       struct dma_fence *f = NULL;
+       const unsigned ib_size_dw = 16;
+       int i, r;
+
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       if (r)
+               return r;
+
+       ib = &job->ibs[0];
+
+       ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
+       ib->ptr[1] = 0xDEADBEEF;
+       for (i = 2; i < 16; i += 2) {
+               ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+               ib->ptr[i+1] = 0;
+       }
+       ib->length_dw = 16;
+
+       r = amdgpu_job_submit_direct(job, ring, &f);
+       if (r)
+               goto err;
+
+       if (fence)
+               *fence = dma_fence_get(f);
+       dma_fence_put(f);
+
+       return 0;
+
+err:
+       amdgpu_job_free(job);
+       return r;
+}
+
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t tmp = 0;
+       unsigned i;
+       struct dma_fence *fence = NULL;
+       long r = 0;
+
+       r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
+       if (r) {
+               DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+               goto error;
+       }
+
+       r = dma_fence_wait_timeout(fence, false, timeout);
+       if (r == 0) {
+               DRM_ERROR("amdgpu: IB test timed out.\n");
+               r = -ETIMEDOUT;
+               goto error;
+       } else if (r < 0) {
+               DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+               goto error;
+       } else
+               r = 0;
+
+       for (i = 0; i < adev->usec_timeout; i++) {
+               tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < adev->usec_timeout)
+               DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+       else {
+               DRM_ERROR("ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+
+       dma_fence_put(fence);
+
+error:
+       return r;
+}
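
Both JPEG tests use the same handshake: seed a register with a poison value (0xCAFEDEAD), have the engine write 0xDEADBEEF through the ring or IB, then poll until the expected value appears or the timeout expires. A toy single-threaded model of that poll loop, no hardware involved:

#include <stdio.h>

static unsigned reg;			/* stands in for mmUVD_JPEG_PITCH */

static void fake_engine_execute(void) { reg = 0xDEADBEEF; }

int main(void)
{
	int i, timeout = 100;

	reg = 0xCAFEDEAD;		/* poison value, as in the tests above */
	fake_engine_execute();		/* stands in for commit + execution */

	for (i = 0; i < timeout; i++) {
		if (reg == 0xDEADBEEF)
			break;
	}
	if (i < timeout)
		printf("test succeeded in %d polls\n", i);
	else
		printf("test failed (0x%08X)\n", reg);
	return 0;
}
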
index 773010b9ff153f89aaa5747df5dc09b07baa8cd7..0b0b8638d73fba480c6431995862b63e22065f26 100644 (file)
@@ -66,6 +66,7 @@ struct amdgpu_vcn {
        const struct firmware   *fw;    /* VCN firmware */
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+       struct amdgpu_ring      ring_jpeg;
        struct amdgpu_irq_src   irq;
        unsigned                num_enc_rings;
 };
@@ -83,4 +84,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 
+int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
 #endif
index b0eb2f537392d192d84d3884bd685f7084e9e220..ece0ac703e277282992422865f9945ce06ca5f0c 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
 
-/*
- * GPUVM
+/**
+ * DOC: GPUVM
+ *
  * GPUVM is similar to the legacy gart on older asics, however
  * rather than there being a single global gart table
  * for the entire GPU, there are multiple VM page tables active
@@ -63,37 +65,84 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
 #undef START
 #undef LAST
 
-/* Local structure. Encapsulate some VM table update parameters to reduce
+/**
+ * struct amdgpu_pte_update_params - Local structure
+ *
+ * Encapsulate some VM table update parameters to reduce
  * the number of function parameters
+ *
  */
 struct amdgpu_pte_update_params {
-       /* amdgpu device we do this update for */
+
+       /**
+        * @adev: amdgpu device we do this update for
+        */
        struct amdgpu_device *adev;
-       /* optional amdgpu_vm we do this update for */
+
+       /**
+        * @vm: optional amdgpu_vm we do this update for
+        */
        struct amdgpu_vm *vm;
-       /* address where to copy page table entries from */
+
+       /**
+        * @src: address where to copy page table entries from
+        */
        uint64_t src;
-       /* indirect buffer to fill with commands */
+
+       /**
+        * @ib: indirect buffer to fill with commands
+        */
        struct amdgpu_ib *ib;
-       /* Function which actually does the update */
+
+       /**
+        * @func: Function which actually does the update
+        */
        void (*func)(struct amdgpu_pte_update_params *params,
                     struct amdgpu_bo *bo, uint64_t pe,
                     uint64_t addr, unsigned count, uint32_t incr,
                     uint64_t flags);
-       /* The next two are used during VM update by CPU
-        *  DMA addresses to use for mapping
-        *  Kernel pointer of PD/PT BO that needs to be updated
+       /**
+        * @pages_addr:
+        *
+        * DMA addresses to use for mapping, used during VM update by CPU
         */
        dma_addr_t *pages_addr;
+
+       /**
+        * @kptr:
+        *
+        * Kernel pointer of PD/PT BO that needs to be updated,
+        * used during VM update by CPU
+        */
        void *kptr;
 };
 
-/* Helper to disable partial resident texture feature from a fence callback */
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
+ */
 struct amdgpu_prt_cb {
+
+       /**
+        * @adev: amdgpu device
+        */
        struct amdgpu_device *adev;
+
+       /**
+        * @cb: callback
+        */
        struct dma_fence_cb cb;
 };
 
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize a bo_va_base structure and add it to the appropriate lists
+ *
+ */
 static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                                   struct amdgpu_vm *vm,
                                   struct amdgpu_bo *bo)
@@ -107,6 +156,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                return;
        list_add_tail(&base->bo_list, &bo->va);
 
+       if (bo->tbo.type == ttm_bo_type_kernel)
+               list_move(&base->vm_status, &vm->relocated);
+
        if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                return;
 
@@ -126,8 +178,10 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
  * amdgpu_vm_level_shift - return the addr shift for each level
  *
  * @adev: amdgpu_device pointer
+ * @level: VMPT level
  *
- * Returns the number of bits the pfn needs to be right shifted for a level.
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
  */
 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                      unsigned level)
@@ -155,8 +209,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
  * amdgpu_vm_num_entries - return the number of entries in a PD/PT
  *
  * @adev: amdgpu_device pointer
+ * @level: VMPT level
  *
- * Calculate the number of entries in a page directory or page table.
+ * Returns:
+ * The number of entries in a page directory or page table.
  */
 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                      unsigned level)
@@ -179,8 +235,10 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
  * amdgpu_vm_bo_size - returns the size of the BOs in bytes
  *
  * @adev: amdgpu_device pointer
+ * @level: VMPT level
  *
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
  */
 static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
 {
@@ -218,6 +276,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
  * @param: parameter for the validation callback
  *
 * Validate the page table BOs on command submission if necessary.
+ *
+ * Returns:
+ * Validation result.
  */
 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                              int (*validate)(void *p, struct amdgpu_bo *bo),
@@ -273,6 +334,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @vm: VM to check
  *
  * Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if eviction list is empty.
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
@@ -283,10 +347,15 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * amdgpu_vm_clear_bo - initially clear the PDs/PTs
  *
  * @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
  * @bo: BO to clear
  * @level: level this BO is at
+ * @pte_support_ats: indicate ATS support from PTE
  *
  * Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm, struct amdgpu_bo *bo,
@@ -318,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                ats_entries = 0;
        }
 
-       ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+       ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
@@ -356,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (r)
                goto error_free;
 
-       r = amdgpu_job_submit(job, ring, &vm->entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+                             &fence);
        if (r)
                goto error_free;
 
@@ -382,10 +451,16 @@ error:
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @parent: parent PT
  * @saddr: start of the address range
  * @eaddr: end of the address range
+ * @level: VMPT level
+ * @ats: indicate ATS support from PTE
  *
  * Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm,
@@ -420,11 +495,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
        eaddr = eaddr & ((1 << shift) - 1);
 
        flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+       if (vm->root.base.bo->shadow)
+               flags |= AMDGPU_GEM_CREATE_SHADOW;
        if (vm->use_cpu_for_update)
                flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else
-               flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
-                               AMDGPU_GEM_CREATE_SHADOW);
+               flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 
        /* walk over the address space and allocate the page tables */
        for (pt_idx = from; pt_idx <= to; ++pt_idx) {
@@ -468,7 +544,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                        pt->parent = amdgpu_bo_ref(parent->base.bo);
 
                        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-                       list_move(&entry->base.vm_status, &vm->relocated);
                }
 
                if (level < AMDGPU_VM_PTB) {
@@ -494,6 +569,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
  * @size: Size from start address we need.
  *
  * Make sure the page tables are allocated.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                        struct amdgpu_vm *vm,
@@ -559,6 +637,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
        }
 }
 
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
 bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job)
 {
@@ -586,19 +673,17 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
        return vm_flush_needed || gds_switch_needed;
 }
 
-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
-{
-       return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
-}
-
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vmid: vmid number to use
- * @pd_addr: address of the page directory
+ * @job:  related job
+ * @need_pipe_sync: is pipe sync needed
  *
  * Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
  */
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
 {
@@ -706,6 +791,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
  * Returns the found bo_va or NULL if none is found
  *
  * Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
  */
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
@@ -787,7 +875,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
  * @addr: the unmapped addr
  *
  * Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
  */
 static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
 {
@@ -840,6 +931,17 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
        }
 }
 
+
+/**
+ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+ * @owner: fence owner
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
 static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             void *owner)
 {
@@ -893,7 +995,10 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
 /*
  * amdgpu_vm_invalidate_level - mark all PD levels as invalid
  *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
  * @parent: parent PD
+ * @level: VMPT level
  *
  * Mark all PD level as invalid after an error.
  */
@@ -928,7 +1033,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
  * @vm: requested vm
  *
  * Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  */
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                 struct amdgpu_vm *vm)
@@ -978,7 +1085,7 @@ restart:
                                           struct amdgpu_vm_bo_base,
                                           vm_status);
                bo_base->moved = false;
-               list_move(&bo_base->vm_status, &vm->idle);
+               list_del_init(&bo_base->vm_status);
 
                bo = bo_base->bo->parent;
                if (!bo)
@@ -1007,15 +1114,15 @@ restart:
                struct amdgpu_ring *ring;
                struct dma_fence *fence;
 
-               ring = container_of(vm->entity.sched, struct amdgpu_ring,
+               ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
                                    sched);
 
                amdgpu_ring_pad_ib(ring, params.ib);
                amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM, false);
                WARN_ON(params.ib->length_dw > ndw);
-               r = amdgpu_job_submit(job, ring, &vm->entity,
-                                     AMDGPU_FENCE_OWNER_VM, &fence);
+               r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+                                     &fence);
                if (r)
                        goto error;
 
@@ -1115,14 +1222,15 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
  * @dst: destination address to map to, the next dst inside the function
  * @flags: mapping flags
  *
  * Update the page tables in the range @start - @end.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                                  uint64_t start, uint64_t end,
@@ -1176,7 +1284,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
  * @end: last PTE to handle
  * @dst: addr those PTEs should point to
  * @flags: hw mapping flags
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
                                uint64_t start, uint64_t end,
@@ -1248,7 +1358,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params    *params,
  * @fence: optional resulting fence
  *
  * Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                       struct dma_fence *exclusive,
@@ -1292,7 +1404,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                           addr, flags);
        }
 
-       ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+       ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
        nptes = last - start + 1;
 
@@ -1324,7 +1436,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                ndw += ncmds * 10;
 
                /* extra commands for begin/end fragments */
-               ndw += 2 * 10 * adev->vm_manager.fragment_size;
+               if (vm->root.base.bo->shadow)
+                       ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
+               else
+                       ndw += 2 * 10 * adev->vm_manager.fragment_size;
 
                params.func = amdgpu_vm_do_set_ptes;
        }
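
When the root page directory has a shadow BO, every begin/end fragment window has to be committed twice, once for the page tables and once for the shadow, so the hunk above doubles the estimated command words (ndw). A minimal standalone sketch of that sizing arithmetic (constants illustrative, not taken from the driver):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative estimate of extra command words for begin/end fragments:
 * 2 windows x 10 dwords per SDMA command x fragment_size, doubled when a
 * shadow copy of the page tables must be kept in sync. */
static unsigned int frag_cmd_words(unsigned int fragment_size, bool has_shadow)
{
        unsigned int ndw = 2 * 10 * fragment_size;

        return has_shadow ? ndw * 2 : ndw;
}

int main(void)
{
        printf("no shadow: %u dwords\n", frag_cmd_words(9, false)); /* 180 */
        printf("shadow:    %u dwords\n", frag_cmd_words(9, true));  /* 360 */
        return 0;
}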
@@ -1371,8 +1486,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        amdgpu_ring_pad_ib(ring, params.ib);
        WARN_ON(params.ib->length_dw > ndw);
-       r = amdgpu_job_submit(job, ring, &vm->entity,
-                             AMDGPU_FENCE_OWNER_VM, &f);
+       r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;
 
@@ -1400,7 +1514,9 @@ error_free:
  *
  * Split the mapping into smaller chunks so that each update fits
  * into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct dma_fence *exclusive,
@@ -1453,7 +1569,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (nodes) {
                        addr = nodes->start << PAGE_SHIFT;
                        max_entries = (nodes->size - pfn) *
-                               (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                               AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                } else {
                        addr = 0;
                        max_entries = S64_MAX;
@@ -1463,7 +1579,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                        uint64_t count;
 
                        max_entries = min(max_entries, 16ull * 1024ull);
-                       for (count = 1; count < max_entries; ++count) {
+                       for (count = 1;
+                            count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+                            ++count) {
                                uint64_t idx = pfn + count;
 
                                if (pages_addr[idx] !=
@@ -1476,7 +1594,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                dma_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
-                               max_entries = count;
+                               max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                        }
 
                } else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1609,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               pfn += last - start + 1;
+               pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
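
The AMDGPU_GPU_PAGES_IN_CPU_PAGE conversions in the hunks above keep pfn and the pages_addr[] index in CPU pages while the PTE counts stay in GPU pages. Assuming the macro is PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE (the expression it replaces), here is a standalone sketch of the cursor arithmetic on a 64 KiB-page kernel:

#include <stdint.h>
#include <stdio.h>

#define CPU_PAGE_SIZE         65536u  /* e.g. a 64 KiB PAGE_SIZE kernel */
#define AMDGPU_GPU_PAGE_SIZE  4096u
#define GPU_PAGES_IN_CPU_PAGE (CPU_PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)

int main(void)
{
        uint64_t start = 0, last = 511;  /* 512 GPU PTEs just written */
        uint64_t pfn = 0;                /* cursor in CPU pages */

        /* 512 GPU pages of 4 KiB cover 32 CPU pages of 64 KiB. */
        pfn += (last - start + 1) / GPU_PAGES_IN_CPU_PAGE;
        printf("pfn advanced by %llu CPU pages\n", (unsigned long long)pfn);
        return 0;
}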
@@ -1511,7 +1629,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
  * @clear: if true clear the entries
  *
  * Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
@@ -1527,18 +1647,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        uint64_t flags;
        int r;
 
-       if (clear || !bo_va->base.bo) {
+       if (clear || !bo) {
                mem = NULL;
                nodes = NULL;
                exclusive = NULL;
        } else {
                struct ttm_dma_tt *ttm;
 
-               mem = &bo_va->base.bo->tbo.mem;
+               mem = &bo->tbo.mem;
                nodes = mem->mm_node;
                if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo_va->base.bo->tbo.ttm,
-                                          struct ttm_dma_tt, ttm);
+                       ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
                exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -1606,6 +1725,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
 /**
  * amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
  */
 static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 {
@@ -1620,6 +1741,8 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 /**
  * amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
@@ -1632,6 +1755,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 
 /**
  * amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
  */
 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 {
@@ -1641,6 +1766,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 
 /**
  * amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback function
  */
 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
 {
@@ -1652,6 +1780,9 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
 
 /**
  * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
  */
 static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
                                 struct dma_fence *fence)
@@ -1743,9 +1874,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  * or if an error occurred)
  *
  * Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
  * PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
+ *
  */
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
@@ -1790,10 +1923,11 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @sync: sync object to add fences to
  *
  * Make sure all BOs which are moved are updated in the PTs.
- * Returns 0 for success.
+ *
+ * Returns:
+ * 0 for success.
  *
  * PTs have to be reserved!
  */
@@ -1848,7 +1982,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
  *
  * Add @bo into the requested vm.
  * Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
  *
  * Object has to be reserved!
  */
@@ -1911,10 +2047,13 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
  * @bo_va: bo_va to store the address
  * @saddr: where to map the BO
  * @offset: requested offset in the BO
+ * @size: BO size in bytes
  * @flags: attributes of pages (read/write/valid/etc.)
  *
  * Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  *
  * Object has to be reserved and unreserved outside!
  */
@@ -1972,11 +2111,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
  * @bo_va: bo_va to store the address
  * @saddr: where to map the BO
  * @offset: requested offset in the BO
+ * @size: BO size in bytes
  * @flags: attributes of pages (read/write/valid/etc.)
  *
  * Add a mapping of the BO at the specified addr into the VM. Replace existing
  * mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  *
  * Object has to be reserved and unreserved outside!
  */
@@ -2033,7 +2175,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
  * @saddr: where the BO is mapped
  *
  * Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  *
  * Object has to be reserved and unreserved outside!
  */
@@ -2087,7 +2231,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
  * @size: size of the range
  *
  * Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  */
 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                struct amdgpu_vm *vm,
@@ -2184,8 +2330,13 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
  * amdgpu_vm_bo_lookup_mapping - find mapping by address
  *
  * @vm: the requested VM
+ * @addr: the address
  *
  * Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching addr, or NULL if none is found
+ *
  */
 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr)
@@ -2193,6 +2344,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
        return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
 }
 
+/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+
+       if (!trace_amdgpu_vm_bo_cs_enabled())
+               return;
+
+       for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+            mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+               if (mapping->bo_va && mapping->bo_va->base.bo) {
+                       struct amdgpu_bo *bo;
+
+                       bo = mapping->bo_va->base.bo;
+                       if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+                               continue;
+               }
+
+               trace_amdgpu_vm_bo_cs(mapping);
+       }
+}
+
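amdgpu_vm_bo_trace_cs() above walks the whole interval tree but skips any mapping whose BO reservation is not held under the submission's ww_acquire ticket, so only buffers validated by this CS are traced. A hedged sketch of a call site (the surrounding names are hypothetical, not part of this hunk):

/* Hypothetical call site: after every BO of a command submission has
 * been reserved under 'ticket', record this submission's mappings.
 * 'fpriv' and 'parser' are illustrative names, not from this patch. */
amdgpu_vm_bo_trace_cs(&fpriv->vm, &parser->ticket);
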
 /**
  * amdgpu_vm_bo_rmv - remove a bo to a specific vm
  *
@@ -2237,8 +2417,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
  * amdgpu_vm_bo_invalidate - mark the bo as invalid
  *
  * @adev: amdgpu_device pointer
- * @vm: requested vm
  * @bo: amdgpu buffer object
+ * @evicted: whether the BO is evicted
  *
  * Mark @bo as invalid.
  */
@@ -2278,6 +2458,14 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
        }
 }
 
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as a power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as a power of two
+ */
 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
 {
        /* Total bits covered by PD + PTs */
@@ -2296,6 +2484,10 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
  *
  * @adev: amdgpu_device pointer
  * @vm_size: the default vm size if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
  */
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
@@ -2363,8 +2555,12 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
  * @adev: amdgpu_device pointer
  * @vm: requested vm
  * @vm_context: Indicates whether it is a GFX or Compute context
+ * @pasid: Process address space identifier
  *
  * Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   int vm_context, unsigned int pasid)
@@ -2396,8 +2592,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        ring_instance %= adev->vm_manager.vm_pte_num_rings;
        ring = adev->vm_manager.vm_pte_rings[ring_instance];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-       r = drm_sched_entity_init(&ring->sched, &vm->entity,
-                                 rq, NULL);
+       r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
        if (r)
                return r;
 
@@ -2415,14 +2610,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        }
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
-       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");
        vm->last_update = NULL;
 
        flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        if (vm->use_cpu_for_update)
                flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       else
+       else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
                flags |= AMDGPU_GEM_CREATE_SHADOW;
 
        size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
@@ -2477,7 +2672,7 @@ error_free_root:
        vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-       drm_sched_entity_fini(&ring->sched, &vm->entity);
+       drm_sched_entity_destroy(&vm->entity);
 
        return r;
 }
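
drm_sched_entity_init() no longer takes the scheduler and a single run queue; the entity now receives an array of candidate run queues (here a one-element list built from the ring's kernel-priority queue) plus the guilty pointer, and the matching teardown becomes drm_sched_entity_destroy(), as in the error path above. A sketch of the new pairing, using only the calls shown in this diff:

struct drm_sched_rq *rq =
        &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
int r;

/* One-element run-queue list; the scheduler is derived from the rq. */
r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
if (r)
        return r;
/* ... later, on error or at VM teardown: */
drm_sched_entity_destroy(&vm->entity);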
@@ -2485,6 +2680,9 @@ error_free_sched_entity:
 /**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
  *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
  * This only works on GFX VMs that don't have any BOs added and no
  * page tables allocated yet.
  *
@@ -2494,10 +2692,10 @@ error_free_sched_entity:
  * - pasid (old PASID is released, because compute manages its own PASIDs)
  *
  * Reinitializes the page directory to reflect the changed ATS
- * setting. May leave behind an unused shadow BO for the page
- * directory when switching from SDMA updates to CPU updates.
+ * setting.
  *
- * Returns 0 for success, -errno for errors.
+ * Returns:
+ * 0 for success, -errno for errors.
  */
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
@@ -2531,7 +2729,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        vm->pte_support_ats = pte_support_ats;
        DRM_DEBUG_DRIVER("VM update mode is %s\n",
                         vm->use_cpu_for_update ? "CPU" : "SDMA");
-       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+       WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
                  "CPU update of VM recommended only for large BAR system\n");
 
        if (vm->pasid) {
@@ -2544,6 +2742,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                vm->pasid = 0;
        }
 
+       /* Free the shadow bo for compute VM */
+       amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
 error:
        amdgpu_bo_unreserve(vm->root.base.bo);
        return r;
@@ -2610,7 +2811,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
        }
 
-       drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+       drm_sched_entity_destroy(&vm->entity);
 
        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                dev_err(adev->dev, "still active bo inside vm\n");
@@ -2652,8 +2853,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @pasid: PASID to identify the VM
  *
- * This function is expected to be called in interrupt context. Returns
- * true if there was fault credit, false otherwise
+ * This function is expected to be called in interrupt context.
+ *
+ * Returns:
+ * True if there was fault credit, false otherwise
  */
 bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
                                  unsigned int pasid)
@@ -2707,7 +2910,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
         */
 #ifdef CONFIG_X86_64
        if (amdgpu_vm_update_mode == -1) {
-               if (amdgpu_vm_is_large_bar(adev))
+               if (amdgpu_gmc_vram_full_visible(&adev->gmc))
                        adev->vm_manager.vm_update_mode =
                                AMDGPU_VM_USE_CPU_FOR_COMPUTE;
                else
@@ -2737,6 +2940,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
        amdgpu_vmid_mgr_fini(adev);
 }
 
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_vm
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        union drm_amdgpu_vm *args = data;
@@ -2760,3 +2973,42 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
        return 0;
 }
+
+/**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+                        struct amdgpu_task_info *task_info)
+{
+       struct amdgpu_vm *vm;
+
+       spin_lock(&adev->vm_manager.pasid_lock);
+
+       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+       if (vm)
+               *task_info = vm->task_info;
+
+       spin_unlock(&adev->vm_manager.pasid_lock);
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+       if (!vm->task_info.pid) {
+               vm->task_info.pid = current->pid;
+               get_task_comm(vm->task_info.task_name, current);
+
+               if (current->group_leader->mm == current->mm) {
+                       vm->task_info.tgid = current->group_leader->pid;
+                       get_task_comm(vm->task_info.process_name, current->group_leader);
+               }
+       }
+}
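
amdgpu_vm_set_task_info() fills the info only once per VM (pid acts as the latch), and amdgpu_vm_get_task_info() reads it back under the PASID lock. A hedged sketch of a consumer, e.g. a fault handler resolving a PASID to the owning process (the call site is hypothetical, not part of this hunk):

/* Hypothetical fault-handler snippet: map the faulting PASID back to
 * the task that created the VM. */
struct amdgpu_task_info task_info = {};

amdgpu_vm_get_task_info(adev, pasid, &task_info);
if (task_info.pid)
        dev_err(adev->dev, "fault in process %s (pid %d), thread %s (pid %d)\n",
                task_info.process_name, task_info.tgid,
                task_info.task_name, task_info.pid);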
index 061b99a18cb81d7b2a9fb197bbc33c4df14d9d02..67a15d439ac006f97b48ffe7e74544d6688bf452 100644 (file)
@@ -164,6 +164,14 @@ struct amdgpu_vm_pt {
 #define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
 #define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
 
+
+struct amdgpu_task_info {
+       char    process_name[TASK_COMM_LEN];
+       char    task_name[TASK_COMM_LEN];
+       pid_t   pid;
+       pid_t   tgid;
+};
+
 struct amdgpu_vm {
        /* tree of virtual addresses mapped */
        struct rb_root_cached   va;
@@ -215,6 +223,9 @@ struct amdgpu_vm {
 
        /* Valid while the PD is reserved or fenced */
        uint64_t                pd_phys_addr;
+
+       /* Some basic info about the task */
+       struct amdgpu_task_info task_info;
 };
 
 struct amdgpu_vm_manager {
@@ -307,6 +318,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                                uint64_t saddr, uint64_t size);
 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
@@ -317,4 +329,9 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job);
 void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+                        struct amdgpu_task_info *task_info);
+
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
 #endif
index 9aca653bec07714874297e327eb950225f5ac555..9cfa8a9ada921b8b0844a9bd140a68e1c1eca8c1 100644 (file)
@@ -96,6 +96,34 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                adev->gmc.visible_vram_size : end) - start;
 }
 
+/**
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_mem_reg *mem = &bo->tbo.mem;
+       struct drm_mm_node *nodes = mem->mm_node;
+       unsigned pages = mem->num_pages;
+       u64 usage;
+
+       if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+               return amdgpu_bo_size(bo);
+
+       if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+               return 0;
+
+       for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
+               usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+
+       return usage;
+}
+
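The visible-size helper above short-circuits the two easy cases (fully visible VRAM; a BO starting past the aperture) and otherwise sums the per-node overlap with the CPU-visible window. A standalone worked example of that clamp, with illustrative sizes (a 256 MiB aperture, one node straddling it, one node past it):

#include <stdint.h>
#include <stdio.h>

/* Per-node clamp against the CPU-visible aperture; mirrors the logic of
 * amdgpu_vram_mgr_vis_size, sizes are illustrative. */
static uint64_t vis_bytes(uint64_t start, uint64_t end, uint64_t visible)
{
        if (start >= visible)
                return 0;
        return (end > visible ? visible : end) - start;
}

int main(void)
{
        uint64_t visible = 256ull << 20;                 /* 256 MiB */
        uint64_t usage;

        /* node A: 64 MiB at 224 MiB -> 32 MiB below the aperture end */
        /* node B: 32 MiB at 512 MiB -> entirely invisible */
        usage  = vis_bytes(224ull << 20, 288ull << 20, visible);
        usage += vis_bytes(512ull << 20, 544ull << 20, visible);

        printf("visible usage: %llu MiB\n",
               (unsigned long long)(usage >> 20));       /* 32 */
        return 0;
}
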
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -135,7 +163,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }
 
-       nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+       nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+                              GFP_KERNEL | __GFP_ZERO);
        if (!nodes)
                return -ENOMEM;
 
@@ -190,7 +219,7 @@ error:
                drm_mm_remove_node(&nodes[i]);
        spin_unlock(&mgr->lock);
 
-       kfree(nodes);
+       kvfree(nodes);
        return r == -ENOSPC ? 0 : r;
 }
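
Switching from kcalloc() to kvmalloc_array() matters here because num_nodes scales with BO size: a large VRAM BO can need thousands of drm_mm_node entries, and a physically contiguous kmalloc of that array may fail under memory pressure, while kvmalloc falls back to vmalloc. kvfree() releases either kind. The pattern in brief, using only the calls already shown above:

nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
                       GFP_KERNEL | __GFP_ZERO);   /* vmalloc fallback */
if (!nodes)
        return -ENOMEM;
/* ... fill and use nodes ... */
kvfree(nodes);                                     /* frees either kind */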
 
@@ -229,7 +258,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        atomic64_sub(usage, &mgr->usage);
        atomic64_sub(vis_usage, &mgr->vis_usage);
 
-       kfree(mem->mm_node);
+       kvfree(mem->mm_node);
        mem->mm_node = NULL;
 }
 
index 7fbad2f5f0bd7bbe76a7518b0c49bbed6e010602..d2469453dca26e8d4848fa98037d44bdea92cd8d 100644 (file)
 #include "gmc/gmc_7_1_d.h"
 #include "gmc/gmc_7_1_sh_mask.h"
 
-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
 
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -951,12 +951,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
        else
                pi->battery_state = false;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
 
-       if (adev->pm.dpm.ac_power == false) {
+       if (adev->pm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
@@ -4078,7 +4078,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4127,7 +4127,7 @@ static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4160,7 +4160,7 @@ static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -4191,7 +4191,7 @@ static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
        const struct amdgpu_clock_and_voltage_limits *max_limits;
        int i;
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -5815,7 +5815,7 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
        err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -5846,8 +5846,7 @@ static int ci_dpm_init(struct amdgpu_device *adev)
        adev->pm.dpm.priv = pi;
 
        pi->sys_pcie_mask =
-               (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
-               CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+               adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
 
        pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
 
@@ -6767,6 +6766,19 @@ static int ci_dpm_read_sensor(void *handle, int idx,
        }
 }
 
+static int ci_set_powergating_by_smu(void *handle,
+                               uint32_t block_type, bool gate)
+{
+       switch (block_type) {
+       case AMD_IP_BLOCK_TYPE_UVD:
+               ci_dpm_powergate_uvd(handle, gate);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
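ci_set_powergating_by_smu() above replaces the dedicated .powergate_uvd hook with a single entry point that dispatches on the IP block type; other blocks fall through harmlessly. Extending it would just add another case, for example (hypothetical; ci_dpm_powergate_vce is assumed and not part of this patch):

case AMD_IP_BLOCK_TYPE_VCE:
        /* Hypothetical: would require a ci_dpm_powergate_vce helper. */
        ci_dpm_powergate_vce(handle, gate);
        break;
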
 static const struct amd_ip_funcs ci_dpm_ip_funcs = {
        .name = "ci_dpm",
        .early_init = ci_dpm_early_init,
@@ -6804,7 +6816,7 @@ static const struct amd_pm_funcs ci_dpm_funcs = {
        .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
        .force_performance_level = &ci_dpm_force_performance_level,
        .vblank_too_short = &ci_dpm_vblank_too_short,
-       .powergate_uvd = &ci_dpm_powergate_uvd,
+       .set_powergating_by_smu = &ci_set_powergating_by_smu,
        .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
        .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
        .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
index 8ff4c60d1b59599bfd6cee31e3cee1ede9d6a51c..78ab939ae5d864f54b88eda53847e0fbf5dca7ca 100644 (file)
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
                                tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
                                WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
 
-                               mdelay(100);
+                               msleep(100);
 
                                /* linkctl */
                                pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
@@ -2003,9 +2003,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
                amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
                if (amdgpu_dpm == -1)
-                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
-               else
                        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+               else
+                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
                if (adev->enable_virtual_display)
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
@@ -2024,9 +2024,9 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
                amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
                if (amdgpu_dpm == -1)
-                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
-               else
                        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+               else
+                       amdgpu_device_ip_block_add(adev, &ci_smu_ip_block);
                if (adev->enable_virtual_display)
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
index a7576255cc3029fffa64230bb727d7034bea8681..d0fa2aac238884630eba999d325b2e7d9e384504 100644 (file)
@@ -54,16 +54,16 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
 static int cik_sdma_soft_reset(void *handle);
 
-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
@@ -132,9 +132,9 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
-                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
-                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
+                       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
                err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
                if (err)
                        goto out;
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-       return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+       return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
 }
 
 /**
@@ -192,9 +191,8 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
+       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
                        (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
 }
 
@@ -248,7 +246,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;
 
-       if (ring == &ring->adev->sdma.instance[0].ring)
+       if (ring->me == 0)
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
        else
                ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -1290,8 +1288,10 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+               adev->sdma.instance[i].ring.me = i;
+       }
 }
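
Rather than deciding the SDMA instance by comparing the ring pointer on every register access, the instance index is now cached once in ring->me when the ring funcs are installed, and the wptr and HDP-flush paths index sdma_offsets[ring->me] directly. A standalone sketch of the idea (register values illustrative):

#include <stdio.h>

struct ring { int me; };

static const unsigned int sdma_offsets[2] = { 0x0, 0x200 };

/* Cache the instance index once at init time... */
static void set_ring_funcs(struct ring *rings, int n)
{
        int i;

        for (i = 0; i < n; i++)
                rings[i].me = i;
}

/* ...so hot paths index per-instance registers without pointer games. */
static unsigned int wptr_reg(const struct ring *ring)
{
        return 0x1000 /* illustrative base register */ +
               sdma_offsets[ring->me];
}

int main(void)
{
        struct ring rings[2];

        set_ring_funcs(rings, 2);
        printf("instance 1 wptr reg: 0x%x\n", wptr_reg(&rings[1]));
        return 0;
}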
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
index ada241bfeee96c743c93cbfad8f567eaf1eaf6ac..308f9f238bc11ca34ec407f5edf57d12d2dd57fc 100644 (file)
@@ -41,6 +41,8 @@
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
 
@@ -1855,15 +1857,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
@@ -2370,13 +2371,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v10_0_lock_cursor(crtc, true);
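
amdgpu_bo_pin() no longer returns the GPU address through an out parameter; pinning and address lookup are now separate steps, so the atomic path (BO already pinned by the caller) and the pin path converge on one amdgpu_bo_gpu_offset() call. The resulting caller shape, repeated in the dce_v11/v6/v8 hunks below:

if (!atomic) {
        r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);  /* pin only */
        if (unlikely(r != 0)) {
                amdgpu_bo_unreserve(abo);
                return -EINVAL;
        }
}
fb_location = amdgpu_bo_gpu_offset(abo);  /* query address uniformly */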
 
@@ -2737,14 +2739,14 @@ static int dce_v10_0_sw_init(void *handle)
                        return r;
        }
 
-       for (i = 8; i < 20; i += 2) {
+       for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r)
                return r;
 
index a5b96eac3033232d46d3e5320f7ae9e9e72f97e8..76dfb76f7900c671527a5ae972840d4d7942fb34 100644 (file)
@@ -41,6 +41,8 @@
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
 
@@ -1897,15 +1899,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
@@ -2449,13 +2450,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v11_0_lock_cursor(crtc, true);
 
@@ -2858,14 +2860,14 @@ static int dce_v11_0_sw_init(void *handle)
                        return r;
        }
 
-       for (i = 8; i < 20; i += 2) {
+       for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
                r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }
 
        /* HPD hotplug */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r)
                return r;
 
index 394cc1e8fe20e22f4131f6697f24fa248f536996..c9adc627305da9411dab4f565b00008b2927b2bd 100644 (file)
@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v6_0_lock_cursor(crtc, true);
 
index c9b9ab8f1b059a8b0385babc3a2b50547c566450..50cd03beac7d673cc448245e616b5463e50b739d 100644 (file)
@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
        if (unlikely(r != 0))
                return r;
 
-       if (atomic) {
-               fb_location = amdgpu_bo_gpu_offset(abo);
-       } else {
-               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+       if (!atomic) {
+               r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
+       fb_location = amdgpu_bo_gpu_offset(abo);
 
        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
                return ret;
        }
 
-       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+       ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
+       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
        dce_v8_0_lock_cursor(crtc, true);
 
index dbf2ccd0c7447c5c6b9d887c740897fdc0a82a04..15257634a53aa673e30f04068bf3c1e48b541ac7 100644 (file)
@@ -36,6 +36,7 @@
 #include "dce_v10_0.h"
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
@@ -269,25 +270,18 @@ static int dce_virtual_early_init(void *handle)
 static struct drm_encoder *
 dce_virtual_encoder(struct drm_connector *connector)
 {
-       int enc_id = connector->encoder_ids[0];
        struct drm_encoder *encoder;
        int i;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
                        return encoder;
        }
 
        /* pick the first one */
-       if (enc_id)
-               return drm_encoder_find(connector->dev, NULL, enc_id);
+       drm_connector_for_each_possible_encoder(connector, encoder, i)
+               return encoder;
+
        return NULL;
 }
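
drm_connector_for_each_possible_encoder() replaces the open-coded walk over connector->encoder_ids[], including the drm_encoder_find() lookup and NULL checks; the old "pick the first one" fallback becomes a loop that returns on its first iteration. Usage shape of the iterator:

struct drm_encoder *encoder;
int i;

/* Visit every encoder that can drive this connector; return early
 * once a suitable one is found. */
drm_connector_for_each_possible_encoder(connector, encoder, i) {
        if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
                return encoder;
}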
 
@@ -378,7 +372,7 @@ static int dce_virtual_sw_init(void *handle)
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
        if (r)
                return r;
 
@@ -634,7 +628,7 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
        drm_connector_register(connector);
 
        /* link them */
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index cd6bf291a853d9d0226a16c05485fc684237a82f..de184a8860573ef0413661ddb8b4d199b31513de 100644 (file)
@@ -44,30 +44,30 @@ static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
-MODULE_FIRMWARE("radeon/tahiti_me.bin");
-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
-
-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
-
-MODULE_FIRMWARE("radeon/verde_pfp.bin");
-MODULE_FIRMWARE("radeon/verde_me.bin");
-MODULE_FIRMWARE("radeon/verde_ce.bin");
-MODULE_FIRMWARE("radeon/verde_rlc.bin");
-
-MODULE_FIRMWARE("radeon/oland_pfp.bin");
-MODULE_FIRMWARE("radeon/oland_me.bin");
-MODULE_FIRMWARE("radeon/oland_ce.bin");
-MODULE_FIRMWARE("radeon/oland_rlc.bin");
-
-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
-MODULE_FIRMWARE("radeon/hainan_me.bin");
-MODULE_FIRMWARE("radeon/hainan_ce.bin");
-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
+MODULE_FIRMWARE("amdgpu/verde_me.bin");
+MODULE_FIRMWARE("amdgpu/verde_ce.bin");
+MODULE_FIRMWARE("amdgpu/verde_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
+MODULE_FIRMWARE("amdgpu/oland_me.bin");
+MODULE_FIRMWARE("amdgpu/oland_ce.bin");
+MODULE_FIRMWARE("amdgpu/oland_rlc.bin");
+
+MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hainan_me.bin");
+MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
+MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
 
 static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
 static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -335,7 +335,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -346,7 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -357,7 +357,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -368,7 +368,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
index 42b6144c1fd59c3e10724c4806c7eff04ce8eca9..95452c5a9df6ea6c0bea0ca817bcd57cf68fa057 100644 (file)
@@ -57,36 +57,36 @@ static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
-MODULE_FIRMWARE("radeon/bonaire_me.bin");
-MODULE_FIRMWARE("radeon/bonaire_ce.bin");
-MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
-MODULE_FIRMWARE("radeon/bonaire_mec.bin");
-
-MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
-MODULE_FIRMWARE("radeon/hawaii_me.bin");
-MODULE_FIRMWARE("radeon/hawaii_ce.bin");
-MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mec.bin");
-
-MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
-MODULE_FIRMWARE("radeon/kaveri_me.bin");
-MODULE_FIRMWARE("radeon/kaveri_ce.bin");
-MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec.bin");
-MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
-
-MODULE_FIRMWARE("radeon/kabini_pfp.bin");
-MODULE_FIRMWARE("radeon/kabini_me.bin");
-MODULE_FIRMWARE("radeon/kabini_ce.bin");
-MODULE_FIRMWARE("radeon/kabini_rlc.bin");
-MODULE_FIRMWARE("radeon/kabini_mec.bin");
-
-MODULE_FIRMWARE("radeon/mullins_pfp.bin");
-MODULE_FIRMWARE("radeon/mullins_me.bin");
-MODULE_FIRMWARE("radeon/mullins_ce.bin");
-MODULE_FIRMWARE("radeon/mullins_rlc.bin");
-MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
+
+MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
+MODULE_FIRMWARE("amdgpu/kabini_me.bin");
+MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
+MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
+MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
+
+MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
+MODULE_FIRMWARE("amdgpu/mullins_me.bin");
+MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
+MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
+MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
 
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
@@ -925,7 +925,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -933,7 +933,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
        if (err)
                goto out;
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -941,7 +941,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
        if (err)
                goto out;
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -949,7 +949,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
        if (err)
                goto out;
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -958,7 +958,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
                goto out;
 
        if (adev->asic_type == CHIP_KAVERI) {
-               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name);
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
                err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
                if (err)
                        goto out;
@@ -967,7 +967,7 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
                        goto out;
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
index 818874b13c99d1ee31d8dd7428ef27f0c0b1b267..5cd45210113f645062750e4ef54ffd4e7dc14da7 100644 (file)
@@ -51,6 +51,8 @@
 
 #include "smu/smu_7_1_3_d.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 #define GFX8_NUM_GFX_RINGS     1
 #define GFX8_MEC_HPD_SIZE 2048
 
@@ -704,6 +706,17 @@ static const u32 stoney_mgcg_cgcg_init[] =
        mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
 };
 
+
+static const char * const sq_edc_source_names[] = {
+       "SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
+       "SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
+       "SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
+       "SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
+       "SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
+       "SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
+       "SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
+};
+
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -866,26 +879,32 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
-       uint32_t scratch;
-       uint32_t tmp = 0;
+
+       unsigned int index;
+       uint64_t gpu_addr;
+       uint32_t tmp;
        long r;
 
-       r = amdgpu_gfx_scratch_get(adev, &scratch);
+       r = amdgpu_device_wb_get(adev, &index);
        if (r) {
-               DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+               dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }
-       WREG32(scratch, 0xCAFEDEAD);
+
+       gpu_addr = adev->wb.gpu_addr + (index * 4);
+       adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
-       ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
-       ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
-       ib.ptr[2] = 0xDEADBEEF;
-       ib.length_dw = 3;
+       ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+       ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+       ib.ptr[2] = lower_32_bits(gpu_addr);
+       ib.ptr[3] = upper_32_bits(gpu_addr);
+       ib.ptr[4] = 0xDEADBEEF;
+       ib.length_dw = 5;
 
        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
@@ -900,20 +919,21 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }
-       tmp = RREG32(scratch);
+
+       tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF) {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
-               DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
-                         scratch, tmp);
+               DRM_ERROR("ib test on ring %d failed\n", ring->idx);
                r = -EINVAL;
        }
+
 err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
 err1:
-       amdgpu_gfx_scratch_free(adev, scratch);
+       amdgpu_device_wb_free(adev, index);
        return r;
 }
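
The reworked IB test above drops the scratch register: it reserves a write-back (WB) slot in CPU-visible memory, seeds it with 0xCAFEDEAD, has the IB overwrite it with 0xDEADBEEF via PACKET3_WRITE_DATA, and compares after the fence signals. The handshake, modeled as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* CPU/GPU handshake used by the reworked IB test: the CPU seeds a
 * memory slot, the ring writes a token, the CPU checks it afterwards. */
int main(void)
{
        volatile uint32_t wb_slot = 0xCAFEDEAD;  /* adev->wb.wb[index] */

        /* What PACKET3_WRITE_DATA does from the GPU side, modeled here. */
        wb_slot = 0xDEADBEEF;

        printf(wb_slot == 0xDEADBEEF ? "ib test succeeded\n"
                                     : "ib test failed\n");
        return 0;
}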
 
@@ -1999,6 +2019,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        return 0;
 }
 
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
+
 static int gfx_v8_0_sw_init(void *handle)
 {
        int i, j, k, r, ring_id;
@@ -2027,27 +2049,43 @@ static int gfx_v8_0_sw_init(void *handle)
        adev->gfx.mec.num_queue_per_pipe = 8;
 
        /* KIQ event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq);
        if (r)
                return r;
 
        /* EOP Event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
        if (r)
                return r;
 
        /* Privileged reg */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
                              &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
        /* Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
                              &adev->gfx.priv_inst_irq);
        if (r)
                return r;
 
+       /* Add CP EDC/ECC irq  */
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
+                             &adev->gfx.cp_ecc_error_irq);
+       if (r)
+               return r;
+
+       /* SQ interrupts. */
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
+                             &adev->gfx.sq_irq);
+       if (r) {
+               DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
+               return r;
+       }
+
+       INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
+
        adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
 
        gfx_v8_0_scratch_init(adev);
@@ -5111,6 +5149,10 @@ static int gfx_v8_0_hw_fini(void *handle)
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
+       amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+
+       amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
+
        /* disable KCQ to avoid CPC touch memory not valid anymore */
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
@@ -5542,9 +5584,19 @@ static int gfx_v8_0_late_init(void *handle)
        if (r)
                return r;
 
-       amdgpu_device_ip_set_powergating_state(adev,
-                                              AMD_IP_BLOCK_TYPE_GFX,
-                                              AMD_PG_STATE_GATE);
+       r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+       if (r) {
+               DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
+               return r;
+       }
+
+       r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
+       if (r) {
+               DRM_ERROR("amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n", r);
+               return r;
+       }
 
        return 0;
 }
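
The new sources follow the usual reference-counted enable discipline: amdgpu_irq_get() here in late_init enables delivery through the source's .set callback, and the matching amdgpu_irq_put() calls added to gfx_v8_0_hw_fini() above disable it again. In outline:

	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);  /* refcount 0 -> 1: source enabled */
	/* ... hardware in use ... */
	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);      /* refcount 1 -> 0: source disabled */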
@@ -5552,14 +5604,12 @@ static int gfx_v8_0_late_init(void *handle)
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
                                                       bool enable)
 {
-       if ((adev->asic_type == CHIP_POLARIS11) ||
+       if (((adev->asic_type == CHIP_POLARIS11) ||
            (adev->asic_type == CHIP_POLARIS12) ||
-           (adev->asic_type == CHIP_VEGAM))
+           (adev->asic_type == CHIP_VEGAM)) &&
+           adev->powerplay.pp_funcs->set_powergating_by_smu)
                /* Send msg to SMU via Powerplay */
-               amdgpu_device_ip_set_powergating_state(adev,
-                                                      AMD_IP_BLOCK_TYPE_SMC,
-                                                      enable ?
-                                                      AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+               amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
 
        WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
 }
@@ -6787,6 +6837,77 @@ static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
+static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
+                                        struct amdgpu_irq_src *source,
+                                        unsigned int type,
+                                        enum amdgpu_interrupt_state state)
+{
+       int enable_flag;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               enable_flag = 0;
+               break;
+
+       case AMDGPU_IRQ_STATE_ENABLE:
+               enable_flag = 1;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+       WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
+                    enable_flag);
+
+       return 0;
+}
+
+static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
+                                    struct amdgpu_irq_src *source,
+                                    unsigned int type,
+                                    enum amdgpu_interrupt_state state)
+{
+       int enable_flag;
+
+       switch (state) {
+       case AMDGPU_IRQ_STATE_DISABLE:
+               enable_flag = 1;
+               break;
+
+       case AMDGPU_IRQ_STATE_ENABLE:
+               enable_flag = 0;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
+                    enable_flag);
+
+       return 0;
+}
+
 static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
@@ -6837,6 +6958,114 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+                                    struct amdgpu_irq_src *source,
+                                    struct amdgpu_iv_entry *entry)
+{
+       DRM_ERROR("CP EDC/ECC error detected.\n");
+       return 0;
+}
+
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+{
+       u32 enc, se_id, sh_id, cu_id;
+       char type[20];
+       int sq_edc_source = -1;
+
+       enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
+       se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
+
+       switch (enc) {
+               case 0:
+                       DRM_INFO("SQ general purpose intr detected: "
+                                       "se_id %d, immed_overflow %d, host_reg_overflow %d, "
+                                       "host_cmd_overflow %d, cmd_timestamp %d, "
+                                       "reg_timestamp %d, thread_trace_buff_full %d, "
+                                       "wlt %d, thread_trace %d.\n",
+                                       se_id,
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
+                                       );
+                       break;
+               case 1:
+               case 2:
+
+                       cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
+                       sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
+
+                       /*
+                        * This function is called either directly from the ISR
+                        * or from the deferred work item (BH); only in the
+                        * latter case is it safe to take grbm_idx_mutex and
+                        * read the SQ_EDC_INFO register instance.
+                        */
+                       if (in_task()) {
+                               mutex_lock(&adev->grbm_idx_mutex);
+                               gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
+
+                               sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
+
+                               gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+                               mutex_unlock(&adev->grbm_idx_mutex);
+                       }
+
+                       if (enc == 1)
+                               sprintf(type, "instruction intr");
+                       else
+                               sprintf(type, "EDC/ECC error");
+
+                       DRM_INFO(
+                               "SQ %s detected: "
+                                       "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d, "
+                                       "trap %s, sq_edc_info.source %s.\n",
+                                       type, se_id, sh_id, cu_id,
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
+                                       REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
+                                       (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
+                               );
+                       break;
+               default:
+                       DRM_ERROR("SQ invalid encoding type.\n");
+       }
+}
+
+static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
+{
+       struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+                                                 gfx.sq_work.work);
+       struct sq_work *sq_work = container_of(work, struct sq_work, work);
+
+       gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+}
+
+static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
+                          struct amdgpu_irq_src *source,
+                          struct amdgpu_iv_entry *entry)
+{
+       unsigned ih_data = entry->src_data[0];
+
+       /*
+        * Try to submit work so SQ_EDC_INFO can be accessed from
+        * BH. If previous work submission hasn't finished yet
+        * just print whatever info is possible directly from the ISR.
+        */
+       if (work_pending(&adev->gfx.sq_work.work)) {
+               gfx_v8_0_parse_sq_irq(adev, ih_data);
+       } else {
+               adev->gfx.sq_work.ih_data = ih_data;
+               schedule_work(&adev->gfx.sq_work.work);
+       }
+
+       return 0;
+}
+
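
gfx_v8_0_sq_irq() is the top half; it defers the SQ_EDC_INFO read to gfx_v8_0_sq_irq_work_func() because that read takes grbm_idx_mutex, which must not be acquired in interrupt context. Stripped of driver specifics, the pattern is (a sketch; parse() and the other names are illustrative):

	struct sq_work {
		struct work_struct work;
		unsigned int ih_data;            /* payload captured by the ISR */
	};

	static void sq_bh(struct work_struct *work)
	{
		struct sq_work *sq = container_of(work, struct sq_work, work);

		/* process context: sleeping locks and slow register reads are fine */
		parse(sq->ih_data);
	}

	/* ISR side, given a long-lived struct sq_work sq_work: */
	if (work_pending(&sq_work.work)) {
		parse(ih_data);                  /* fall back to a direct, lossy parse */
	} else {
		sq_work.ih_data = ih_data;       /* publish the payload, then schedule */
		schedule_work(&sq_work.work);
	}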
 static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *src,
                                            unsigned int type,
@@ -7037,6 +7266,16 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = {
        .process = gfx_v8_0_kiq_irq,
 };
 
+static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
+       .set = gfx_v8_0_set_cp_ecc_int_state,
+       .process = gfx_v8_0_cp_ecc_error_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
+       .set = gfx_v8_0_set_sq_int_state,
+       .process = gfx_v8_0_sq_irq,
+};
+
 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 {
        adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -7050,6 +7289,12 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 
        adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
        adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs;
+
+       adev->gfx.cp_ecc_error_irq.num_types = 1;
+       adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
+
+       adev->gfx.sq_irq.num_types = 1;
+       adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
 }
 
 static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
index a69153435ea7ec9ccf4180b70ead070734549688..ef00d14f86453bb1e0c4fd3653be6bb144e412f1 100644 (file)
@@ -38,6 +38,8 @@
 #include "clearstate_gfx9.h"
 #include "v9_structs.h"
 
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
 #define GFX9_NUM_GFX_RINGS     1
 #define GFX9_MEC_HPD_SIZE 2048
 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -102,11 +104,22 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
 };
 
 static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -648,7 +661,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-               if (adev->gfx.rlc.is_rlc_v2_1) {
+               if (adev->gfx.rlc.is_rlc_v2_1 &&
+                   adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
+                   adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
+                   adev->gfx.rlc.save_restore_list_srm_size_bytes) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
                        info->fw = adev->gfx.rlc_fw;
@@ -943,6 +959,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
                dst_ptr = adev->gfx.rlc.cs_ptr;
                gfx_v9_0_get_csb_buffer(adev, dst_ptr);
                amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
@@ -971,6 +988,39 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
        return 0;
 }
 
+static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
+{
+       int r;
+
+       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+       if (unlikely(r != 0))
+               return r;
+
+       r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
+                       AMDGPU_GEM_DOMAIN_VRAM);
+       if (!r)
+               adev->gfx.rlc.clear_state_gpu_addr =
+                       amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
+
+       amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+       return r;
+}
+
+static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
+{
+       int r;
+
+       if (!adev->gfx.rlc.clear_state_obj)
+               return;
+
+       r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
+       if (likely(r == 0)) {
+               amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+       }
+}
+
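
Together with the amdgpu_bo_unpin() added to gfx_v9_0_rlc_init() above, these helpers change the clear-state buffer's lifetime: rather than staying pinned in VRAM from init to fini, the CSB is pinned in gfx_v9_0_hw_init() and unpinned in gfx_v9_0_hw_fini() (see the hunks below). Note the reserve/(un)pin/unreserve discipline, and that amdgpu_bo_gpu_offset() is only valid while the buffer is pinned.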
 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
 {
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
@@ -1451,23 +1501,23 @@ static int gfx_v9_0_sw_init(void *handle)
        adev->gfx.mec.num_queue_per_pipe = 8;
 
        /* KIQ event */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq);
        if (r)
                return r;
 
        /* EOP Event */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
        if (r)
                return r;
 
        /* Privileged reg */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
                              &adev->gfx.priv_reg_irq);
        if (r)
                return r;
 
        /* Privileged inst */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
                              &adev->gfx.priv_inst_irq);
        if (r)
                return r;
@@ -2148,8 +2198,16 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
 
 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 {
-       if (!adev->gfx.rlc.is_rlc_v2_1)
-               return;
+       gfx_v9_0_init_csb(adev);
+
+       /*
+        * The RLC save/restore list is supported since v2_1
+        * and is required by the gfxoff feature.
+        */
+       if (adev->gfx.rlc.is_rlc_v2_1) {
+               gfx_v9_1_init_rlc_save_restore_list(adev);
+               gfx_v9_0_enable_save_restore_machine(adev);
+       }
 
        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
                              AMD_PG_SUPPORT_GFX_SMG |
@@ -2157,10 +2215,6 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
                              AMD_PG_SUPPORT_CP |
                              AMD_PG_SUPPORT_GDS |
                              AMD_PG_SUPPORT_RLC_SMU_HS)) {
-               gfx_v9_0_init_csb(adev);
-               gfx_v9_1_init_rlc_save_restore_list(adev);
-               gfx_v9_0_enable_save_restore_machine(adev);
-
                WREG32(mmRLC_JUMP_TABLE_RESTORE,
                       adev->gfx.rlc.cp_table_gpu_addr >> 8);
                gfx_v9_0_init_gfx_power_gating(adev);
@@ -2252,9 +2306,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-       /* disable PG */
-       WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
-
        gfx_v9_0_rlc_reset(adev);
 
        gfx_v9_0_init_pg(adev);
@@ -3116,6 +3167,10 @@ static int gfx_v9_0_hw_init(void *handle)
 
        gfx_v9_0_gpu_init(adev);
 
+       r = gfx_v9_0_csb_vram_pin(adev);
+       if (r)
+               return r;
+
        r = gfx_v9_0_rlc_resume(adev);
        if (r)
                return r;
@@ -3224,6 +3279,8 @@ static int gfx_v9_0_hw_fini(void *handle)
        gfx_v9_0_cp_enable(adev, false);
        gfx_v9_0_rlc_stop(adev);
 
+       gfx_v9_0_csb_vram_unpin(adev);
+
        return 0;
 }
 
@@ -3433,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 
                /* wait for RLC_SAFE_MODE */
                for (i = 0; i < adev->usec_timeout; i++) {
-                       if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+                       if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
                                break;
                        udelay(1);
                }
@@ -3510,8 +3567,11 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
                /* 1 - RLC_CGTT_MGCG_OVERRIDE */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
-               data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
-                         RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+
+               if (adev->asic_type != CHIP_VEGA12)
+                       data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+               data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
 
@@ -3541,11 +3601,15 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
        } else {
                /* 1 - MGCG_OVERRIDE */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
-               data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
-                        RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+
+               if (adev->asic_type != CHIP_VEGA12)
+                       data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
+
+               data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
                         RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
                         RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
                         RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
 
@@ -3581,9 +3645,11 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
                /* update CGCG and CGLS override bits */
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
-               /* enable 3Dcgcg FSM(0x0020003f) */
+
+               /* enable 3Dcgcg FSM(0x0000363f) */
                def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
-               data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+               data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                        RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
                        data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
@@ -3630,9 +3696,10 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
 
-               /* enable cgcg FSM(0x0020003F) */
+               /* enable cgcg FSM(0x0000363F) */
                def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
-               data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+
+               data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                        RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
                        data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
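
The updated FSM comment values follow from the RLC_CGCG_CGLS_CTRL field layout. Assuming the vega10 header bit positions (idle threshold at bit 8, CGLS_REP_COMPANSAT_DELAY at bit 2, CGLS_EN at bit 1, CGCG_EN at bit 0), the constants decompose as:

	data = (0x36 << 8)    /* CGCG_GFX_IDLE_THRESHOLD  -> 0x3600 */
	     | (0x000F << 2)  /* CGLS_REP_COMPANSAT_DELAY -> 0x003C */
	     | (1 << 1)       /* CGLS_EN, when CGLS is supported */
	     | (1 << 0);      /* CGCG_EN */
	/* = 0x0000363F; the old threshold of 0x2000 gave 0x0020003F */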
@@ -3714,6 +3781,15 @@ static int gfx_v9_0_set_powergating_state(void *handle,
 
                /* update mgcg state */
                gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
+
+               /* set gfx off through smu */
+               if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+                       amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
+               break;
+       case CHIP_VEGA12:
+               /* set gfx off through smu */
+               if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
+                       amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
                break;
        default:
                break;
index 79f9ac29019bdaf180d875d8215322f107931420..75317f283c6967d2de4daaaf5dca4cfdaf9922b7 100644 (file)
@@ -41,11 +41,11 @@ static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
-MODULE_FIRMWARE("radeon/verde_mc.bin");
-MODULE_FIRMWARE("radeon/oland_mc.bin");
-MODULE_FIRMWARE("radeon/si58_mc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
+MODULE_FIRMWARE("amdgpu/verde_mc.bin");
+MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -134,9 +134,9 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
                is_58_fw = true;
 
        if (is_58_fw)
-               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
        else
-               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
        err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
        if (err)
                goto out;
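
With the MODULE_FIRMWARE declarations and the request path above switched from radeon/ to amdgpu/, the SI MC microcode is now looked up as e.g. amdgpu/tahiti_mc.bin in the firmware search path. The gmc_v7 and si_dpm hunks below apply the same rename, so firmware packages must ship these blobs under the amdgpu/ directory.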
index 7147bfe25a2343beebec49cdfe1813a808328395..36dc367c4b45ea86a5a5b575ba357f9717ec92b0 100644 (file)
@@ -28,6 +28,7 @@
 #include "cik.h"
 #include "gmc_v7_0.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
 
 #include "bif/bif_4_1_d.h"
 #include "bif/bif_4_1_sh_mask.h"
 
 #include "amdgpu_atombios.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
-MODULE_FIRMWARE("radeon/bonaire_mc.bin");
-MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 
 static const u32 golden_settings_iceland_a11[] =
@@ -147,10 +150,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       if (adev->asic_type == CHIP_TOPAZ)
-               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
-       else
-               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
 
        err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
        if (err)
@@ -999,11 +999,11 @@ static int gmc_v7_0_sw_init(void *handle)
                adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
@@ -1079,6 +1079,12 @@ static int gmc_v7_0_sw_init(void *handle)
                adev->vm_manager.vram_base_offset = 0;
        }
 
+       adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+                                       GFP_KERNEL);
+       if (!adev->gmc.vm_fault_info)
+               return -ENOMEM;
+       atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
        return 0;
 }
 
@@ -1088,6 +1094,7 @@ static int gmc_v7_0_sw_fini(void *handle)
 
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
+       kfree(adev->gmc.vm_fault_info);
        gmc_v7_0_gart_fini(adev);
        amdgpu_bo_fini(adev);
        release_firmware(adev->gmc.fw);
@@ -1277,7 +1284,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
 {
-       u32 addr, status, mc_client;
+       u32 addr, status, mc_client, vmid;
 
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
@@ -1302,6 +1309,29 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
                                         entry->pasid);
        }
 
+       vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                            VMID);
+       if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+               && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+               struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+               u32 protections = REG_GET_FIELD(status,
+                                       VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                       PROTECTIONS);
+
+               info->vmid = vmid;
+               info->mc_id = REG_GET_FIELD(status,
+                                           VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                           MEMORY_CLIENT_ID);
+               info->status = status;
+               info->page_addr = addr;
+               info->prot_valid = protections & 0x7 ? true : false;
+               info->prot_read = protections & 0x8 ? true : false;
+               info->prot_write = protections & 0x10 ? true : false;
+               info->prot_exec = protections & 0x20 ? true : false;
+               mb();
+               atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+       }
+
        return 0;
 }
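
The captured record is published to KFD through a single-slot mailbox: the handler writes only while vm_fault_info_updated is clear, and the mb() orders the info stores before the flag store so a reader that observes the flag set also observes a complete record. The intended pairing looks like this (the consumer side is an assumption based on the flag's semantics, not part of this diff; an identical capture block is added to gmc_v8_0 below):

	/* producer: the GMC fault handler, as above */
	fill_fault_info(info);
	mb();                                            /* payload before flag */
	atomic_set(&adev->gmc.vm_fault_info_updated, 1);

	/* consumer: KFD side, sketched */
	if (atomic_read(&adev->gmc.vm_fault_info_updated)) {
		consume_fault_info(info);
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}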
 
index 1edbe6b477b56dc309a49eb1d5a2e68d11ffa8be..70fc97b59b4f2dcf157b49885c8356fedfca05a3 100644 (file)
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "gmc_v8_0.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
 
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
@@ -44,6 +45,7 @@
 
 #include "amdgpu_atombios.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1101,11 +1103,11 @@ static int gmc_v8_0_sw_init(void *handle)
                adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
        }
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
        if (r)
                return r;
 
@@ -1181,6 +1183,12 @@ static int gmc_v8_0_sw_init(void *handle)
                adev->vm_manager.vram_base_offset = 0;
        }
 
+       adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+                                       GFP_KERNEL);
+       if (!adev->gmc.vm_fault_info)
+               return -ENOMEM;
+       atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
        return 0;
 }
 
@@ -1190,6 +1198,7 @@ static int gmc_v8_0_sw_fini(void *handle)
 
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
+       kfree(adev->gmc.vm_fault_info);
        gmc_v8_0_gart_fini(adev);
        amdgpu_bo_fini(adev);
        release_firmware(adev->gmc.fw);
@@ -1425,7 +1434,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
 {
-       u32 addr, status, mc_client;
+       u32 addr, status, mc_client, vmid;
 
        if (amdgpu_sriov_vf(adev)) {
                dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
@@ -1447,8 +1456,13 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
                gmc_v8_0_set_fault_enable_default(adev, false);
 
        if (printk_ratelimit()) {
-               dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-                       entry->src_id, entry->src_data[0]);
+               struct amdgpu_task_info task_info = { 0 };
+
+               amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+               dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n",
+                       entry->src_id, entry->src_data[0], task_info.process_name,
+                       task_info.tgid, task_info.task_name, task_info.pid);
                dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                        addr);
                dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1457,6 +1471,29 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
                                         entry->pasid);
        }
 
+       vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                            VMID);
+       if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+               && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+               struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+               u32 protections = REG_GET_FIELD(status,
+                                       VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                       PROTECTIONS);
+
+               info->vmid = vmid;
+               info->mc_id = REG_GET_FIELD(status,
+                                           VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+                                           MEMORY_CLIENT_ID);
+               info->status = status;
+               info->page_addr = addr;
+               info->prot_valid = protections & 0x7 ? true : false;
+               info->prot_read = protections & 0x8 ? true : false;
+               info->prot_write = protections & 0x10 ? true : false;
+               info->prot_exec = protections & 0x20 ? true : false;
+               mb();
+               atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+       }
+
        return 0;
 }
 
index 3c0a85d4e4ab9b513ec2681ab199659bba6e1bf3..399a5db27649728686868550502089391f3b0807 100644 (file)
@@ -43,6 +43,8 @@
 #include "gfxhub_v1_0.h"
 #include "mmhub_v1_0.h"
 
+#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+
 /* add these here since we already include dce12 headers and these are for DCN */
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
@@ -257,12 +259,17 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
        }
 
        if (printk_ratelimit()) {
+               struct amdgpu_task_info task_info = { 0 };
+
+               amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
                dev_err(adev->dev,
-                       "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
+                       "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u) for process %s pid %d thread %s pid %d\n",
                        entry->vmid_src ? "mmhub" : "gfxhub",
                        entry->src_id, entry->ring_id, entry->vmid,
-                       entry->pasid);
-               dev_err(adev->dev, "  at page 0x%016llx from %d\n",
+                       entry->pasid, task_info.process_name, task_info.tgid,
+                       task_info.task_name, task_info.pid);
+               dev_err(adev->dev, "  at address 0x%016llx from %d\n",
                        addr, entry->client_id);
                if (!amdgpu_sriov_vf(adev))
                        dev_err(adev->dev,
@@ -872,9 +879,9 @@ static int gmc_v9_0_sw_init(void *handle)
        }
 
        /* This interrupt is VMC page fault.*/
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, 0,
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
                                &adev->gmc.vm_fault);
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, 0,
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
                                &adev->gmc.vm_fault);
 
        if (r)
index 7a1e77c93bf1be75b8e25ca96911eb4b59a2a812..3f57f6463dc880c797429d9f3080894b929e5eff 100644 (file)
@@ -1921,7 +1921,7 @@ static int kv_dpm_set_power_state(void *handle)
        int ret;
 
        if (pi->bapm_enable) {
-               ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
+               ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
                if (ret) {
                        DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
                        return ret;
@@ -3306,6 +3306,19 @@ static int kv_dpm_read_sensor(void *handle, int idx,
        }
 }
 
+static int kv_set_powergating_by_smu(void *handle,
+                               uint32_t block_type, bool gate)
+{
+       switch (block_type) {
+       case AMD_IP_BLOCK_TYPE_UVD:
+               kv_dpm_powergate_uvd(handle, gate);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
 static const struct amd_ip_funcs kv_dpm_ip_funcs = {
        .name = "kv_dpm",
        .early_init = kv_dpm_early_init,
@@ -3342,7 +3355,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
        .print_power_state = &kv_dpm_print_power_state,
        .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
        .force_performance_level = &kv_dpm_force_performance_level,
-       .powergate_uvd = &kv_dpm_powergate_uvd,
+       .set_powergating_by_smu = kv_set_powergating_by_smu,
        .enable_bapm = &kv_dpm_enable_bapm,
        .get_vce_clock_state = amdgpu_get_vce_clock_state,
        .check_state_equal = kv_check_state_equal,
index 3d53c4413f13869d1d3dc95a87df88af7dce699a..e70a0d4d6db4111a59239accef4d47d0d6676d72 100644 (file)
@@ -471,8 +471,8 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                                                RENG_EXECUTE_ON_REG_UPDATE, 1);
                WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
 
-               if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
-                       amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
+               if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+                       amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
 
        } else {
                pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
index c7190c39c4f52cf44fed320402e35de3125c2346..15ae4bc9c072741d6b8fb319b9371b87e08ac6c4 100644 (file)
@@ -44,6 +44,8 @@
 
 #include "iceland_sdma_pkt_open.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -202,8 +204,7 @@ static uint64_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-       u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+       u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
 
        return wptr;
 }
@@ -218,9 +219,8 @@ static uint64_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
-       int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
-       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+       WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
 }
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@@ -273,7 +273,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
 
-       if (ring == &ring->adev->sdma.instance[0].ring)
+       if (ring->me == 0)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -898,7 +898,7 @@ static int sdma_v2_4_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
@@ -910,7 +910,7 @@ static int sdma_v2_4_sw_init(void *handle)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
@@ -1213,8 +1213,10 @@ static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
+               adev->sdma.instance[i].ring.me = i;
+       }
 }
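
Caching the SDMA instance index in ring->me at set_ring_funcs() time replaces the pointer comparison previously used to derive it on every register access, and scales past two instances. The before/after, taken from the hunks above:

	/* before: derive the instance on each call */
	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
	wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;

	/* after: the index is assigned once per ring in sdma_v2_4_set_ring_funcs() */
	wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;

The same conversion is applied to sdma_v3_0 and sdma_v4_0 below.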
 
 static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
index aa9ab299fd32a0efc4bb4872b3c34c3c9b6c7c07..1e07ff274d73433b34e14a9aa4b94caf04bba961 100644 (file)
@@ -44,6 +44,8 @@
 
 #include "tonga_sdma_pkt_open.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -365,9 +367,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
                /* XXX check if swapping is necessary on BE */
                wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
        } else {
-               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
-               wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
+               wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
        }
 
        return wptr;
@@ -394,9 +394,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
                WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
        } else {
-               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
-               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], lower_32_bits(ring->wptr) << 2);
+               WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
        }
 }
 
@@ -450,7 +448,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
        u32 ref_and_mask = 0;
 
-       if (ring == &ring->adev->sdma.instance[0].ring)
+       if (ring->me == 0)
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
        else
                ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -1179,7 +1177,7 @@ static int sdma_v3_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
@@ -1191,7 +1189,7 @@ static int sdma_v3_0_sw_init(void *handle)
                return r;
 
        /* SDMA Privileged inst */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
                              &adev->sdma.illegal_inst_irq);
        if (r)
                return r;
@@ -1655,8 +1653,10 @@ static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
+               adev->sdma.instance[i].ring.me = i;
+       }
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
index ca53b3fba422d83a6a18122c6b773ec676c15def..e7ca4623cfb946f41b0e3e1ffb107a3f295daedb 100644 (file)
@@ -38,6 +38,9 @@
 #include "soc15.h"
 #include "vega10_sdma_pkt_open.h"
 
+#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
+#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+
 MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
 MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -296,13 +299,12 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                u32 lowbit, highbit;
-               int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
-               lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
-               highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+               lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+               highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
 
                DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
-                               me, highbit, lowbit);
+                               ring->me, highbit, lowbit);
                wptr = highbit;
                wptr = wptr << 32;
                wptr |= lowbit;
@@ -339,17 +341,15 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
                                ring->doorbell_index, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
-               int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
-
                DRM_DEBUG("Not using doorbell -- "
                                "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
                                "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
-                               me,
+                               ring->me,
                                lower_32_bits(ring->wptr << 2),
-                               me,
+                               ring->me,
                                upper_32_bits(ring->wptr << 2));
-               WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
-               WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+               WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+               WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
        }
 }
 
@@ -430,7 +430,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
 
-       if (ring == &ring->adev->sdma.instance[0].ring)
+       if (ring->me == 0)
                ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
        else
                ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1;
@@ -1228,13 +1228,13 @@ static int sdma_v4_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, 224,
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
 
        /* SDMA trap event */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, 224,
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP,
                              &adev->sdma.trap_irq);
        if (r)
                return r;
@@ -1651,8 +1651,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
+               adev->sdma.instance[i].ring.me = i;
+       }
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
index 5c97a36717264f5ca9bf3924927cd934b2f6d963..db327b4125626d411e155de18bfeb28f4efb92b4 100644 (file)
 
 #define BIOS_SCRATCH_4                                    0x5cd
 
-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
-MODULE_FIRMWARE("radeon/verde_smc.bin");
-MODULE_FIRMWARE("radeon/verde_k_smc.bin");
-MODULE_FIRMWARE("radeon/oland_smc.bin");
-MODULE_FIRMWARE("radeon/oland_k_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_smc.bin");
-MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
-MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
+MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_smc.bin");
+MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_smc.bin");
+MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
 
 static const struct amd_pm_funcs si_dpm_funcs;
 
@@ -3480,7 +3480,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                disable_sclk_switching = true;
        }
 
-       if (adev->pm.dpm.ac_power)
+       if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
@@ -3489,7 +3489,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
                        ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
        }
-       if (adev->pm.dpm.ac_power == false) {
+       if (adev->pm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
@@ -7318,8 +7318,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
        pi = &eg_pi->rv7xx;
 
        si_pi->sys_pcie_mask =
-               (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
-               CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+               adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
        si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
        si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
@@ -7667,7 +7666,7 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
        err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
        if (err)
                goto out;
index 8dc29107228fd145ad27331a8cf3b0bda55be655..edfe50821cd9bf5e042fc001eb7ca204660f9aed 100644 (file)
 
 #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
 
+#define        PACKETJ_CONDITION_CHECK0        0
+#define        PACKETJ_CONDITION_CHECK1        1
+#define        PACKETJ_CONDITION_CHECK2        2
+#define        PACKETJ_CONDITION_CHECK3        3
+#define        PACKETJ_CONDITION_CHECK4        4
+#define        PACKETJ_CONDITION_CHECK5        5
+#define        PACKETJ_CONDITION_CHECK6        6
+#define        PACKETJ_CONDITION_CHECK7        7
+
+#define        PACKETJ_TYPE0   0
+#define        PACKETJ_TYPE1   1
+#define        PACKETJ_TYPE2   2
+#define        PACKETJ_TYPE3   3
+#define        PACKETJ_TYPE4   4
+#define        PACKETJ_TYPE5   5
+#define        PACKETJ_TYPE6   6
+#define        PACKETJ_TYPE7   7
+
+#define PACKETJ(reg, r, cond, type)    ((reg & 0x3FFFF) |      \
+                                        ((r & 0x3F) << 18) |   \
+                                        ((cond & 0xF) << 24) | \
+                                        ((type & 0xF) << 28))
+
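
PACKETJ packs a command word from four fields: reg in bits [17:0], r in [23:18], cond in [27:24] and type in [31:28]. Two worked examples (register offsets chosen purely for illustration):

	uint32_t w0 = PACKETJ(0x1f3, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE0);
	/* = (0x1f3 & 0x3FFFF) | (0 << 18) | (0 << 24) | (0 << 28) = 0x000001f3 */

	uint32_t w6 = PACKETJ(0, 1, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE6);
	/* = 0 | (1 << 18) | (3 << 24) | (6 << 28) = 0x63040000 */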
 /* Packet 3 types */
 #define        PACKET3_NOP                                     0x10
 #define        PACKET3_SET_BASE                                0x11
index 341ee6d55ce89e0aeaac6f646f05f7d3be49d68c..aeaa1ca46a99dc1a4801f921ea8942ad30ca1d56 100644 (file)
@@ -35,6 +35,7 @@
 #include "vi.h"
 #include "smu/smu_7_1_2_d.h"
 #include "smu/smu_7_1_2_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -104,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle)
        int r;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
        if (r)
                return r;
 
index bfddf97dd13e6048ab009a5ef3f3655b72bad48d..598dbeaba63686e2e2476528adace86d035b3f70 100644 (file)
@@ -36,6 +36,7 @@
 #include "bif/bif_5_1_d.h"
 #include "gmc/gmc_8_1_d.h"
 #include "vi.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 /* Polaris10/11/12 firmware version */
 #define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
@@ -247,12 +248,10 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-       job->fence = dma_fence_get(f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;
 
-       amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
@@ -311,19 +310,13 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       if (direct) {
-               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-               job->fence = dma_fence_get(f);
-               if (r)
-                       goto err;
-
-               amdgpu_job_free(job);
-       } else {
-               r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+       if (direct)
+               r = amdgpu_job_submit_direct(job, ring, &f);
+       else
+               r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-               if (r)
-                       goto err;
-       }
+       if (r)
+               goto err;
 
        if (fence)
                *fence = dma_fence_get(f);
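
The open-coded submission sequence removed here (amdgpu_ib_schedule(), taking a fence reference into job->fence, then amdgpu_job_free()) is collapsed into the amdgpu_job_submit_direct() helper; note also that amdgpu_job_submit() no longer takes an explicit ring and is called with the scheduler entity alone. The uvd_v7_0 hunks below make the same conversion.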
@@ -400,14 +393,14 @@ static int uvd_v6_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* UVD TRAP */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
        if (r)
                return r;
 
        /* UVD ENC TRAP */
        if (uvd_v6_0_enc_support(adev)) {
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-                       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
+                       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
                        if (r)
                                return r;
                }
@@ -425,16 +418,6 @@ static int uvd_v6_0_sw_init(void *handle)
                adev->uvd.num_enc_rings = 0;
 
                DRM_INFO("UVD ENC is disabled\n");
-       } else {
-               struct drm_sched_rq *rq;
-               ring = &adev->uvd.inst->ring_enc[0];
-               rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-                                         rq, NULL);
-               if (r) {
-                       DRM_ERROR("Failed setting up UVD ENC run queue.\n");
-                       return r;
-               }
        }
 
        r = amdgpu_uvd_resume(adev);
@@ -470,8 +453,6 @@ static int uvd_v6_0_sw_fini(void *handle)
                return r;
 
        if (uvd_v6_0_enc_support(adev)) {
-               drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
-
                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
        }
@@ -1569,7 +1550,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
-       .nop = PACKET0(mmUVD_NO_OP, 0),
        .support_64bit_ptrs = false,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
@@ -1587,7 +1567,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
        .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
-       .insert_nop = amdgpu_ring_insert_nop,
+       .insert_nop = uvd_v6_0_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
index 57d32f21b3a611e5bc5d009cb0dd7d32430720bb..5fab3560a71db8b3b4d6116c2de0e94337beb208 100644 (file)
 #include "hdp/hdp_4_0_offset.h"
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
+#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+
+#define mmUVD_PG0_CC_UVD_HARVESTING                                                                    0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX                                                           1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT                                                         0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK                                                           0x00000002L
 
 #define UVD7_MAX_HW_INSTANCES_VEGA20                   2
 
@@ -249,12 +256,10 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-       job->fence = dma_fence_get(f);
+       r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;
 
-       amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
@@ -312,19 +317,13 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;
 
-       if (direct) {
-               r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-               job->fence = dma_fence_get(f);
-               if (r)
-                       goto err;
-
-               amdgpu_job_free(job);
-       } else {
-               r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+       if (direct)
+               r = amdgpu_job_submit_direct(job, ring, &f);
+       else
+               r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-               if (r)
-                       goto err;
-       }
+       if (r)
+               goto err;
 
        if (fence)
                *fence = dma_fence_get(f);
@@ -377,10 +376,25 @@ error:
 static int uvd_v7_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       if (adev->asic_type == CHIP_VEGA20)
+
+       if (adev->asic_type == CHIP_VEGA20) {
+               u32 harvest;
+               int i;
+
                adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
-       else
+               for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+                       harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+                       if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+                               adev->uvd.harvest_config |= 1 << i;
+                       }
+               }
+               if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+                                                AMDGPU_UVD_HARVEST_UVD1))
+                       /* both instances are harvested, disable the block */
+                       return -ENOENT;
+       } else {
                adev->uvd.num_uvd_inst = 1;
+       }
 
        if (amdgpu_sriov_vf(adev))
                adev->uvd.num_enc_rings = 1;
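The early_init hunk above reads the per-instance harvest fuse and folds it into a bitmask that every later loop tests with harvest_config & (1 << i). A standalone sketch of the detection, with a hypothetical read_fuse() standing in for RREG32_SOC15:

/* Sketch only: read_fuse() is a hypothetical stand-in for the fuse read. */
#include <stdio.h>

#define UVD_DISABLE_MASK 0x00000002u
#define MAX_INST 2

static unsigned int read_fuse(int inst)
{
	/* Pretend instance 1 is fused off on this die. */
	return inst == 1 ? UVD_DISABLE_MASK : 0;
}

int main(void)
{
	unsigned int harvest_config = 0;
	int i;

	for (i = 0; i < MAX_INST; i++)
		if (read_fuse(i) & UVD_DISABLE_MASK)
			harvest_config |= 1u << i;

	if (harvest_config == (1u << MAX_INST) - 1) {
		printf("all instances harvested, disable the block\n");
		return 1; /* mirrors the -ENOENT above */
	}

	for (i = 0; i < MAX_INST; i++) {
		if (harvest_config & (1u << i))
			continue; /* the skip pattern used throughout the diff */
		printf("instance %d usable\n", i);
	}
	return 0;
}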
@@ -396,19 +410,20 @@ static int uvd_v7_0_early_init(void *handle)
 static int uvd_v7_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
-       struct drm_sched_rq *rq;
        int i, j, r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                /* UVD TRAP */
-               r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq);
+               r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
                if (r)
                        return r;
 
                /* UVD ENC TRAP */
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
-                       r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq);
+                       r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
                        if (r)
                                return r;
                }
@@ -428,22 +443,13 @@ static int uvd_v7_0_sw_init(void *handle)
                DRM_INFO("PSP loading UVD firmware\n");
        }
 
-       for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-               ring = &adev->uvd.inst[j].ring_enc[0];
-               rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-                                         rq, NULL);
-               if (r) {
-                       DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
-                       return r;
-               }
-       }
-
        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;
 
        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                if (!amdgpu_sriov_vf(adev)) {
                        ring = &adev->uvd.inst[j].ring;
                        sprintf(ring->name, "uvd<%d>", j);
@@ -491,8 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
                return r;
 
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
-               drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
-
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                for (i = 0; i < adev->uvd.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
        }
@@ -521,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
                goto done;
 
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                ring = &adev->uvd.inst[j].ring;
 
                if (!amdgpu_sriov_vf(adev)) {
@@ -600,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
                DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
        }
 
-       for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+       for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                adev->uvd.inst[i].ring.ready = false;
+       }
 
        return 0;
 }
@@ -644,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                                lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -716,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
        WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
                adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
                adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -772,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
                init_table += header->uvd_table_offset;
 
                for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+                       if (adev->uvd.harvest_config & (1 << i))
+                               continue;
                        ring = &adev->uvd.inst[i].ring;
                        ring->wptr = 0;
                        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -911,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
        int i, j, k, r;
 
        for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+               if (adev->uvd.harvest_config & (1 << k))
+                       continue;
                /* disable DPG */
                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
                                ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -923,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
        uvd_v7_0_mc_resume(adev);
 
        for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+               if (adev->uvd.harvest_config & (1 << k))
+                       continue;
                ring = &adev->uvd.inst[k].ring;
                /* disable clock gating */
                WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1090,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
        uint8_t i = 0;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                /* force RBC into idle state */
                WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
 
@@ -1226,6 +1249,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
        return r;
 }
 
+/**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+                                          uint32_t ib_idx)
+{
+       struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+       unsigned i;
+
+       /* No patching necessary for the first instance */
+       if (!p->ring->me)
+               return 0;
+
+       for (i = 0; i < ib->length_dw; i += 2) {
+               uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+               reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+               reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+               amdgpu_set_ib_value(p, ib_idx, i, reg);
+       }
+       return 0;
+}
+
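patch_cs_in_place rewrites each register offset in the IB from the instance-0 base to the instance-1 base, so command streams built against the first UVD instance also run on the second. A standalone sketch of the re-basing arithmetic, with a made-up two-entry offset table:

/* Sketch only: the base table values are made up for illustration. */
#include <stdio.h>

static const unsigned int reg_base[2] = { 0x7800, 0x7e00 }; /* hypothetical */

/* The IB alternates register offset and value, as in the loop above. */
static void patch_ib(unsigned int *ib, unsigned int len_dw, int me)
{
	unsigned int i;

	if (!me)
		return; /* no patching necessary for the first instance */

	for (i = 0; i < len_dw; i += 2)
		ib[i] = ib[i] - reg_base[0] + reg_base[1];
}

int main(void)
{
	unsigned int ib[4] = { 0x7810, 0x1, 0x7814, 0x2 };

	patch_ib(ib, 4, 1);
	printf("patched regs: 0x%x 0x%x\n", ib[0], ib[2]); /* 0x7e10 0x7e14 */
	return 0;
}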
 /**
  * uvd_v7_0_ring_emit_ib - execute indirect buffer
  *
@@ -1718,6 +1769,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
        .get_rptr = uvd_v7_0_ring_get_rptr,
        .get_wptr = uvd_v7_0_ring_get_wptr,
        .set_wptr = uvd_v7_0_ring_set_wptr,
+       .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
        .emit_frame_size =
                6 + /* hdp invalidate */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1777,6 +1829,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
                adev->uvd.inst[i].ring.me = i;
                DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1788,6 +1842,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
        int i, j;
 
        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+               if (adev->uvd.harvest_config & (1 << j))
+                       continue;
                for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                        adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
                        adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1807,6 +1863,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
        int i;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+               if (adev->uvd.harvest_config & (1 << i))
+                       continue;
                adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
                adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
        }
index 47f70827195bff2e87fcada1d5843f864f3f86aa..d48e877b682e8f1ba18ed7d659c83229cfb94e8f 100644 (file)
@@ -56,7 +56,7 @@ static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                return RREG32(mmVCE_RB_RPTR);
        else
                return RREG32(mmVCE_RB_RPTR2);
@@ -73,7 +73,7 @@ static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                return RREG32(mmVCE_RB_WPTR);
        else
                return RREG32(mmVCE_RB_WPTR2);
@@ -90,7 +90,7 @@ static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
@@ -627,8 +627,10 @@ static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
 
-       for (i = 0; i < adev->vce.num_rings; i++)
+       for (i = 0; i < adev->vce.num_rings; i++) {
                adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
+               adev->vce.ring[i].me = i;
+       }
 }
 
 static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
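The vce_v2_0 hunks above replace pointer comparisons such as ring == &adev->vce.ring[0] with a stored instance index, assigned once in set_ring_funcs(). A sketch of the index-based dispatch, with hypothetical stub types:

/* Sketch only: the ring/device structs are hypothetical stubs. */
#include <stdio.h>

struct ring { int me; };
struct device { struct ring rings[3]; };

static unsigned int get_rptr(const struct ring *ring)
{
	/* Index-based dispatch keeps working even if the ring is reached
	 * through a different pointer than the canonical array slot. */
	switch (ring->me) {
	case 0:  return 0x100; /* stands in for RREG32(mmVCE_RB_RPTR)  */
	case 1:  return 0x200; /* stands in for RREG32(mmVCE_RB_RPTR2) */
	default: return 0x300; /* stands in for RREG32(mmVCE_RB_RPTR3) */
	}
}

int main(void)
{
	struct device dev;
	int i;

	for (i = 0; i < 3; i++)
		dev.rings[i].me = i; /* set once, as in set_ring_funcs() */

	printf("rptr of ring 1: 0x%x\n", get_rptr(&dev.rings[1]));
	return 0;
}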
index 0999c843f623ca37c0504a2a4c3151374c7fb854..cc6ce6cc03f47968345e6afe0d44003308ed2ecf 100644 (file)
@@ -39,6 +39,7 @@
 #include "smu/smu_7_1_2_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
 #include "gca/gfx_8_0_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
@@ -86,9 +87,9 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
        else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                v = RREG32(mmVCE_RB_RPTR);
-       else if (ring == &adev->vce.ring[1])
+       else if (ring->me == 1)
                v = RREG32(mmVCE_RB_RPTR2);
        else
                v = RREG32(mmVCE_RB_RPTR3);
@@ -118,9 +119,9 @@ static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
        else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                v = RREG32(mmVCE_RB_WPTR);
-       else if (ring == &adev->vce.ring[1])
+       else if (ring->me == 1)
                v = RREG32(mmVCE_RB_WPTR2);
        else
                v = RREG32(mmVCE_RB_WPTR3);
@@ -149,9 +150,9 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
        else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
                WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-       else if (ring == &adev->vce.ring[1])
+       else if (ring->me == 1)
                WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
        else
                WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
@@ -422,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle)
        int r, i;
 
        /* VCE */
-       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
+       r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq);
        if (r)
                return r;
 
@@ -900,7 +901,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
        .emit_frame_size =
                4 + /* vce_v3_0_emit_pipeline_sync */
                6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
-       .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+       .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +925,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
                6 + /* vce_v3_0_emit_vm_flush */
                4 + /* vce_v3_0_emit_pipeline_sync */
                6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
-       .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+       .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
        .emit_ib = vce_v3_0_ring_emit_ib,
        .emit_vm_flush = vce_v3_0_emit_vm_flush,
        .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
@@ -942,12 +943,16 @@ static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
        int i;
 
        if (adev->asic_type >= CHIP_STONEY) {
-               for (i = 0; i < adev->vce.num_rings; i++)
+               for (i = 0; i < adev->vce.num_rings; i++) {
                        adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+                       adev->vce.ring[i].me = i;
+               }
                DRM_INFO("VCE enabled in VM mode\n");
        } else {
-               for (i = 0; i < adev->vce.num_rings; i++)
+               for (i = 0; i < adev->vce.num_rings; i++) {
                        adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+                       adev->vce.ring[i].me = i;
+               }
                DRM_INFO("VCE enabled in physical mode\n");
        }
 }
index 8fd1b742985acad26dbf3fb747fe962acbb6e0a1..65f8860169e95b4030b97c4c8de563e21c93dfe5 100644 (file)
@@ -39,6 +39,8 @@
 #include "mmhub/mmhub_1_0_offset.h"
 #include "mmhub/mmhub_1_0_sh_mask.h"
 
+#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V4_0_FW_SIZE       (384 * 1024)
@@ -60,9 +62,9 @@ static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
        struct amdgpu_device *adev = ring->adev;
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
-       else if (ring == &adev->vce.ring[1])
+       else if (ring->me == 1)
                return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
        else
                return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
@@ -82,9 +84,9 @@ static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
        if (ring->use_doorbell)
                return adev->wb.wb[ring->wptr_offs];
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
-       else if (ring == &adev->vce.ring[1])
+       else if (ring->me == 1)
                return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
        else
                return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
@@ -108,10 +110,10 @@ static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
                return;
        }
 
-       if (ring == &adev->vce.ring[0])
+       if (ring->me == 0)
                WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
                        lower_32_bits(ring->wptr));
-       else if (ring == &adev->vce.ring[1])
+       else if (ring->me == 1)
                WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
                        lower_32_bits(ring->wptr));
        else
@@ -1088,8 +1090,10 @@ static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 {
        int i;
 
-       for (i = 0; i < adev->vce.num_rings; i++)
+       for (i = 0; i < adev->vce.num_rings; i++) {
                adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+               adev->vce.ring[i].me = i;
+       }
        DRM_INFO("VCE enabled in VM mode\n");
 }
 
index 29684c3ea4ef2b6d1586721f0b095dfd961eb989..2ce91a748c4028867c64d7f8d5940fc3e6787cd0 100644 (file)
 #include "mmhub/mmhub_9_1_offset.h"
 #include "mmhub/mmhub_9_1_sh_mask.h"
 
+#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
+
 static int vcn_v1_0_stop(struct amdgpu_device *adev);
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -55,6 +59,7 @@ static int vcn_v1_0_early_init(void *handle)
 
        vcn_v1_0_set_dec_ring_funcs(adev);
        vcn_v1_0_set_enc_ring_funcs(adev);
+       vcn_v1_0_set_jpeg_ring_funcs(adev);
        vcn_v1_0_set_irq_funcs(adev);
 
        return 0;
@@ -74,18 +79,23 @@ static int vcn_v1_0_sw_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* VCN DEC TRAP */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq);
        if (r)
                return r;
 
        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + 119,
+               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
                                        &adev->vcn.irq);
                if (r)
                        return r;
        }
 
+       /* VCN JPEG TRAP */
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq);
+       if (r)
+               return r;
+
        r = amdgpu_vcn_sw_init(adev);
        if (r)
                return r;
@@ -108,6 +118,12 @@ static int vcn_v1_0_sw_init(void *handle)
                        return r;
        }
 
+       ring = &adev->vcn.ring_jpeg;
+       sprintf(ring->name, "vcn_jpeg");
+       r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
+       if (r)
+               return r;
+
        return r;
 }
 
@@ -162,6 +178,14 @@ static int vcn_v1_0_hw_init(void *handle)
                }
        }
 
+       ring = &adev->vcn.ring_jpeg;
+       ring->ready = true;
+       r = amdgpu_ring_test_ring(ring);
+       if (r) {
+               ring->ready = false;
+               goto done;
+       }
+
 done:
        if (!r)
                DRM_INFO("VCN decode and encode initialized successfully.\n");
@@ -578,12 +602,12 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
        /* disable byte swapping */
        lmi_swap_cntl = 0;
 
-       vcn_v1_0_mc_resume(adev);
-
        vcn_1_0_disable_static_power_gating(adev);
        /* disable clock gating */
        vcn_v1_0_disable_clock_gating(adev);
 
+       vcn_v1_0_mc_resume(adev);
+
 	/* disable interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
                        ~UVD_MASTINT_EN__VCPU_EN_MASK);
@@ -729,6 +753,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 
+       ring = &adev->vcn.ring_jpeg;
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr));
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0);
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
+
+       /* initialize wptr */
+       ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+
+       /* copy patch commands to the jpeg ring */
+       vcn_v1_0_jpeg_ring_set_patch_ring(ring,
+               (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission));
+
        return 0;
 }
 
@@ -1126,6 +1166,383 @@ static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, val);
 }
 
+
+/**
+ * vcn_v1_0_jpeg_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_rptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vcn_v1_0_jpeg_ring_get_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vcn_v1_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_start(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x68e04);
+
+       amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x80010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_insert_end(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x68e04);
+
+       amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x00010000);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_fence - emit a fence & trap command
+ *
+ * @ring: amdgpu_ring pointer
+ * @addr: GPU address the fence sequence number is written to
+ * @seq: fence sequence number to emit
+ * @flags: AMDGPU_FENCE_FLAG_* control flags
+ *
+ * Write a fence and a trap command to the ring.
+ */
+static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
+                                    unsigned flags)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA0), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, seq);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_DATA1), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, seq);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x8);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_GPCOM_CMD), 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE4));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x01400200);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, seq);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(0, 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE2));
+       amdgpu_ring_write(ring, 0xffffffff);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x3fbc);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x1);
+}
+
+/**
+ * vcn_v1_0_jpeg_ring_emit_ib - execute indirect buffer
+ *
+ * @ring: amdgpu_ring pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer.
+ */
+static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib,
+                                 unsigned vmid, bool ctx_switch)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_IB_SIZE), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, ib->length_dw);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, lower_32_bits(ring->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, upper_32_bits(ring->gpu_addr));
+
+       amdgpu_ring_write(ring,
+               PACKETJ(0, 0, PACKETJ_CONDITION_CHECK0, PACKETJ_TYPE2));
+       amdgpu_ring_write(ring, 0);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x01400200);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x2);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_STATUS), 0, PACKETJ_CONDITION_CHECK3, PACKETJ_TYPE3));
+       amdgpu_ring_write(ring, 0x2);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring,
+                                           uint32_t reg, uint32_t val,
+                                           uint32_t mask)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t reg_offset = (reg << 2);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, 0x01400200);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0));
+       amdgpu_ring_write(ring, val);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+               ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring,
+                       PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3));
+       } else {
+               amdgpu_ring_write(ring, reg_offset);
+               amdgpu_ring_write(ring,
+                       PACKETJ(0, 0, 0, PACKETJ_TYPE3));
+       }
+       amdgpu_ring_write(ring, mask);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring,
+               unsigned vmid, uint64_t pd_addr)
+{
+       struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
+       uint32_t data0, data1, mask;
+
+       pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+
+       /* wait for register write */
+       data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+       data1 = lower_32_bits(pd_addr);
+       mask = 0xffffffff;
+       vcn_v1_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void vcn_v1_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring,
+                                       uint32_t reg, uint32_t val)
+{
+       struct amdgpu_device *adev = ring->adev;
+       uint32_t reg_offset = (reg << 2);
+
+       amdgpu_ring_write(ring,
+               PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0));
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+                       ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               amdgpu_ring_write(ring, 0);
+               amdgpu_ring_write(ring,
+                       PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0));
+       } else {
+               amdgpu_ring_write(ring, reg_offset);
+               amdgpu_ring_write(ring,
+                       PACKETJ(0, 0, 0, PACKETJ_TYPE0));
+       }
+       amdgpu_ring_write(ring, val);
+}
+
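Both emit_wreg and emit_reg_wait above branch on whether the byte offset lies inside the two windows the JRBC block can address directly; everything else is routed through the EXTERNAL_REG_BASE indirection. A sketch of just that range check, with a hypothetical emit() in place of amdgpu_ring_write():

/* Sketch only: emit() is a hypothetical stand-in for amdgpu_ring_write(). */
#include <stdio.h>

static void emit(unsigned int dw) { printf("emit 0x%08x\n", dw); }

static int offset_is_direct(unsigned int reg_offset)
{
	return (reg_offset >= 0x1f800 && reg_offset <= 0x21fff) ||
	       (reg_offset >= 0x1e000 && reg_offset <= 0x1e1ff);
}

static void wreg(unsigned int reg, unsigned int val)
{
	unsigned int reg_offset = reg << 2; /* dword index -> byte offset */

	if (offset_is_direct(reg_offset)) {
		emit(0);               /* external base left at zero      */
		emit(reg_offset >> 2); /* register addressed directly     */
	} else {
		emit(reg_offset);      /* program the external base first */
		emit(0);               /* then write through offset zero  */
	}
	emit(val);
}

int main(void)
{
	wreg(0x7f00, 0x1); /* 0x1fc00: inside the direct window */
	wreg(0x4000, 0x2); /* 0x10000: needs the indirection    */
	return 0;
}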
+static void vcn_v1_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+       int i;
+
+       WARN_ON(ring->wptr % 2 || count % 2);
+
+       for (i = 0; i < count / 2; i++) {
+               amdgpu_ring_write(ring, PACKETJ(0, 0, 0, PACKETJ_TYPE6));
+               amdgpu_ring_write(ring, 0);
+       }
+}
+
+static void vcn_v1_0_jpeg_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+{
+       struct amdgpu_device *adev = ring->adev;
+       ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+               ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               ring->ring[(*ptr)++] = 0;
+               ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0);
+       } else {
+               ring->ring[(*ptr)++] = reg_offset;
+               ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0);
+       }
+       ring->ring[(*ptr)++] = val;
+}
+
+static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr)
+{
+       struct amdgpu_device *adev = ring->adev;
+
+       uint32_t reg, reg_offset, val, mask, i;
+
+       // 1st: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW);
+       reg_offset = (reg << 2);
+       val = lower_32_bits(ring->gpu_addr);
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 2nd: program mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH);
+       reg_offset = (reg << 2);
+       val = upper_32_bits(ring->gpu_addr);
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 3rd to 5th: issue MEM_READ commands
+       for (i = 0; i <= 2; i++) {
+               ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE2);
+               ring->ring[ptr++] = 0;
+       }
+
+       // 6th: program mmUVD_JRBC_RB_CNTL register to enable NO_FETCH and RPTR write ability
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+       reg_offset = (reg << 2);
+       val = 0x13;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 7th: program mmUVD_JRBC_RB_REF_DATA
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA);
+       reg_offset = (reg << 2);
+       val = 0x1;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+       // 8th: issue conditional register read mmUVD_JRBC_RB_CNTL
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+       reg_offset = (reg << 2);
+       val = 0x1;
+       mask = 0x1;
+
+       ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_COND_RD_TIMER), 0, 0, PACKETJ_TYPE0);
+       ring->ring[ptr++] = 0x01400200;
+       ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_REF_DATA), 0, 0, PACKETJ_TYPE0);
+       ring->ring[ptr++] = val;
+       ring->ring[ptr++] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACKETJ_TYPE0);
+       if (((reg_offset >= 0x1f800) && (reg_offset <= 0x21fff)) ||
+               ((reg_offset >= 0x1e000) && (reg_offset <= 0x1e1ff))) {
+               ring->ring[ptr++] = 0;
+               ring->ring[ptr++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE3);
+       } else {
+               ring->ring[ptr++] = reg_offset;
+               ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE3);
+       }
+       ring->ring[ptr++] = mask;
+
+	// 9th to 21st: insert no-ops
+       for (i = 0; i <= 12; i++) {
+               ring->ring[ptr++] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+               ring->ring[ptr++] = 0;
+       }
+
+	// 22nd: reset mmUVD_JRBC_RB_RPTR
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_RPTR);
+       reg_offset = (reg << 2);
+       val = 0;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+
+	// 23rd: program mmUVD_JRBC_RB_CNTL to disable NO_FETCH
+       reg = SOC15_REG_OFFSET(UVD, 0, mmUVD_JRBC_RB_CNTL);
+       reg_offset = (reg << 2);
+       val = 0x12;
+       vcn_v1_0_jpeg_ring_patch_wreg(ring, &ptr, reg_offset, val);
+}
+
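set_patch_ring pre-writes a fixed command sequence into ring memory just past the normal submission window (wptr + max_dw * hw_submission in the start code above), so the packets already exist when the engine is steered to them. A standalone sketch of writing prebuilt dwords at a cursor:

/* Sketch only: a bare dword array stands in for the ring buffer. */
#include <stdio.h>

#define RING_DW 64

static void patch_write(unsigned int *ring, unsigned int *ptr,
			unsigned int reg_offset, unsigned int val)
{
	ring[(*ptr)++] = reg_offset; /* stands in for the PACKETJ header */
	ring[(*ptr)++] = val;
}

int main(void)
{
	unsigned int ring[RING_DW] = { 0 };
	unsigned int wptr = 8, max_window = 16;
	unsigned int ptr = wptr + max_window; /* past the submission window */

	patch_write(ring, &ptr, 0x1c, 0x13); /* e.g. RB_CNTL: NO_FETCH on  */
	patch_write(ring, &ptr, 0x24, 0x0);  /* e.g. RB_RPTR: reset to 0   */
	patch_write(ring, &ptr, 0x1c, 0x12); /* e.g. RB_CNTL: NO_FETCH off */

	printf("patch block occupies dwords %u..%u\n",
	       wptr + max_window, ptr - 1);
	return 0;
}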
 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
@@ -1150,6 +1567,9 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
        case 120:
                amdgpu_fence_process(&adev->vcn.ring_enc[1]);
                break;
+       case 126:
+               amdgpu_fence_process(&adev->vcn.ring_jpeg);
+               break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
@@ -1273,6 +1693,39 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = {
+       .type = AMDGPU_RING_TYPE_VCN_JPEG,
+       .align_mask = 0xf,
+       .nop = PACKET0(0x81ff, 0),
+       .support_64bit_ptrs = false,
+       .vmhub = AMDGPU_MMHUB,
+       .extra_dw = 64,
+       .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
+       .get_wptr = vcn_v1_0_jpeg_ring_get_wptr,
+       .set_wptr = vcn_v1_0_jpeg_ring_set_wptr,
+       .emit_frame_size =
+               6 + 6 + /* hdp invalidate / flush */
+               SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+               SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* vcn_v1_0_jpeg_ring_emit_vm_flush */
+		14 + 14 + /* vcn_v1_0_jpeg_ring_emit_fence x2 vm fence */
+		6,
+	.emit_ib_size = 22, /* vcn_v1_0_jpeg_ring_emit_ib */
+       .emit_ib = vcn_v1_0_jpeg_ring_emit_ib,
+       .emit_fence = vcn_v1_0_jpeg_ring_emit_fence,
+       .emit_vm_flush = vcn_v1_0_jpeg_ring_emit_vm_flush,
+       .test_ring = amdgpu_vcn_jpeg_ring_test_ring,
+       .test_ib = amdgpu_vcn_jpeg_ring_test_ib,
+       .insert_nop = vcn_v1_0_jpeg_ring_nop,
+       .insert_start = vcn_v1_0_jpeg_ring_insert_start,
+       .insert_end = vcn_v1_0_jpeg_ring_insert_end,
+       .pad_ib = amdgpu_ring_generic_pad_ib,
+       .begin_use = amdgpu_vcn_ring_begin_use,
+       .end_use = amdgpu_vcn_ring_end_use,
+       .emit_wreg = vcn_v1_0_jpeg_ring_emit_wreg,
+       .emit_reg_wait = vcn_v1_0_jpeg_ring_emit_reg_wait,
+};
+
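As with the dec and enc rings, the JPEG ring is driven entirely through one const ops table; set_jpeg_ring_funcs() below merely points the ring at it. The vtable idiom in miniature, with hypothetical stripped-down types:

/* Sketch only: a stripped-down version of the funcs-table idiom. */
#include <stdio.h>

struct ring;

struct ring_funcs {
	const char *type;
	unsigned long (*get_wptr)(struct ring *ring);
};

struct ring {
	const struct ring_funcs *funcs;
	unsigned long wptr;
};

static unsigned long jpeg_get_wptr(struct ring *ring) { return ring->wptr; }

static const struct ring_funcs jpeg_ring_funcs = {
	.type     = "vcn_jpeg",
	.get_wptr = jpeg_get_wptr,
};

int main(void)
{
	struct ring r = { .funcs = &jpeg_ring_funcs, .wptr = 42 };

	/* Generic code never needs to know which engine it is driving. */
	printf("%s wptr = %lu\n", r.funcs->type, r.funcs->get_wptr(&r));
	return 0;
}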
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 {
        adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
@@ -1289,6 +1742,12 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
        DRM_INFO("VCN encode is enabled in VM mode\n");
 }
 
+static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev)
+{
+       adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs;
+       DRM_INFO("VCN jpeg decode is enabled in VM mode\n");
+}
+
 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
        .set = vcn_v1_0_set_interrupt_state,
        .process = vcn_v1_0_process_interrupt,
index 45aafca7f31560fb4e0862c500046e61030cb336..c5c9b2bc190d5cdd679e2fd7252d235816cbb166 100644 (file)
@@ -51,6 +51,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
                adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
                adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
                adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+               adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
        }
        return 0;
 }
index 4ac1288ab7dff4fee300e3cdd1b359628173da71..42c8ad105b0503f1471f06215aa7e46b074aaa88 100644 (file)
@@ -1363,11 +1363,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
 
        if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
                if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
-                       pp_support_state = AMD_CG_SUPPORT_MC_LS;
+                       pp_support_state = PP_STATE_SUPPORT_LS;
                        pp_state = PP_STATE_LS;
                }
                if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
-                       pp_support_state |= AMD_CG_SUPPORT_MC_MGCG;
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
                        pp_state |= PP_STATE_CG;
                }
                if (state == AMD_CG_STATE_UNGATE)
@@ -1382,11 +1382,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
 
        if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
                if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
-                       pp_support_state = AMD_CG_SUPPORT_SDMA_LS;
+                       pp_support_state = PP_STATE_SUPPORT_LS;
                        pp_state = PP_STATE_LS;
                }
                if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
-                       pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG;
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
                        pp_state |= PP_STATE_CG;
                }
                if (state == AMD_CG_STATE_UNGATE)
@@ -1401,11 +1401,11 @@ static int vi_common_set_clockgating_state_by_smu(void *handle,
 
        if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
                if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
-                       pp_support_state = AMD_CG_SUPPORT_HDP_LS;
+                       pp_support_state = PP_STATE_SUPPORT_LS;
                        pp_state = PP_STATE_LS;
                }
                if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
-                       pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG;
+                       pp_support_state |= PP_STATE_SUPPORT_CG;
                        pp_state |= PP_STATE_CG;
                }
                if (state == AMD_CG_STATE_UNGATE)
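The vi.c hunks above fix a namespace mix-up: pp_support_state was being filled with AMD_CG_SUPPORT_* capability bits rather than the PP_STATE_SUPPORT_* values the powerplay layer tests. A standalone sketch of how mixed flag namespaces silently misbehave (the bit values here are illustrative, not the real ones):

/* Sketch only: the flag values are illustrative, not the real ones. */
#include <stdio.h>

#define AMD_CG_SUPPORT_MC_LS (1u << 8) /* driver-side capability bit */
#define PP_STATE_SUPPORT_LS  (1u << 0) /* what powerplay tests for   */

static void powerplay_apply(unsigned int support)
{
	if (support & PP_STATE_SUPPORT_LS)
		printf("light sleep enabled\n");
	else
		printf("light sleep silently skipped\n"); /* the bug */
}

int main(void)
{
	powerplay_apply(AMD_CG_SUPPORT_MC_LS); /* wrong namespace: bit 8  */
	powerplay_apply(PP_STATE_SUPPORT_LS);  /* the fixed code's intent */
	return 0;
}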
index 49df6c791cfcce5032ccefe9e87f835b4b282bbf..5d2475d5392ce25cbb719c0fa519e0c0d94777fc 100644 (file)
 #include "cik_int.h"
 
 static bool cik_event_interrupt_isr(struct kfd_dev *dev,
-                                       const uint32_t *ih_ring_entry)
+                                       const uint32_t *ih_ring_entry,
+                                       uint32_t *patched_ihre,
+                                       bool *patched_flag)
 {
        const struct cik_ih_ring_entry *ihre =
                        (const struct cik_ih_ring_entry *)ih_ring_entry;
+       const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
        unsigned int vmid, pasid;
 
+	/* This workaround is needed due to a HW/FW limitation on Hawaii:
+	 * the VMID and PASID are not written into the ih_ring_entry, so
+	 * they must be read back from the hardware registers instead.
+	 */
+       if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+               ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+               dev->device_info->asic_family == CHIP_HAWAII) {
+               struct cik_ih_ring_entry *tmp_ihre =
+                       (struct cik_ih_ring_entry *)patched_ihre;
+
+               *patched_flag = true;
+               *tmp_ihre = *ihre;
+
+               vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+               pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+
+               tmp_ihre->ring_id &= 0x000000ff;
+               tmp_ihre->ring_id |= vmid << 8;
+               tmp_ihre->ring_id |= pasid << 16;
+
+               return (pasid != 0) &&
+                       vmid >= dev->vm_info.first_vmid_kfd &&
+                       vmid <= dev->vm_info.last_vmid_kfd;
+       }
+
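On Hawaii the ISR therefore reconstructs the missing fields itself: it copies the raw entry into the caller-provided scratch buffer, reads the VMID from the fault register, looks up its PASID, and packs both back into ring_id. A standalone sketch of that repacking, with hypothetical register-read stubs:

/* Sketch only: the register reads are hypothetical stubs. */
#include <stdio.h>

struct ih_entry { unsigned int source_id, ring_id, data; };

static unsigned int read_vmid_from_vmfault_reg(void) { return 9; }
static unsigned int vmid_to_pasid(unsigned int vmid) { return vmid ? 0x1234 : 0; }

static void patch_entry(const struct ih_entry *in, struct ih_entry *out)
{
	unsigned int vmid  = read_vmid_from_vmfault_reg();
	unsigned int pasid = vmid_to_pasid(vmid);

	*out = *in;
	out->ring_id &= 0x000000ff;  /* keep the low byte */
	out->ring_id |= vmid << 8;   /* byte 1: VMID      */
	out->ring_id |= pasid << 16; /* bytes 2-3: PASID  */
}

int main(void)
{
	struct ih_entry raw = { 0x92, 0x00000005, 0 }, fixed;

	patch_entry(&raw, &fixed);
	printf("ring_id 0x%08x -> vmid %u, pasid 0x%x\n", fixed.ring_id,
	       (fixed.ring_id >> 8) & 0xff, fixed.ring_id >> 16);
	return 0;
}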
        /* Only handle interrupts from KFD VMIDs */
        vmid  = (ihre->ring_id & 0x0000ff00) >> 8;
        if (vmid < dev->vm_info.first_vmid_kfd ||
@@ -48,18 +75,19 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
        return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
                ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
                ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
-               ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE;
+               ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+               ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+               ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
 }
 
 static void cik_event_interrupt_wq(struct kfd_dev *dev,
                                        const uint32_t *ih_ring_entry)
 {
-       unsigned int pasid;
        const struct cik_ih_ring_entry *ihre =
                        (const struct cik_ih_ring_entry *)ih_ring_entry;
        uint32_t context_id = ihre->data & 0xfffffff;
-
-       pasid = (ihre->ring_id & 0xffff0000) >> 16;
+       unsigned int vmid  = (ihre->ring_id & 0x0000ff00) >> 8;
+       unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
 
        if (pasid == 0)
                return;
@@ -72,6 +100,22 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
                kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
        else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
                kfd_signal_hw_exception_event(pasid);
+       else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+               ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+               struct kfd_vm_fault_info info;
+
+               kfd_process_vm_fault(dev->dqm, pasid);
+
+               memset(&info, 0, sizeof(info));
+               dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+               if (!info.page_addr && !info.status)
+                       return;
+
+               if (info.vmid == vmid)
+                       kfd_signal_vm_fault_event(dev, pasid, &info);
+               else
+                       kfd_signal_vm_fault_event(dev, pasid, NULL);
+       }
 }
 
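The workqueue handler above attributes the saved fault details to this interrupt only when the recorded VMID matches the one decoded from ring_id; otherwise it still signals the event, just without potentially stale data. A standalone sketch of that guard, with hypothetical stubs:

/* Sketch only: fault_info and the signal call are hypothetical stubs. */
#include <stdio.h>

struct fault_info { unsigned int vmid, page_addr, status; };

static void signal_vm_fault(unsigned int pasid, const struct fault_info *info)
{
	if (info)
		printf("pasid 0x%x faulted at 0x%x\n", pasid, info->page_addr);
	else
		printf("pasid 0x%x faulted (details unavailable)\n", pasid);
}

int main(void)
{
	struct fault_info info = { .vmid = 9, .page_addr = 0xdead000,
				   .status = 1 };
	unsigned int irq_vmid = 8, pasid = 0x1234;

	if (!info.page_addr && !info.status)
		return 0; /* nothing recorded, drop it as the code above does */

	/* Stale info from another VMID must not be blamed on this fault. */
	signal_vm_fault(pasid, info.vmid == irq_vmid ? &info : NULL);
	return 0;
}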
 const struct kfd_event_interrupt_class event_interrupt_class_cik = {
index 109298b9d507d130d8c09bbcfb1f09746b3c7e6f..76f8677a7926c20923e012c966aea3d615d927c2 100644 (file)
@@ -20,8 +20,8 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
-#define HSA_RADEON_CIK_INT_H_INCLUDED
+#ifndef CIK_INT_H_INCLUDED
+#define CIK_INT_H_INCLUDED
 
 #include <linux/types.h>
 
@@ -34,9 +34,10 @@ struct cik_ih_ring_entry {
 
 #define CIK_INTSRC_CP_END_OF_PIPE      0xB5
 #define CIK_INTSRC_CP_BAD_OPCODE       0xB7
-#define CIK_INTSRC_DEQUEUE_COMPLETE    0xC6
 #define CIK_INTSRC_SDMA_TRAP           0xE0
 #define CIK_INTSRC_SQ_INTERRUPT_MSG    0xEF
+#define CIK_INTSRC_GFX_PAGE_INV_FAULT  0x92
+#define CIK_INTSRC_GFX_MEM_PROT_FAULT  0x93
 
 #endif
 
index f68aef02fc1fc116f4ec64f953a8b0da3d46e740..3621efbd57595df861e9d2088f6fd2e02c596eef 100644 (file)
  */
 
 static const uint32_t cwsr_trap_gfx8_hex[] = {
-       0xbf820001, 0xbf820125,
+       0xbf820001, 0xbf82012b,
        0xb8f4f802, 0x89748674,
        0xb8f5f803, 0x8675ff75,
-       0x00000400, 0xbf850011,
+       0x00000400, 0xbf850017,
        0xc00a1e37, 0x00000000,
        0xbf8c007f, 0x87777978,
-       0xbf840002, 0xb974f802,
-       0xbe801d78, 0xb8f5f803,
-       0x8675ff75, 0x000001ff,
-       0xbf850002, 0x80708470,
-       0x82718071, 0x8671ff71,
-       0x0000ffff, 0xb974f802,
+       0xbf840005, 0x8f728374,
+       0xb972e0c2, 0xbf800002,
+       0xb9740002, 0xbe801d78,
+       0xb8f5f803, 0x8675ff75,
+       0x000001ff, 0xbf850002,
+       0x80708470, 0x82718071,
+       0x8671ff71, 0x0000ffff,
+       0x8f728374, 0xb972e0c2,
+       0xbf800002, 0xb9740002,
        0xbe801f70, 0xb8f5f803,
        0x8675ff75, 0x00000100,
        0xbf840006, 0xbefa0080,
@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
        0x807c847c, 0x806eff6e,
        0x00000400, 0xbf0a757c,
        0xbf85ffef, 0xbf9c0000,
-       0xbf8200ca, 0xbef8007e,
+       0xbf8200cd, 0xbef8007e,
        0x8679ff7f, 0x0000ffff,
        0x8779ff79, 0x00040000,
        0xbefa0080, 0xbefb00ff,
@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
        0x8f739773, 0xb976f807,
        0x8671ff71, 0x0000ffff,
        0x86fe7e7e, 0x86ea6a6a,
-       0xb974f802, 0xbf8a0000,
-       0x95807370, 0xbf810000,
+       0x8f768374, 0xb976e0c2,
+       0xbf800002, 0xb9740002,
+       0xbf8a0000, 0x95807370,
+       0xbf810000, 0x00000000,
 };
 
 
 static const uint32_t cwsr_trap_gfx9_hex[] = {
-       0xbf820001, 0xbf82015a,
+       0xbf820001, 0xbf82015d,
        0xb8f8f802, 0x89788678,
        0xb8f1f803, 0x866eff71,
-       0x00000400, 0xbf850034,
+       0x00000400, 0xbf850037,
        0x866eff71, 0x00000800,
        0xbf850003, 0x866eff71,
        0x00000100, 0xbf840008,
@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
        0x8f6e8b77, 0x866eff6e,
        0x001f8000, 0xb96ef807,
        0x86fe7e7e, 0x86ea6a6a,
-       0xb978f802, 0xbe801f6c,
-       0x866dff6d, 0x0000ffff,
-       0xbef00080, 0xb9700283,
-       0xb8f02407, 0x8e709c70,
-       0x876d706d, 0xb8f003c7,
-       0x8e709b70, 0x876d706d,
-       0xb8f0f807, 0x8670ff70,
-       0x00007fff, 0xb970f807,
-       0xbeee007e, 0xbeef007f,
-       0xbefe0180, 0xbf900004,
-       0x87708478, 0xb970f802,
-       0xbf8e0002, 0xbf88fffe,
-       0xb8f02a05, 0x80708170,
-       0x8e708a70, 0xb8f11605,
-       0x80718171, 0x8e718671,
-       0x80707170, 0x80707e70,
-       0x8271807f, 0x8671ff71,
-       0x0000ffff, 0xc0471cb8,
-       0x00000040, 0xbf8cc07f,
-       0xc04b1d38, 0x00000048,
-       0xbf8cc07f, 0xc0431e78,
-       0x00000058, 0xbf8cc07f,
-       0xc0471eb8, 0x0000005c,
-       0xbf8cc07f, 0xbef4007e,
-       0x8675ff7f, 0x0000ffff,
-       0x8775ff75, 0x00040000,
-       0xbef60080, 0xbef700ff,
-       0x00807fac, 0x8670ff7f,
-       0x08000000, 0x8f708370,
-       0x87777077, 0x8670ff7f,
-       0x70000000, 0x8f708170,
-       0x87777077, 0xbefb007c,
-       0xbefa0080, 0xb8fa2a05,
-       0x807a817a, 0x8e7a8a7a,
-       0xb8f01605, 0x80708170,
-       0x8e708670, 0x807a707a,
-       0xbef60084, 0xbef600ff,
-       0x01000000, 0xbefe007c,
-       0xbefc007a, 0xc0611efa,
-       0x0000007c, 0xbf8cc07f,
-       0x807a847a, 0xbefc007e,
+       0x8f6e8378, 0xb96ee0c2,
+       0xbf800002, 0xb9780002,
+       0xbe801f6c, 0x866dff6d,
+       0x0000ffff, 0xbef00080,
+       0xb9700283, 0xb8f02407,
+       0x8e709c70, 0x876d706d,
+       0xb8f003c7, 0x8e709b70,
+       0x876d706d, 0xb8f0f807,
+       0x8670ff70, 0x00007fff,
+       0xb970f807, 0xbeee007e,
+       0xbeef007f, 0xbefe0180,
+       0xbf900004, 0x87708478,
+       0xb970f802, 0xbf8e0002,
+       0xbf88fffe, 0xb8f02a05,
+       0x80708170, 0x8e708a70,
+       0xb8f11605, 0x80718171,
+       0x8e718671, 0x80707170,
+       0x80707e70, 0x8271807f,
+       0x8671ff71, 0x0000ffff,
+       0xc0471cb8, 0x00000040,
+       0xbf8cc07f, 0xc04b1d38,
+       0x00000048, 0xbf8cc07f,
+       0xc0431e78, 0x00000058,
+       0xbf8cc07f, 0xc0471eb8,
+       0x0000005c, 0xbf8cc07f,
+       0xbef4007e, 0x8675ff7f,
+       0x0000ffff, 0x8775ff75,
+       0x00040000, 0xbef60080,
+       0xbef700ff, 0x00807fac,
+       0x8670ff7f, 0x08000000,
+       0x8f708370, 0x87777077,
+       0x8670ff7f, 0x70000000,
+       0x8f708170, 0x87777077,
+       0xbefb007c, 0xbefa0080,
+       0xb8fa2a05, 0x807a817a,
+       0x8e7a8a7a, 0xb8f01605,
+       0x80708170, 0x8e708670,
+       0x807a707a, 0xbef60084,
+       0xbef600ff, 0x01000000,
        0xbefe007c, 0xbefc007a,
-       0xc0611b3a, 0x0000007c,
+       0xc0611efa, 0x0000007c,
        0xbf8cc07f, 0x807a847a,
        0xbefc007e, 0xbefe007c,
-       0xbefc007a, 0xc0611b7a,
+       0xbefc007a, 0xc0611b3a,
        0x0000007c, 0xbf8cc07f,
        0x807a847a, 0xbefc007e,
        0xbefe007c, 0xbefc007a,
-       0xc0611bba, 0x0000007c,
+       0xc0611b7a, 0x0000007c,
        0xbf8cc07f, 0x807a847a,
        0xbefc007e, 0xbefe007c,
-       0xbefc007a, 0xc0611bfa,
+       0xbefc007a, 0xc0611bba,
        0x0000007c, 0xbf8cc07f,
        0x807a847a, 0xbefc007e,
        0xbefe007c, 0xbefc007a,
-       0xc0611e3a, 0x0000007c,
-       0xbf8cc07f, 0x807a847a,
-       0xbefc007e, 0xb8f1f803,
-       0xbefe007c, 0xbefc007a,
-       0xc0611c7a, 0x0000007c,
+       0xc0611bfa, 0x0000007c,
        0xbf8cc07f, 0x807a847a,
        0xbefc007e, 0xbefe007c,
-       0xbefc007a, 0xc0611a3a,
+       0xbefc007a, 0xc0611e3a,
+       0x0000007c, 0xbf8cc07f,
+       0x807a847a, 0xbefc007e,
+       0xb8f1f803, 0xbefe007c,
+       0xbefc007a, 0xc0611c7a,
        0x0000007c, 0xbf8cc07f,
        0x807a847a, 0xbefc007e,
        0xbefe007c, 0xbefc007a,
-       0xc0611a7a, 0x0000007c,
-       0xbf8cc07f, 0x807a847a,
-       0xbefc007e, 0xb8fbf801,
-       0xbefe007c, 0xbefc007a,
-       0xc0611efa, 0x0000007c,
+       0xc0611a3a, 0x0000007c,
        0xbf8cc07f, 0x807a847a,
-       0xbefc007e, 0x8670ff7f,
-       0x04000000, 0xbeef0080,
-       0x876f6f70, 0xb8fa2a05,
+       0xbefc007e, 0xbefe007c,
+       0xbefc007a, 0xc0611a7a,
+       0x0000007c, 0xbf8cc07f,
+       0x807a847a, 0xbefc007e,
+       0xb8fbf801, 0xbefe007c,
+       0xbefc007a, 0xc0611efa,
+       0x0000007c, 0xbf8cc07f,
+       0x807a847a, 0xbefc007e,
+       0x8670ff7f, 0x04000000,
+       0xbeef0080, 0x876f6f70,
+       0xb8fa2a05, 0x807a817a,
+       0x8e7a8a7a, 0xb8f11605,
+       0x80718171, 0x8e718471,
+       0x8e768271, 0xbef600ff,
+       0x01000000, 0xbef20174,
+       0x80747a74, 0x82758075,
+       0xbefc0080, 0xbf800000,
+       0xbe802b00, 0xbe822b02,
+       0xbe842b04, 0xbe862b06,
+       0xbe882b08, 0xbe8a2b0a,
+       0xbe8c2b0c, 0xbe8e2b0e,
+       0xc06b003a, 0x00000000,
+       0xbf8cc07f, 0xc06b013a,
+       0x00000010, 0xbf8cc07f,
+       0xc06b023a, 0x00000020,
+       0xbf8cc07f, 0xc06b033a,
+       0x00000030, 0xbf8cc07f,
+       0x8074c074, 0x82758075,
+       0x807c907c, 0xbf0a717c,
+       0xbf85ffe7, 0xbef40172,
+       0xbefa0080, 0xbefe00c1,
+       0xbeff00c1, 0xbee80080,
+       0xbee90080, 0xbef600ff,
+       0x01000000, 0xe0724000,
+       0x7a1d0000, 0xe0724100,
+       0x7a1d0100, 0xe0724200,
+       0x7a1d0200, 0xe0724300,
+       0x7a1d0300, 0xbefe00c1,
+       0xbeff00c1, 0xb8f14306,
+       0x8671c171, 0xbf84002c,
+       0xbf8a0000, 0x8670ff6f,
+       0x04000000, 0xbf840028,
+       0x8e718671, 0x8e718271,
+       0xbef60071, 0xb8fa2a05,
        0x807a817a, 0x8e7a8a7a,
-       0xb8f11605, 0x80718171,
-       0x8e718471, 0x8e768271,
+       0xb8f01605, 0x80708170,
+       0x8e708670, 0x807a707a,
+       0x807aff7a, 0x00000080,
        0xbef600ff, 0x01000000,
-       0xbef20174, 0x80747a74,
-       0x82758075, 0xbefc0080,
-       0xbf800000, 0xbe802b00,
-       0xbe822b02, 0xbe842b04,
-       0xbe862b06, 0xbe882b08,
-       0xbe8a2b0a, 0xbe8c2b0c,
-       0xbe8e2b0e, 0xc06b003a,
-       0x00000000, 0xbf8cc07f,
-       0xc06b013a, 0x00000010,
-       0xbf8cc07f, 0xc06b023a,
-       0x00000020, 0xbf8cc07f,
-       0xc06b033a, 0x00000030,
-       0xbf8cc07f, 0x8074c074,
-       0x82758075, 0x807c907c,
-       0xbf0a717c, 0xbf85ffe7,
-       0xbef40172, 0xbefa0080,
+       0xbefc0080, 0xd28c0002,
+       0x000100c1, 0xd28d0003,
+       0x000204c1, 0xd1060002,
+       0x00011103, 0x7e0602ff,
+       0x00000200, 0xbefc00ff,
+       0x00010000, 0xbe800077,
+       0x8677ff77, 0xff7fffff,
+       0x8777ff77, 0x00058000,
+       0xd8ec0000, 0x00000002,
+       0xbf8cc07f, 0xe0765000,
+       0x7a1d0002, 0x68040702,
+       0xd0c9006a, 0x0000e302,
+       0xbf87fff7, 0xbef70000,
+       0xbefa00ff, 0x00000400,
        0xbefe00c1, 0xbeff00c1,
-       0xbee80080, 0xbee90080,
+       0xb8f12a05, 0x80718171,
+       0x8e718271, 0x8e768871,
        0xbef600ff, 0x01000000,
+       0xbefc0084, 0xbf0a717c,
+       0xbf840015, 0xbf11017c,
+       0x8071ff71, 0x00001000,
+       0x7e000300, 0x7e020301,
+       0x7e040302, 0x7e060303,
        0xe0724000, 0x7a1d0000,
        0xe0724100, 0x7a1d0100,
        0xe0724200, 0x7a1d0200,
        0xe0724300, 0x7a1d0300,
+       0x807c847c, 0x807aff7a,
+       0x00000400, 0xbf0a717c,
+       0xbf85ffef, 0xbf9c0000,
+       0xbf8200dc, 0xbef4007e,
+       0x8675ff7f, 0x0000ffff,
+       0x8775ff75, 0x00040000,
+       0xbef60080, 0xbef700ff,
+       0x00807fac, 0x866eff7f,
+       0x08000000, 0x8f6e836e,
+       0x87776e77, 0x866eff7f,
+       0x70000000, 0x8f6e816e,
+       0x87776e77, 0x866eff7f,
+       0x04000000, 0xbf84001e,
        0xbefe00c1, 0xbeff00c1,
-       0xb8f14306, 0x8671c171,
-       0xbf84002c, 0xbf8a0000,
-       0x8670ff6f, 0x04000000,
-       0xbf840028, 0x8e718671,
-       0x8e718271, 0xbef60071,
-       0xb8fa2a05, 0x807a817a,
-       0x8e7a8a7a, 0xb8f01605,
-       0x80708170, 0x8e708670,
-       0x807a707a, 0x807aff7a,
+       0xb8ef4306, 0x866fc16f,
+       0xbf840019, 0x8e6f866f,
+       0x8e6f826f, 0xbef6006f,
+       0xb8f82a05, 0x80788178,
+       0x8e788a78, 0xb8ee1605,
+       0x806e816e, 0x8e6e866e,
+       0x80786e78, 0x8078ff78,
        0x00000080, 0xbef600ff,
        0x01000000, 0xbefc0080,
-       0xd28c0002, 0x000100c1,
-       0xd28d0003, 0x000204c1,
-       0xd1060002, 0x00011103,
-       0x7e0602ff, 0x00000200,
-       0xbefc00ff, 0x00010000,
-       0xbe800077, 0x8677ff77,
-       0xff7fffff, 0x8777ff77,
-       0x00058000, 0xd8ec0000,
-       0x00000002, 0xbf8cc07f,
-       0xe0765000, 0x7a1d0002,
-       0x68040702, 0xd0c9006a,
-       0x0000e302, 0xbf87fff7,
-       0xbef70000, 0xbefa00ff,
-       0x00000400, 0xbefe00c1,
-       0xbeff00c1, 0xb8f12a05,
-       0x80718171, 0x8e718271,
-       0x8e768871, 0xbef600ff,
-       0x01000000, 0xbefc0084,
-       0xbf0a717c, 0xbf840015,
-       0xbf11017c, 0x8071ff71,
-       0x00001000, 0x7e000300,
+       0xe0510000, 0x781d0000,
+       0xe0510100, 0x781d0000,
+       0x807cff7c, 0x00000200,
+       0x8078ff78, 0x00000200,
+       0xbf0a6f7c, 0xbf85fff6,
+       0xbef80080, 0xbefe00c1,
+       0xbeff00c1, 0xb8ef2a05,
+       0x806f816f, 0x8e6f826f,
+       0x8e76886f, 0xbef600ff,
+       0x01000000, 0xbeee0078,
+       0x8078ff78, 0x00000400,
+       0xbefc0084, 0xbf11087c,
+       0x806fff6f, 0x00008000,
+       0xe0524000, 0x781d0000,
+       0xe0524100, 0x781d0100,
+       0xe0524200, 0x781d0200,
+       0xe0524300, 0x781d0300,
+       0xbf8c0f70, 0x7e000300,
        0x7e020301, 0x7e040302,
-       0x7e060303, 0xe0724000,
-       0x7a1d0000, 0xe0724100,
-       0x7a1d0100, 0xe0724200,
-       0x7a1d0200, 0xe0724300,
-       0x7a1d0300, 0x807c847c,
-       0x807aff7a, 0x00000400,
-       0xbf0a717c, 0xbf85ffef,
-       0xbf9c0000, 0xbf8200d9,
-       0xbef4007e, 0x8675ff7f,
-       0x0000ffff, 0x8775ff75,
-       0x00040000, 0xbef60080,
-       0xbef700ff, 0x00807fac,
-       0x866eff7f, 0x08000000,
-       0x8f6e836e, 0x87776e77,
-       0x866eff7f, 0x70000000,
-       0x8f6e816e, 0x87776e77,
-       0x866eff7f, 0x04000000,
-       0xbf84001e, 0xbefe00c1,
-       0xbeff00c1, 0xb8ef4306,
-       0x866fc16f, 0xbf840019,
-       0x8e6f866f, 0x8e6f826f,
-       0xbef6006f, 0xb8f82a05,
+       0x7e060303, 0x807c847c,
+       0x8078ff78, 0x00000400,
+       0xbf0a6f7c, 0xbf85ffee,
+       0xbf9c0000, 0xe0524000,
+       0x6e1d0000, 0xe0524100,
+       0x6e1d0100, 0xe0524200,
+       0x6e1d0200, 0xe0524300,
+       0x6e1d0300, 0xb8f82a05,
        0x80788178, 0x8e788a78,
        0xb8ee1605, 0x806e816e,
        0x8e6e866e, 0x80786e78,
-       0x8078ff78, 0x00000080,
-       0xbef600ff, 0x01000000,
-       0xbefc0080, 0xe0510000,
-       0x781d0000, 0xe0510100,
-       0x781d0000, 0x807cff7c,
-       0x00000200, 0x8078ff78,
-       0x00000200, 0xbf0a6f7c,
-       0xbf85fff6, 0xbef80080,
-       0xbefe00c1, 0xbeff00c1,
-       0xb8ef2a05, 0x806f816f,
-       0x8e6f826f, 0x8e76886f,
-       0xbef600ff, 0x01000000,
-       0xbeee0078, 0x8078ff78,
-       0x00000400, 0xbefc0084,
-       0xbf11087c, 0x806fff6f,
-       0x00008000, 0xe0524000,
-       0x781d0000, 0xe0524100,
-       0x781d0100, 0xe0524200,
-       0x781d0200, 0xe0524300,
-       0x781d0300, 0xbf8c0f70,
-       0x7e000300, 0x7e020301,
-       0x7e040302, 0x7e060303,
-       0x807c847c, 0x8078ff78,
-       0x00000400, 0xbf0a6f7c,
-       0xbf85ffee, 0xbf9c0000,
-       0xe0524000, 0x6e1d0000,
-       0xe0524100, 0x6e1d0100,
-       0xe0524200, 0x6e1d0200,
-       0xe0524300, 0x6e1d0300,
+       0x80f8c078, 0xb8ef1605,
+       0x806f816f, 0x8e6f846f,
+       0x8e76826f, 0xbef600ff,
+       0x01000000, 0xbefc006f,
+       0xc031003a, 0x00000078,
+       0x80f8c078, 0xbf8cc07f,
+       0x80fc907c, 0xbf800000,
+       0xbe802d00, 0xbe822d02,
+       0xbe842d04, 0xbe862d06,
+       0xbe882d08, 0xbe8a2d0a,
+       0xbe8c2d0c, 0xbe8e2d0e,
+       0xbf06807c, 0xbf84fff0,
        0xb8f82a05, 0x80788178,
        0x8e788a78, 0xb8ee1605,
        0x806e816e, 0x8e6e866e,
-       0x80786e78, 0x80f8c078,
-       0xb8ef1605, 0x806f816f,
-       0x8e6f846f, 0x8e76826f,
+       0x80786e78, 0xbef60084,
        0xbef600ff, 0x01000000,
-       0xbefc006f, 0xc031003a,
-       0x00000078, 0x80f8c078,
-       0xbf8cc07f, 0x80fc907c,
-       0xbf800000, 0xbe802d00,
-       0xbe822d02, 0xbe842d04,
-       0xbe862d06, 0xbe882d08,
-       0xbe8a2d0a, 0xbe8c2d0c,
-       0xbe8e2d0e, 0xbf06807c,
-       0xbf84fff0, 0xb8f82a05,
-       0x80788178, 0x8e788a78,
-       0xb8ee1605, 0x806e816e,
-       0x8e6e866e, 0x80786e78,
-       0xbef60084, 0xbef600ff,
-       0x01000000, 0xc0211bfa,
+       0xc0211bfa, 0x00000078,
+       0x80788478, 0xc0211b3a,
        0x00000078, 0x80788478,
-       0xc0211b3a, 0x00000078,
-       0x80788478, 0xc0211b7a,
+       0xc0211b7a, 0x00000078,
+       0x80788478, 0xc0211eba,
        0x00000078, 0x80788478,
-       0xc0211eba, 0x00000078,
-       0x80788478, 0xc0211efa,
+       0xc0211efa, 0x00000078,
+       0x80788478, 0xc0211c3a,
        0x00000078, 0x80788478,
-       0xc0211c3a, 0x00000078,
-       0x80788478, 0xc0211c7a,
+       0xc0211c7a, 0x00000078,
+       0x80788478, 0xc0211a3a,
        0x00000078, 0x80788478,
-       0xc0211a3a, 0x00000078,
-       0x80788478, 0xc0211a7a,
+       0xc0211a7a, 0x00000078,
+       0x80788478, 0xc0211cfa,
        0x00000078, 0x80788478,
-       0xc0211cfa, 0x00000078,
-       0x80788478, 0xbf8cc07f,
-       0xbefc006f, 0xbefe007a,
-       0xbeff007b, 0x866f71ff,
-       0x000003ff, 0xb96f4803,
-       0x866f71ff, 0xfffff800,
-       0x8f6f8b6f, 0xb96fa2c3,
-       0xb973f801, 0xb8ee2a05,
-       0x806e816e, 0x8e6e8a6e,
-       0xb8ef1605, 0x806f816f,
-       0x8e6f866f, 0x806e6f6e,
-       0x806e746e, 0x826f8075,
-       0x866fff6f, 0x0000ffff,
-       0xc0071cb7, 0x00000040,
-       0xc00b1d37, 0x00000048,
-       0xc0031e77, 0x00000058,
-       0xc0071eb7, 0x0000005c,
-       0xbf8cc07f, 0x866fff6d,
-       0xf0000000, 0x8f6f9c6f,
-       0x8e6f906f, 0xbeee0080,
-       0x876e6f6e, 0x866fff6d,
-       0x08000000, 0x8f6f9b6f,
-       0x8e6f8f6f, 0x876e6f6e,
-       0x866fff70, 0x00800000,
-       0x8f6f976f, 0xb96ef807,
-       0x866dff6d, 0x0000ffff,
-       0x86fe7e7e, 0x86ea6a6a,
-       0xb970f802, 0xbf8a0000,
+       0xbf8cc07f, 0xbefc006f,
+       0xbefe007a, 0xbeff007b,
+       0x866f71ff, 0x000003ff,
+       0xb96f4803, 0x866f71ff,
+       0xfffff800, 0x8f6f8b6f,
+       0xb96fa2c3, 0xb973f801,
+       0xb8ee2a05, 0x806e816e,
+       0x8e6e8a6e, 0xb8ef1605,
+       0x806f816f, 0x8e6f866f,
+       0x806e6f6e, 0x806e746e,
+       0x826f8075, 0x866fff6f,
+       0x0000ffff, 0xc0071cb7,
+       0x00000040, 0xc00b1d37,
+       0x00000048, 0xc0031e77,
+       0x00000058, 0xc0071eb7,
+       0x0000005c, 0xbf8cc07f,
+       0x866fff6d, 0xf0000000,
+       0x8f6f9c6f, 0x8e6f906f,
+       0xbeee0080, 0x876e6f6e,
+       0x866fff6d, 0x08000000,
+       0x8f6f9b6f, 0x8e6f8f6f,
+       0x876e6f6e, 0x866fff70,
+       0x00800000, 0x8f6f976f,
+       0xb96ef807, 0x866dff6d,
+       0x0000ffff, 0x86fe7e7e,
+       0x86ea6a6a, 0x8f6e8370,
+       0xb96ee0c2, 0xbf800002,
+       0xb9700002, 0xbf8a0000,
        0x95806f6c, 0xbf810000,
 };
index a2a04bb64096f0ca6801fbef0e28aa931e7caa0b..abe1a5da29fb313b7ae03777bec24ddf8082a40c 100644 (file)
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT  = 23
 var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
 var SQ_WAVE_STATUS_SPI_PRIO_SHIFT  = 1
 var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT   = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE    = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT  = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE   = 29
 
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT    = 12
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE     = 9
@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
     s_waitcnt lgkmcnt(0)
     s_or_b32        ttmp7, ttmp8, ttmp9
     s_cbranch_scc0  L_NO_NEXT_TRAP //next level trap handler not been set
-    s_setreg_b32    hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+    set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
     s_setpc_b64     [ttmp8,ttmp9] //jump to next level trap handler
 
 L_NO_NEXT_TRAP:
@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
     s_addc_u32  ttmp1, ttmp1, 0
 L_EXCP_CASE:
     s_and_b32   ttmp1, ttmp1, 0xFFFF
-    s_setreg_b32    hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+    set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
     s_rfe_b64       [ttmp0, ttmp1]
 end
     // *********        End handling of non-CWSR traps   *******************
@@ -1053,7 +1057,7 @@ end
     s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff      //pc[47:32]        //Do it here in order not to affect STATUS
     s_and_b64    exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
     s_and_b64    vcc, vcc, vcc  // Restore STATUS.VCCZ, not writable by s_setreg_b32
-    s_setreg_b32    hwreg(HW_REG_STATUS),   s_restore_status     // SCC is included, which is changed by previous salu
+    set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
 
     s_barrier                                                   //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
 
@@ -1134,3 +1138,11 @@ end
 function get_hwreg_size_bytes
     return 128 //HWREG size 128 bytes
 end
+
+function set_status_without_spi_prio(status, tmp)
+    // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+    s_lshr_b32      tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+    s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+    s_nop           0x2 // avoid S_SETREG => S_SETREG hazard
+    s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
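
Note on the bit arithmetic above: the two s_setreg_b32 writes restore STATUS in two slices, bits [31:3] (POST, after shifting the saved value down by 3) and bit [0:0] (PRE), so the SPI_PRIO field at bits [2:1] is never overwritten. A minimal C sketch of the same field math, assuming only the SQ_WAVE_STATUS_*_SPI_PRIO constants declared above (illustrative host code, not trap-handler source):

    #include <stdint.h>

    /* Mirrors the SQ_WAVE_STATUS_*_SPI_PRIO constants declared above. */
    #define PRE_SPI_PRIO_SHIFT   0
    #define PRE_SPI_PRIO_SIZE    1
    #define POST_SPI_PRIO_SHIFT  3
    #define POST_SPI_PRIO_SIZE   29

    static uint32_t field_mask(unsigned int shift, unsigned int size)
    {
            uint32_t bits = (size >= 32) ? ~0u : ((1u << size) - 1);

            return bits << shift;
    }

    /* Merge 'saved' into 'cur' everywhere except SPI_PRIO (bits 2:1). */
    static uint32_t set_status_without_spi_prio(uint32_t cur, uint32_t saved)
    {
            uint32_t keep = field_mask(PRE_SPI_PRIO_SHIFT, PRE_SPI_PRIO_SIZE) |
                            field_mask(POST_SPI_PRIO_SHIFT, POST_SPI_PRIO_SIZE);

            return (cur & ~keep) | (saved & keep);
    }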
index 998be96be7361d685e0b83cc36f62c03f1bd9c6a..0bb9c577b3a2c8b7a93e2b9dc330fdff1b32f5ea 100644 (file)
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK   = 0x00800000
 var SQ_WAVE_STATUS_SPI_PRIO_SHIFT  = 1
 var SQ_WAVE_STATUS_SPI_PRIO_MASK   = 0x00000006
 var SQ_WAVE_STATUS_HALT_MASK       = 0x2000
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT   = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE    = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT  = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE   = 29
 
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT   = 12
 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE    = 9
@@ -317,7 +321,7 @@ L_EXCP_CASE:
     // Restore SQ_WAVE_STATUS.
     s_and_b64       exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
     s_and_b64       vcc, vcc, vcc    // Restore STATUS.VCCZ, not writable by s_setreg_b32
-    s_setreg_b32    hwreg(HW_REG_STATUS), s_save_status
+    set_status_without_spi_prio(s_save_status, ttmp2)
 
     s_rfe_b64       [ttmp0, ttmp1]
 end
@@ -1120,7 +1124,7 @@ end
     s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff     //pc[47:32]        //Do it here in order not to affect STATUS
     s_and_b64   exec, exec, exec  // Restore STATUS.EXECZ, not writable by s_setreg_b32
     s_and_b64   vcc, vcc, vcc  // Restore STATUS.VCCZ, not writable by s_setreg_b32
-    s_setreg_b32    hwreg(HW_REG_STATUS),   s_restore_status    // SCC is included, which is changed by previous salu
+    set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
 
     s_barrier                                                  //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
 
@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
         s_waitcnt lgkmcnt(0)
     end
 end
+
+function set_status_without_spi_prio(status, tmp)
+    // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+    s_lshr_b32      tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+    s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+    s_nop           0x2 // avoid S_SETREG => S_SETREG hazard
+    s_setreg_b32    hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
index f64c5551cdba05f00e149a50f7e104735a3b3fd1..297b36c26a05c819634f9cdeb9426aa243f76fe4 100644 (file)
@@ -122,6 +122,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
        if (IS_ERR(process))
                return PTR_ERR(process);
 
+       if (kfd_is_locked())
+               return -EAGAIN;
+
        dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
                process->pasid, process->is_32bit_user_mode);
 
@@ -389,6 +392,61 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
        return retval;
 }
 
+static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
+                                       void *data)
+{
+       int retval;
+       const int max_num_cus = 1024;
+       struct kfd_ioctl_set_cu_mask_args *args = data;
+       struct queue_properties properties;
+       uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
+       size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
+
+       if ((args->num_cu_mask % 32) != 0) {
+               pr_debug("num_cu_mask 0x%x must be a multiple of 32",
+                               args->num_cu_mask);
+               return -EINVAL;
+       }
+
+       properties.cu_mask_count = args->num_cu_mask;
+       if (properties.cu_mask_count == 0) {
+               pr_debug("CU mask cannot be 0");
+               return -EINVAL;
+       }
+
+       /* To prevent an unreasonably large CU mask size, set an arbitrary
+        * limit of max_num_cus bits. Any CU mask bits past that limit are
+        * dropped and only the first max_num_cus bits are used.
+        */
+       if (properties.cu_mask_count > max_num_cus) {
+               pr_debug("CU mask cannot be greater than 1024 bits");
+               properties.cu_mask_count = max_num_cus;
+               cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
+       }
+
+       properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
+       if (!properties.cu_mask)
+               return -ENOMEM;
+
+       retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+       if (retval) {
+               pr_debug("Could not copy CU mask from userspace");
+               kfree(properties.cu_mask);
+               return -EFAULT;
+       }
+
+       mutex_lock(&p->mutex);
+
+       retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+
+       mutex_unlock(&p->mutex);
+
+       if (retval)
+               kfree(properties.cu_mask);
+
+       return retval;
+}
+
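
A hypothetical user-space caller of the new ioctl, using the fields the handler above reads (queue_id, num_cu_mask in bits, cu_mask_ptr). The struct layout and the AMDKFD_IOC_SET_CU_MASK request number come from the uapi header, so treat both as assumptions here:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>    /* struct kfd_ioctl_set_cu_mask_args */

    /* Disable every other CU on one queue. num_cu_mask must be a
     * multiple of 32 or the handler returns -EINVAL. */
    static int set_cu_mask(int kfd_fd, uint32_t queue_id)
    {
            uint32_t mask[2] = { 0x55555555u, 0x55555555u }; /* 64 CU bits */
            struct kfd_ioctl_set_cu_mask_args args;

            memset(&args, 0, sizeof(args));
            args.queue_id = queue_id;
            args.num_cu_mask = 64;                  /* in bits */
            args.cu_mask_ptr = (uintptr_t)mask;

            return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
    }

The fd would come from open("/dev/kfd", O_RDWR); the handler copies only the first num_cu_mask/32 words from cu_mask_ptr.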
 static int kfd_ioctl_set_memory_policy(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
@@ -754,7 +812,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 {
        struct kfd_ioctl_get_clock_counters_args *args = data;
        struct kfd_dev *dev;
-       struct timespec64 time;
 
        dev = kfd_device_by_id(args->gpu_id);
        if (dev)
@@ -766,11 +823,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
                args->gpu_clock_counter = 0;
 
        /* No access to rdtsc. Using raw monotonic time */
-       getrawmonotonic64(&time);
-       args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
-
-       get_monotonic_boottime64(&time);
-       args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
+       args->cpu_clock_counter = ktime_get_raw_ns();
+       args->system_clock_counter = ktime_get_boot_ns();
 
        /* Since the counter is in nano-seconds we use 1GHz frequency */
        args->system_clock_freq = 1000000000;
@@ -1558,6 +1612,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
                        kfd_ioctl_unmap_memory_from_gpu, 0),
 
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+                       kfd_ioctl_set_cu_mask, 0),
+
 };
 
 #define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
index 296b3f230280bc8def89b6d7b700995f1ec6dcc1..ee4996029a86866fc05807e60ec956e2ddad80df 100644 (file)
@@ -189,6 +189,21 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
        return 0;
 }
 
+static struct kfd_mem_properties *
+find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
+               struct kfd_topology_device *dev)
+{
+       struct kfd_mem_properties *props;
+
+       list_for_each_entry(props, &dev->mem_props, list) {
+               if (props->heap_type == heap_type
+                               && props->flags == flags
+                               && props->width == width)
+                       return props;
+       }
+
+       return NULL;
+}
 /* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
  * topology device present in the device_list
  */
@@ -197,36 +212,56 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
 {
        struct kfd_mem_properties *props;
        struct kfd_topology_device *dev;
+       uint32_t heap_type;
+       uint64_t size_in_bytes;
+       uint32_t flags = 0;
+       uint32_t width;
 
        pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
                        mem->proximity_domain);
        list_for_each_entry(dev, device_list, list) {
                if (mem->proximity_domain == dev->proximity_domain) {
-                       props = kfd_alloc_struct(props);
-                       if (!props)
-                               return -ENOMEM;
-
                        /* We're on GPU node */
                        if (dev->node_props.cpu_cores_count == 0) {
                                /* APU */
                                if (mem->visibility_type == 0)
-                                       props->heap_type =
+                                       heap_type =
                                                HSA_MEM_HEAP_TYPE_FB_PRIVATE;
                                /* dGPU */
                                else
-                                       props->heap_type = mem->visibility_type;
+                                       heap_type = mem->visibility_type;
                        } else
-                               props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+                               heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
 
                        if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
-                               props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+                               flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
                        if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
-                               props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+                               flags |= HSA_MEM_FLAGS_NON_VOLATILE;
 
-                       props->size_in_bytes =
+                       size_in_bytes =
                                ((uint64_t)mem->length_high << 32) +
                                                        mem->length_low;
-                       props->width = mem->width;
+                       width = mem->width;
+
+                       /* Multiple banks of the same type are aggregated into
+                        * one: user mode doesn't care about multiple physical
+                        * memory segments and manages them as a single
+                        * virtual heap.
+                        */
+                       props = find_subtype_mem(heap_type, flags, width, dev);
+                       if (props) {
+                               props->size_in_bytes += size_in_bytes;
+                               break;
+                       }
+
+                       props = kfd_alloc_struct(props);
+                       if (!props)
+                               return -ENOMEM;
+
+                       props->heap_type = heap_type;
+                       props->flags = flags;
+                       props->size_in_bytes = size_in_bytes;
+                       props->width = width;
 
                        dev->node_props.mem_banks_count++;
                        list_add_tail(&props->list, &dev->mem_props);
index afb26f205d2978717bdeec174603caa0a83d4712..a3441b0e385b7a32edf8a887dbafe2daaed3d109 100644 (file)
@@ -38,7 +38,6 @@
 #include "kfd_dbgmgr.h"
 #include "kfd_dbgdev.h"
 #include "kfd_device_queue_manager.h"
-#include "../../radeon/cik_reg.h"
 
 static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
 {
index 03424c20920cc84ab0a18e97aaba33995b0a268e..0619c777b47e6283513666c65e2ad56e46c01fc3 100644 (file)
@@ -60,6 +60,9 @@ enum {
        SH_REG_SIZE = SH_REG_END - SH_REG_BASE
 };
 
+/* SQ_CMD definitions */
+#define SQ_CMD                                         0x8DEC
+
 enum SQ_IND_CMD_CMD {
        SQ_IND_CMD_CMD_NULL = 0x00000000,
        SQ_IND_CMD_CMD_HALT = 0x00000001,
@@ -190,4 +193,38 @@ union ULARGE_INTEGER {
 void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
                        enum DBGDEV_TYPE type);
 
+union TCP_WATCH_CNTL_BITS {
+       struct {
+               uint32_t mask:24;
+               uint32_t vmid:4;
+               uint32_t atc:1;
+               uint32_t mode:2;
+               uint32_t valid:1;
+       } bitfields, bits;
+       uint32_t u32All;
+       signed int i32All;
+       float f32All;
+};
+
+enum {
+       ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+       ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+       ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+       /* extend the mask to 26 bits in order to match the low address field */
+       ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+       ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+enum {
+       MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
+       MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+       ADDRESS_WATCH_REG_ADDR_HI = 0,
+       ADDRESS_WATCH_REG_ADDR_LO,
+       ADDRESS_WATCH_REG_CNTL,
+       ADDRESS_WATCH_REG_MAX
+};
+
 #endif /* KFD_DBGDEV_H_ */
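
The TCP_WATCH_CNTL_BITS union above is the usual register-overlay idiom: named bitfields packed LSB-first over one 32-bit register image, with u32All exposing the raw word. A sketch of composing a watch-control word with it (hypothetical values; exact bit placement within the word is ultimately compiler-defined, which is why hardware code reads back u32All):

    #include <stdint.h>

    #define WATCH_CNTL_DEFAULT_MASK 0x00FFFFFF /* ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK */

    union TCP_WATCH_CNTL_BITS {
            struct {
                    uint32_t mask:24;
                    uint32_t vmid:4;
                    uint32_t atc:1;
                    uint32_t mode:2;
                    uint32_t valid:1;
            } bitfields, bits;
            uint32_t u32All;
    };

    static uint32_t make_watch_cntl(uint32_t vmid)
    {
            union TCP_WATCH_CNTL_BITS cntl;

            cntl.u32All = 0;
            cntl.bits.mask = WATCH_CNTL_DEFAULT_MASK; /* watch all 24 mask bits */
            cntl.bits.vmid = vmid & 0xF;
            cntl.bits.valid = 1;

            return cntl.u32All;
    }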
index 4bd6ebfaf425bcc88947bf41cc6b2ad832bc84b9..ab37d36d9cd69f305b5c81bc5b94fb2b1628d497 100644 (file)
@@ -21,6 +21,8 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
 #include "kfd_priv.h"
 
 static struct dentry *debugfs_root;
@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
        return single_open(file, show, NULL);
 }
 
+static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+       const char __user *user_buf, size_t size, loff_t *ppos)
+{
+       struct kfd_dev *dev;
+       char tmp[16];
+       uint32_t gpu_id;
+       int ret = -EINVAL;
+
+       memset(tmp, 0, 16);
+       if (size >= 16) {
+               pr_err("Invalid input for gpu id.\n");
+               goto out;
+       }
+       if (copy_from_user(tmp, user_buf, size)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       if (kstrtoint(tmp, 10, &gpu_id)) {
+               pr_err("Invalid input for gpu id.\n");
+               goto out;
+       }
+       dev = kfd_device_by_id(gpu_id);
+       if (dev) {
+               kfd_debugfs_hang_hws(dev);
+               ret = size;
+       } else
+               pr_err("Cannot find device %d.\n", gpu_id);
+
+out:
+       return ret;
+}
+
 static const struct file_operations kfd_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = kfd_debugfs_open,
@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
        .release = single_release,
 };
 
+static const struct file_operations kfd_debugfs_hang_hws_fops = {
+       .owner = THIS_MODULE,
+       .open = kfd_debugfs_open,
+       .read = seq_read,
+       .write = kfd_debugfs_hang_hws_write,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 void kfd_debugfs_init(void)
 {
        struct dentry *ent;
@@ -65,6 +108,11 @@ void kfd_debugfs_init(void)
        ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
                                  kfd_debugfs_rls_by_device,
                                  &kfd_debugfs_fops);
+
+       ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+                                 NULL,
+                                 &kfd_debugfs_hang_hws_fops);
+
        if (!ent)
                pr_warn("Failed to create rls in kfd debugfs\n");
 }
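
Exercising the new file from user space is a single write of a decimal gpu_id; note the handler rejects inputs of 16 bytes or more. The path below assumes debugfs is mounted at /sys/kernel/debug and that KFD's debugfs root directory is named "kfd" (a sketch; neither is shown in this hunk):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Ask KFD to hang the hardware scheduler of 'gpu_id' (test hook). */
    static int hang_hws(unsigned int gpu_id)
    {
            char buf[15];   /* handler requires size < 16 */
            int fd, n, ret = -1;

            fd = open("/sys/kernel/debug/kfd/hang_hws", O_WRONLY);
            if (fd < 0)
                    return -1;

            n = snprintf(buf, sizeof(buf), "%u", gpu_id);
            if (n > 0 && write(fd, buf, n) == n)
                    ret = 0;
            close(fd);
            return ret;
    }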
index 7ee6cec2c06024182c00c06f4fc07a1f9b12a1c0..1b048715ab8a1df42a391a527823b98212534be5 100644 (file)
 #include "kfd_iommu.h"
 
 #define MQD_SIZE_ALIGNED 768
-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+
+/*
+ * kfd_locked is used to lock the kfd driver during suspend or reset.
+ * Once locked, the kfd driver will stop any further GPU execution;
+ * process creation (open) will return -EAGAIN.
+ */
+static atomic_t kfd_locked = ATOMIC_INIT(0);
 
 #ifdef KFD_SUPPORT_IOMMU_V2
 static const struct kfd_device_info kaveri_device_info = {
@@ -46,6 +52,7 @@ static const struct kfd_device_info kaveri_device_info = {
        .supports_cwsr = false,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info carrizo_device_info = {
@@ -61,6 +68,22 @@ static const struct kfd_device_info carrizo_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = true,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
+};
+
+static const struct kfd_device_info raven_device_info = {
+       .asic_family = CHIP_RAVEN,
+       .max_pasid_bits = 16,
+       .max_no_of_hqd  = 24,
+       .doorbell_size  = 8,
+       .ih_ring_entry_size = 8 * sizeof(uint32_t),
+       .event_interrupt_class = &event_interrupt_class_v9,
+       .num_of_watch_points = 4,
+       .mqd_size_aligned = MQD_SIZE_ALIGNED,
+       .supports_cwsr = true,
+       .needs_iommu_device = true,
+       .needs_pci_atomics = true,
+       .num_sdma_engines = 1,
 };
 #endif
 
@@ -77,6 +100,7 @@ static const struct kfd_device_info hawaii_device_info = {
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info tonga_device_info = {
@@ -91,6 +115,7 @@ static const struct kfd_device_info tonga_device_info = {
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info tonga_vf_device_info = {
@@ -105,6 +130,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
        .supports_cwsr = false,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info fiji_device_info = {
@@ -119,6 +145,7 @@ static const struct kfd_device_info fiji_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info fiji_vf_device_info = {
@@ -133,6 +160,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 
@@ -148,6 +176,7 @@ static const struct kfd_device_info polaris10_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info polaris10_vf_device_info = {
@@ -162,6 +191,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info polaris11_device_info = {
@@ -176,6 +206,7 @@ static const struct kfd_device_info polaris11_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = true,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info vega10_device_info = {
@@ -190,6 +221,7 @@ static const struct kfd_device_info vega10_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 static const struct kfd_device_info vega10_vf_device_info = {
@@ -204,6 +236,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
        .supports_cwsr = true,
        .needs_iommu_device = false,
        .needs_pci_atomics = false,
+       .num_sdma_engines = 2,
 };
 
 
@@ -241,6 +274,7 @@ static const struct kfd_deviceid supported_devices[] = {
        { 0x9875, &carrizo_device_info },       /* Carrizo */
        { 0x9876, &carrizo_device_info },       /* Carrizo */
        { 0x9877, &carrizo_device_info },       /* Carrizo */
+       { 0x15DD, &raven_device_info },         /* Raven */
 #endif
        { 0x67A0, &hawaii_device_info },        /* Hawaii */
        { 0x67A1, &hawaii_device_info },        /* Hawaii */
@@ -514,13 +548,54 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
        kfree(kfd);
 }
 
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+       if (!kfd->init_complete)
+               return 0;
+       kgd2kfd_suspend(kfd);
+
+       /* hold dqm->lock to prevent further execution */
+       dqm_lock(kfd->dqm);
+
+       kfd_signal_reset_event(kfd);
+       return 0;
+}
+
+/*
+ * Fixme: KFD cannot resume existing processes for now.
+ * All existing processes are kept in an evicted state and
+ * we wait for them to terminate.
+ */
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+       int ret, count;
+
+       if (!kfd->init_complete)
+               return 0;
+
+       dqm_unlock(kfd->dqm);
+
+       ret = kfd_resume(kfd);
+       if (ret)
+               return ret;
+       count = atomic_dec_return(&kfd_locked);
+       WARN_ONCE(count != 0, "KFD reset ref. error");
+       return 0;
+}
+
+bool kfd_is_locked(void)
+{
+       return (atomic_read(&kfd_locked) > 0);
+}
+
 void kgd2kfd_suspend(struct kfd_dev *kfd)
 {
        if (!kfd->init_complete)
                return;
 
        /* For first KFD device suspend all the KFD processes */
-       if (atomic_inc_return(&kfd_device_suspended) == 1)
+       if (atomic_inc_return(&kfd_locked) == 1)
                kfd_suspend_all_processes();
 
        kfd->dqm->ops.stop(kfd->dqm);
@@ -539,7 +614,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
        if (ret)
                return ret;
 
-       count = atomic_dec_return(&kfd_device_suspended);
+       count = atomic_dec_return(&kfd_locked);
        WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
        if (count == 0)
                ret = kfd_resume_all_processes();
@@ -577,14 +652,24 @@ dqm_start_error:
 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
+       uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+       bool is_patched = false;
+
        if (!kfd->init_complete)
                return;
 
+       if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+               dev_err_once(kfd_device, "Ring entry too small\n");
+               return;
+       }
+
        spin_lock(&kfd->interrupt_lock);
 
        if (kfd->interrupts_active
-           && interrupt_is_wanted(kfd, ih_ring_entry)
-           && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+           && interrupt_is_wanted(kfd, ih_ring_entry,
+                                  patched_ihre, &is_patched)
+           && enqueue_ih_ring_entry(kfd,
+                                    is_patched ? patched_ihre : ih_ring_entry))
                queue_work(kfd->ih_wq, &kfd->interrupt_work);
 
        spin_unlock(&kfd->interrupt_lock);
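
The interrupt path above now lets the ASIC filter rewrite a ring entry on the ISR stack before it is queued: interrupt_is_wanted() may fill patched_ihre and set is_patched, and enqueue_ih_ring_entry() then receives whichever buffer is current. A stripped-down sketch of that contract (stand-in types and names; the real filter lives in the per-ASIC event code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define RING_ENTRY_WORDS 8 /* stands in for KFD_MAX_RING_ENTRY_SIZE */

    /* Filter: may rewrite the entry into 'patched' and flag it. */
    static bool wanted(const uint32_t *entry, uint32_t *patched,
                       bool *is_patched)
    {
            memcpy(patched, entry, RING_ENTRY_WORDS * sizeof(*entry));
            patched[0] |= 1;        /* e.g. fix up a field before delivery */
            *is_patched = true;
            return true;
    }

    static void handle_entry(const uint32_t *ih_ring_entry,
                             void (*enqueue)(const uint32_t *))
    {
            uint32_t patched[RING_ENTRY_WORDS];
            bool is_patched = false;

            if (wanted(ih_ring_entry, patched, &is_patched))
                    enqueue(is_patched ? patched : ih_ring_entry);
    }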
@@ -739,8 +824,8 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
        if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
                return -ENOMEM;
 
-       *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
-       if ((*mem_obj) == NULL)
+       *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+       if (!(*mem_obj))
                return -ENOMEM;
 
        pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
@@ -857,3 +942,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
        kfree(mem_obj);
        return 0;
 }
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* This function sends a packet to the HIQ to hang the HWS,
+ * which triggers a GPU reset and brings the HWS back to a normal state.
+ */
+int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+{
+       int r = 0;
+
+       if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
+               pr_err("HWS is not enabled");
+               return -EINVAL;
+       }
+
+       r = pm_debugfs_hang_hws(&dev->dqm->packets);
+       if (!r)
+               r = dqm_debugfs_execute_queues(dev->dqm);
+
+       return r;
+}
+
+#endif
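
Taken together, kfd_locked implements a counting interlock rather than a mutex: every suspend or pre-reset increments it, every successful resume or post-reset decrements it, and kfd_open() refuses new processes while it is non-zero. A condensed sketch of the pattern (user-space atomics standing in for the kernel's atomic_t):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int kfd_locked;      /* 0 = unlocked, >0 = suspended/reset */

    static void suspend_all_processes(void) { /* kfd_suspend_all_processes() */ }
    static void resume_all_processes(void)  { /* kfd_resume_all_processes()  */ }

    static bool is_locked(void)        /* mirrors kfd_is_locked() above */
    {
            return atomic_load(&kfd_locked) > 0;
    }

    static void suspend_one_device(void)
    {
            /* Only the first suspender evicts processes; later ones nest. */
            if (atomic_fetch_add(&kfd_locked, 1) == 0)
                    suspend_all_processes();
    }

    static void resume_one_device(void)
    {
            /* Only the last resumer restores processes; <0 is a ref bug. */
            if (atomic_fetch_sub(&kfd_locked, 1) == 1)
                    resume_all_processes();
    }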
index 668ad07ebe1fd045518b3aa6c5d73a69aa5575d6..ec0d62a16e538c305f631b831432df0566b051c8 100644 (file)
@@ -61,6 +61,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
                                unsigned int sdma_queue_id);
 
+static void kfd_process_hw_exception(struct work_struct *work);
+
 static inline
 enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
 {
@@ -99,6 +101,17 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
        return dqm->dev->shared_resources.num_pipe_per_mec;
 }
 
+static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+{
+       return dqm->dev->device_info->num_sdma_engines;
+}
+
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+{
+       return dqm->dev->device_info->num_sdma_engines
+                       * KFD_SDMA_QUEUES_PER_ENGINE;
+}
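
The SDMA queue count is now derived per ASIC as num_sdma_engines * KFD_SDMA_QUEUES_PER_ENGINE (the constant's value is not visible in this hunk). A flat sdma_id then round-robins across engines, exactly as the create paths below compute it; a small worked sketch:

    #include <stdio.h>

    /* Mirrors the mapping used in create_sdma_queue_nocpsch() below. */
    static void map_sdma_id(unsigned int sdma_id, unsigned int num_engines)
    {
            unsigned int queue  = sdma_id / num_engines;
            unsigned int engine = sdma_id % num_engines;

            printf("sdma_id %u -> engine %u, queue %u\n", sdma_id, engine, queue);
    }

    int main(void)
    {
            /* Raven (num_sdma_engines = 1): all ids land on engine 0.   */
            /* Vega10 (num_sdma_engines = 2): ids alternate, as printed: */
            for (unsigned int id = 0; id < 4; id++)
                    map_sdma_id(id, 2);     /* e0,q0  e1,q0  e0,q1  e1,q1 */
            return 0;
    }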
+
 void program_sh_mem_settings(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
 {
@@ -240,7 +253,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
        print_queue(q);
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -297,7 +310,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
                        dqm->total_queue_count);
 
 out_unlock:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
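
From here on, every open-coded mutex_lock(&dqm->lock) becomes dqm_lock(dqm). The helper definitions are not part of this hunk; presumably they are thin inline wrappers around the renamed lock_hidden mutex (the field name is taken from initialize_nocpsch() below), roughly:

    /* A plausible shape for the helpers, assuming they only wrap the mutex. */
    static inline void dqm_lock(struct device_queue_manager *dqm)
    {
            mutex_lock(&dqm->lock_hidden);
    }

    static inline void dqm_unlock(struct device_queue_manager *dqm)
    {
            mutex_unlock(&dqm->lock_hidden);
    }

Hiding the mutex behind accessors keeps callers from taking it directly and gives one place to later add assertions or instrumentation around the lock.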
 
@@ -346,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
 {
        int retval;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
 
-       mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
-       if (!mqd)
+       mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+       if (!mqd_mgr)
                return -ENOMEM;
 
        retval = allocate_hqd(dqm, q);
@@ -360,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
        if (retval)
                goto out_deallocate_hqd;
 
-       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+       retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval)
                goto out_deallocate_doorbell;
@@ -374,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
        if (!q->properties.is_active)
                return 0;
 
-       retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
-                              q->process->mm);
+       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+                       &q->properties, q->process->mm);
        if (retval)
                goto out_uninit_mqd;
 
        return 0;
 
 out_uninit_mqd:
-       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+       mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
 out_deallocate_hqd:
@@ -399,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
                                struct queue *q)
 {
        int retval;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
 
-       mqd = dqm->ops.get_mqd_manager(dqm,
+       mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                get_mqd_type_from_queue_type(q->properties.type));
-       if (!mqd)
+       if (!mqd_mgr)
                return -ENOMEM;
 
        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -420,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 
        deallocate_doorbell(qpd, q);
 
-       retval = mqd->destroy_mqd(mqd, q->mqd,
+       retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
                                KFD_UNMAP_LATENCY_MS,
                                q->pipe, q->queue);
        if (retval == -ETIME)
                qpd->reset_wavefronts = true;
 
-       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+       mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 
        list_del(&q->list);
        if (list_empty(&qpd->queues_list)) {
@@ -457,9 +470,9 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 {
        int retval;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 
        return retval;
 }
@@ -467,19 +480,19 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 {
        int retval;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        bool prev_active = false;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        pdd = kfd_get_process_device_data(q->device, q->process);
        if (!pdd) {
                retval = -ENODEV;
                goto out_unlock;
        }
-       mqd = dqm->ops.get_mqd_manager(dqm,
+       mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
-       if (!mqd) {
+       if (!mqd_mgr) {
                retval = -ENOMEM;
                goto out_unlock;
        }
@@ -506,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
        } else if (prev_active &&
                   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
                    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
-               retval = mqd->destroy_mqd(mqd, q->mqd,
+               retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval) {
@@ -515,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                }
        }
 
-       retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+       retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
 
        /*
         * check active state vs. the previous state and modify
@@ -533,44 +546,44 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
        else if (q->properties.is_active &&
                 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
                  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-               retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
                                       &q->properties, q->process->mm);
 
 out_unlock:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
 static struct mqd_manager *get_mqd_manager(
                struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
 {
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
 
        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;
 
        pr_debug("mqd type %d\n", type);
 
-       mqd = dqm->mqds[type];
-       if (!mqd) {
-               mqd = mqd_manager_init(type, dqm->dev);
-               if (!mqd)
+       mqd_mgr = dqm->mqd_mgrs[type];
+       if (!mqd_mgr) {
+               mqd_mgr = mqd_manager_init(type, dqm->dev);
+               if (!mqd_mgr)
                        pr_err("mqd manager is NULL");
-               dqm->mqds[type] = mqd;
+               dqm->mqd_mgrs[type] = mqd_mgr;
        }
 
-       return mqd;
+       return mqd_mgr;
 }
 
 static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
 {
        struct queue *q;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        int retval = 0;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        if (qpd->evicted++ > 0) /* already evicted, do nothing */
                goto out;
 
@@ -582,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_active)
                        continue;
-               mqd = dqm->ops.get_mqd_manager(dqm,
+               mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
-               if (!mqd) { /* should not be here */
+               if (!mqd_mgr) { /* should not be here */
                        pr_err("Cannot evict queue, mqd mgr is NULL\n");
                        retval = -ENOMEM;
                        goto out;
                }
                q->properties.is_evicted = true;
                q->properties.is_active = false;
-               retval = mqd->destroy_mqd(mqd, q->mqd,
+               retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval)
@@ -600,7 +613,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
        }
 
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -611,7 +624,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
        struct kfd_process_device *pdd;
        int retval = 0;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        if (qpd->evicted++ > 0) /* already evicted, do nothing */
                goto out;
 
@@ -633,7 +646,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -641,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd)
 {
        struct queue *q;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
        uint32_t pd_base;
        int retval = 0;
@@ -650,7 +663,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
        /* Retrieve PD base */
        pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
                goto out;
        if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -677,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_evicted)
                        continue;
-               mqd = dqm->ops.get_mqd_manager(dqm,
+               mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
-               if (!mqd) { /* should not be here */
+               if (!mqd_mgr) { /* should not be here */
                        pr_err("Cannot restore queue, mqd mgr is NULL\n");
                        retval = -ENOMEM;
                        goto out;
                }
                q->properties.is_evicted = false;
                q->properties.is_active = true;
-               retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
                                       q->queue, &q->properties,
                                       q->process->mm);
                if (retval)
@@ -695,7 +708,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
        }
        qpd->evicted = 0;
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -711,7 +724,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
        /* Retrieve PD base */
        pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
                goto out;
        if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -739,7 +752,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
        if (!retval)
                qpd->evicted = 0;
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -761,7 +774,7 @@ static int register_process(struct device_queue_manager *dqm,
        /* Retrieve PD base */
        pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        list_add(&n->list, &dqm->queues);
 
        /* Update PD Base in QPD */
@@ -769,9 +782,10 @@ static int register_process(struct device_queue_manager *dqm,
 
        retval = dqm->asic_ops.update_qpd(dqm, qpd);
 
-       dqm->processes_count++;
+       if (dqm->processes_count++ == 0)
+               dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
 
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 
        return retval;
 }
@@ -786,20 +800,22 @@ static int unregister_process(struct device_queue_manager *dqm,
                        list_empty(&qpd->queues_list) ? "empty" : "not empty");
 
        retval = 0;
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        list_for_each_entry_safe(cur, next, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
                        kfree(cur);
-                       dqm->processes_count--;
+                       if (--dqm->processes_count == 0)
+                               dqm->dev->kfd2kgd->set_compute_idle(
+                                       dqm->dev->kgd, true);
                        goto out;
                }
        }
        /* qpd not found in dqm list */
        retval = 1;
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -838,7 +854,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
        if (!dqm->allocated_queues)
                return -ENOMEM;
 
-       mutex_init(&dqm->lock);
+       mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
        dqm->queue_count = dqm->next_pipe_to_allocate = 0;
        dqm->sdma_queue_count = 0;
@@ -853,7 +869,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
        }
 
        dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
-       dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+       dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
 
        return 0;
 }
@@ -866,8 +882,8 @@ static void uninitialize(struct device_queue_manager *dqm)
 
        kfree(dqm->allocated_queues);
        for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
-               kfree(dqm->mqds[i]);
-       mutex_destroy(&dqm->lock);
+               kfree(dqm->mqd_mgrs[i]);
+       mutex_destroy(&dqm->lock_hidden);
        kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 }
 
@@ -901,7 +917,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
                                unsigned int sdma_queue_id)
 {
-       if (sdma_queue_id >= CIK_SDMA_QUEUES)
+       if (sdma_queue_id >= get_num_sdma_queues(dqm))
                return;
        dqm->sdma_bitmap |= (1 << sdma_queue_id);
 }
@@ -910,19 +926,19 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd)
 {
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
        int retval;
 
-       mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
-       if (!mqd)
+       mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+       if (!mqd_mgr)
                return -ENOMEM;
 
        retval = allocate_sdma_queue(dqm, &q->sdma_id);
        if (retval)
                return retval;
 
-       q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
-       q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+       q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
+       q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);
 
        retval = allocate_doorbell(qpd, q);
        if (retval)
@@ -933,19 +949,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
        pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
 
        dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
-       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+       retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval)
                goto out_deallocate_doorbell;
 
-       retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+                               NULL);
        if (retval)
                goto out_uninit_mqd;
 
        return 0;
 
 out_uninit_mqd:
-       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+       mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
 out_deallocate_sdma_queue:
@@ -1003,12 +1020,14 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 {
        pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
 
-       mutex_init(&dqm->lock);
+       mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
        dqm->queue_count = dqm->processes_count = 0;
        dqm->sdma_queue_count = 0;
        dqm->active_runlist = false;
-       dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+       dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
+       INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
 
        return 0;
 }
@@ -1041,9 +1060,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
 
        init_interrupts(dqm);
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
+       /* clear hang status when the driver tries to start the hw scheduler */
+       dqm->is_hws_hang = false;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 
        return 0;
 fail_allocate_vidmem:
@@ -1055,9 +1076,9 @@ fail_packet_manager_init:
 
 static int stop_cpsch(struct device_queue_manager *dqm)
 {
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 
        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
        pm_uninit(&dqm->packets);
@@ -1069,11 +1090,11 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
                                        struct kernel_queue *kq,
                                        struct qcm_process_device *qpd)
 {
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new kernel queue because %d queues were already created\n",
                                dqm->total_queue_count);
-               mutex_unlock(&dqm->lock);
+               dqm_unlock(dqm);
                return -EPERM;
        }
 
@@ -1089,7 +1110,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
        dqm->queue_count++;
        qpd->is_debug = true;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 
        return 0;
 }
@@ -1098,7 +1119,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
                                        struct kernel_queue *kq,
                                        struct qcm_process_device *qpd)
 {
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
        list_del(&kq->list);
        dqm->queue_count--;
        qpd->is_debug = false;
@@ -1110,18 +1131,18 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
        dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 }
 
 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                        struct qcm_process_device *qpd)
 {
        int retval;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
 
        retval = 0;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -1135,19 +1156,19 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                if (retval)
                        goto out_unlock;
                q->properties.sdma_queue_id =
-                       q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+                       q->sdma_id / get_num_sdma_engines(dqm);
                q->properties.sdma_engine_id =
-                       q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+                       q->sdma_id % get_num_sdma_engines(dqm);
        }
 
        retval = allocate_doorbell(qpd, q);
        if (retval)
                goto out_deallocate_sdma_queue;
 
-       mqd = dqm->ops.get_mqd_manager(dqm,
+       mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
 
-       if (!mqd) {
+       if (!mqd_mgr) {
                retval = -ENOMEM;
                goto out_deallocate_doorbell;
        }
@@ -1164,7 +1185,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
        q->properties.tba_addr = qpd->tba_addr;
        q->properties.tma_addr = qpd->tma_addr;
-       retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+       retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval)
                goto out_deallocate_doorbell;
@@ -1188,7 +1209,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
 
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 
 out_deallocate_doorbell:
@@ -1197,7 +1218,8 @@ out_deallocate_sdma_queue:
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                deallocate_sdma_queue(dqm, q->sdma_id);
 out_unlock:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
+
        return retval;
 }
 
@@ -1210,6 +1232,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
        while (*fence_addr != fence_value) {
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("qcm fence wait loop timeout expired\n");
+                       /* In the HWS case, this is used to halt the driver
+                        * thread so as not to mess up CP states before doing
+                        * scandumps for FW debugging.
+                        */
+                       while (halt_if_hws_hang)
+                               schedule();
+
                        return -ETIME;
                }
                schedule();
@@ -1254,6 +1283,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 {
        int retval = 0;
 
+       if (dqm->is_hws_hang)
+               return -EIO;
        if (!dqm->active_runlist)
                return retval;
 
@@ -1292,9 +1323,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
 {
        int retval;
 
+       if (dqm->is_hws_hang)
+               return -EIO;
        retval = unmap_queues_cpsch(dqm, filter, filter_param);
        if (retval) {
                pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+               dqm->is_hws_hang = true;
+               schedule_work(&dqm->hw_exception_work);
                return retval;
        }
 
@@ -1306,7 +1341,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                                struct queue *q)
 {
        int retval;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
        bool preempt_all_queues;
 
        preempt_all_queues = false;
@@ -1314,7 +1349,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
        retval = 0;
 
        /* remove queue from list to prevent rescheduling after preemption */
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        if (qpd->is_debug) {
                /*
@@ -1326,9 +1361,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
        }
 
-       mqd = dqm->ops.get_mqd_manager(dqm,
+       mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
-       if (!mqd) {
+       if (!mqd_mgr) {
                retval = -ENOMEM;
                goto failed;
        }
@@ -1350,7 +1385,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                        qpd->reset_wavefronts = true;
        }
 
-       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+       mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
 
        /*
         * Unconditionally decrement this counter, regardless of the queue's
@@ -1360,14 +1395,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
 
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
 
        return retval;
 
 failed:
 failed_try_destroy_debugged_queue:
 
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -1391,7 +1426,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
        if (!dqm->asic_ops.set_cache_memory_policy)
                return retval;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        if (alternate_aperture_size == 0) {
                /* base > limit disables APE1 */
@@ -1437,7 +1472,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
                qpd->sh_mem_ape1_limit);
 
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -1468,7 +1503,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
        struct device_process_node *cur, *next_dpn;
        int retval = 0;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        /* Clear all user mode queues */
        list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
@@ -1489,7 +1524,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
                }
        }
 
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -1500,14 +1535,14 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
        int retval;
        struct queue *q, *next;
        struct kernel_queue *kq, *kq_next;
-       struct mqd_manager *mqd;
+       struct mqd_manager *mqd_mgr;
        struct device_process_node *cur, *next_dpn;
        enum kfd_unmap_queues_filter filter =
                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
 
        retval = 0;
 
-       mutex_lock(&dqm->lock);
+       dqm_lock(dqm);
 
        /* Clean all kernel queues */
        list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
@@ -1542,7 +1577,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
        }
 
        retval = execute_queues_cpsch(dqm, filter, 0);
-       if (retval || qpd->reset_wavefronts) {
+       if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
                pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
                dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
                qpd->reset_wavefronts = false;
@@ -1550,19 +1585,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 
        /* lastly, free mqd resources */
        list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
-               mqd = dqm->ops.get_mqd_manager(dqm,
+               mqd_mgr = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
-               if (!mqd) {
+               if (!mqd_mgr) {
                        retval = -ENOMEM;
                        goto out;
                }
                list_del(&q->list);
                qpd->queue_count--;
-               mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+               mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
        }
 
 out:
-       mutex_unlock(&dqm->lock);
+       dqm_unlock(dqm);
        return retval;
 }
 
@@ -1683,6 +1718,30 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
        kfree(dqm);
 }
 
+int kfd_process_vm_fault(struct device_queue_manager *dqm,
+                        unsigned int pasid)
+{
+       struct kfd_process_device *pdd;
+       struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+       int ret = 0;
+
+       if (!p)
+               return -EINVAL;
+       pdd = kfd_get_process_device_data(dqm->dev, p);
+       if (pdd)
+               ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+       kfd_unref_process(p);
+
+       return ret;
+}
+
+static void kfd_process_hw_exception(struct work_struct *work)
+{
+       struct device_queue_manager *dqm = container_of(work,
+                       struct device_queue_manager, hw_exception_work);
+       dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 static void seq_reg_dump(struct seq_file *m,
@@ -1746,8 +1805,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
                }
        }
 
-       for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
-               for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+       for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+               for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
                        r = dqm->dev->kfd2kgd->hqd_sdma_dump(
                                dqm->dev->kgd, pipe, queue, &dump, &n_regs);
                        if (r)
@@ -1764,4 +1823,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
        return r;
 }
 
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+{
+       int r = 0;
+
+       dqm_lock(dqm);
+       dqm->active_runlist = true;
+       r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+       dqm_unlock(dqm);
+
+       return r;
+}
+
 #endif
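
The SDMA id mapping above is worth a worked example: with two SDMA engines (the CIK/VI case) and KFD_SDMA_QUEUES_PER_ENGINE == 2, get_num_sdma_queues() works out to 4, so sdma_bitmap initializes to 0xF and consecutive ids alternate between engines. A minimal sketch, assuming num_engines == 2 (the loop is illustrative, not part of the patch):

	unsigned int num_engines = 2;	/* get_num_sdma_engines(dqm) */
	unsigned int sdma_id;

	for (sdma_id = 0; sdma_id < 4; sdma_id++)
		pr_debug("sdma_id %u -> engine %u, queue %u\n", sdma_id,
			 sdma_id % num_engines,	 /* engine: 0, 1, 0, 1 */
			 sdma_id / num_engines); /* queue:  0, 0, 1, 1 */
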
index 59a6b1956932ffa160915f869f4e3f6222fc0f89..00da3169a0044ace318a8f0b19fde3e8dfab227b 100644 (file)
 
 #include <linux/rwsem.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
 #include "kfd_priv.h"
 #include "kfd_mqd_manager.h"
 
 #define KFD_UNMAP_LATENCY_MS                   (4000)
 #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
-
-#define CIK_SDMA_QUEUES                                (4)
-#define CIK_SDMA_QUEUES_PER_ENGINE             (2)
-#define CIK_SDMA_ENGINE_NUM                    (2)
+#define KFD_SDMA_QUEUES_PER_ENGINE             (2)
 
 struct device_process_node {
        struct qcm_process_device *qpd;
@@ -170,11 +169,12 @@ struct device_queue_manager {
        struct device_queue_manager_ops ops;
        struct device_queue_manager_asic_ops asic_ops;
 
-       struct mqd_manager      *mqds[KFD_MQD_TYPE_MAX];
+       struct mqd_manager      *mqd_mgrs[KFD_MQD_TYPE_MAX];
        struct packet_manager   packets;
        struct kfd_dev          *dev;
-       struct mutex            lock;
+       struct mutex            lock_hidden; /* use dqm_lock/unlock(dqm) */
        struct list_head        queues;
+       unsigned int            saved_flags;
        unsigned int            processes_count;
        unsigned int            queue_count;
        unsigned int            sdma_queue_count;
@@ -190,6 +190,10 @@ struct device_queue_manager {
        struct kfd_mem_obj      *fence_mem;
        bool                    active_runlist;
        int                     sched_policy;
+
+       /* hw exception  */
+       bool                    is_hws_hang;
+       struct work_struct      hw_exception_work;
 };
 
 void device_queue_manager_init_cik(
@@ -207,6 +211,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 unsigned int get_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
 
 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
@@ -219,4 +224,19 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
        return (pdd->lds_base >> 60) & 0x0E;
 }
 
+/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void dqm_lock(struct device_queue_manager *dqm)
+{
+       mutex_lock(&dqm->lock_hidden);
+       dqm->saved_flags = memalloc_nofs_save();
+}
+static inline void dqm_unlock(struct device_queue_manager *dqm)
+{
+       memalloc_nofs_restore(dqm->saved_flags);
+       mutex_unlock(&dqm->lock_hidden);
+}
+
 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
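
dqm_lock() couples the mutex with a memalloc_nofs scope, which is what lets the GFP_NOIO allocations elsewhere in this series relax to GFP_KERNEL: while the lock is held, allocations are implicitly restricted to GFP_NOFS, so filesystem reclaim can never re-enter an MMU notifier that takes the DQM lock. A minimal sketch of the resulting pattern (illustrative only):

	struct kfd_mem_obj *obj;

	dqm_lock(dqm);
	/* implicitly GFP_NOFS here: dqm_lock() called memalloc_nofs_save() */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	dqm_unlock(dqm);
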
index 79e5bcf6367ccea42a1078ac73e1559e57d3dbc0..417515332c35119a39eced74c1460d77774f5c8e 100644 (file)
@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
                qpd->sh_mem_config =
                                SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
-               if (vega10_noretry &&
+               if (noretry &&
                    !dqm->dev->device_info->needs_iommu_device)
                        qpd->sh_mem_config |=
                                1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
index c3744d89352c13bd9c2a3f0581d53dec45fa00a8..ebe79bf00145b2d0d2163a472a2c0123cb43805d 100644 (file)
@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
        *doorbell_off = kfd->doorbell_id_offset + inx;
 
        pr_debug("Get kernel queue doorbell\n"
-                        "     doorbell offset   == 0x%08X\n"
-                        "     kernel address    == %p\n",
-               *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+                       "     doorbell offset   == 0x%08X\n"
+                       "     doorbell index    == 0x%x\n",
+               *doorbell_off, inx);
 
        return kfd->doorbell_kernel_ptr + inx;
 }
@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
 {
        unsigned int inx;
 
-       inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+       inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
+               * sizeof(u32) / kfd->device_info->doorbell_size;
 
        mutex_lock(&kfd->doorbell_mutex);
        __clear_bit(inx, kfd->doorbell_available_index);
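
The reworked release-path index computation converts the pointer distance (in u32 slots) to bytes and then to doorbell slots, so it stays correct when doorbells are wider than 32 bits. Worked numbers, assuming a hypothetical 8-byte doorbell_size:

	/* db_addr sits 6 u32 slots past doorbell_kernel_ptr:
	 *   byte offset    = 6 * sizeof(u32) = 24
	 *   doorbell index = 24 / 8          = 3
	 * With 4-byte doorbells the division is a no-op and inx == 6,
	 * matching the old behaviour.
	 */
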
index 5562e94e786ae2f093f9ebb1d9ac7c801293d7ac..e9f0e0a1b41c074204a69a7745e29bc8e53668ba 100644 (file)
@@ -850,6 +850,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
                                ev->memory_exception_data = *ev_data;
                }
 
+       if (type == KFD_EVENT_TYPE_MEMORY) {
+               dev_warn(kfd_device,
+                       "Sending SIGSEGV to HSA Process with PID %d ",
+                               p->lead_thread->pid);
+               send_sig(SIGSEGV, p->lead_thread, 0);
+       }
+
        /* Send SIGTERM if no event of type "type" has been found */
        if (send_signal) {
                if (send_sigterm) {
@@ -904,34 +911,41 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
        memory_exception_data.failure.NotPresent = 1;
        memory_exception_data.failure.NoExecute = 0;
        memory_exception_data.failure.ReadOnly = 0;
-       if (vma) {
-               if (vma->vm_start > address) {
-                       memory_exception_data.failure.NotPresent = 1;
-                       memory_exception_data.failure.NoExecute = 0;
+       if (vma && address >= vma->vm_start) {
+               memory_exception_data.failure.NotPresent = 0;
+
+               if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+                       memory_exception_data.failure.ReadOnly = 1;
+               else
                        memory_exception_data.failure.ReadOnly = 0;
-               } else {
-                       memory_exception_data.failure.NotPresent = 0;
-                       if (is_write_requested && !(vma->vm_flags & VM_WRITE))
-                               memory_exception_data.failure.ReadOnly = 1;
-                       else
-                               memory_exception_data.failure.ReadOnly = 0;
-                       if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
-                               memory_exception_data.failure.NoExecute = 1;
-                       else
-                               memory_exception_data.failure.NoExecute = 0;
-               }
+
+               if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+                       memory_exception_data.failure.NoExecute = 1;
+               else
+                       memory_exception_data.failure.NoExecute = 0;
        }
 
        up_read(&mm->mmap_sem);
        mmput(mm);
 
-       mutex_lock(&p->event_mutex);
+       pr_debug("notpresent %d, noexecute %d, readonly %d\n",
+                       memory_exception_data.failure.NotPresent,
+                       memory_exception_data.failure.NoExecute,
+                       memory_exception_data.failure.ReadOnly);
 
-       /* Lookup events by type and signal them */
-       lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
-                       &memory_exception_data);
+       /* Workaround on Raven: do not kill the process when memory is freed
+        * before the IOMMU has finished processing all outstanding PPRs.
+        */
+       if (dev->device_info->asic_family != CHIP_RAVEN) {
+               mutex_lock(&p->event_mutex);
+
+               /* Lookup events by type and signal them */
+               lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
+                               &memory_exception_data);
+
+               mutex_unlock(&p->event_mutex);
+       }
 
-       mutex_unlock(&p->event_mutex);
        kfd_unref_process(p);
 }
 #endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -956,3 +970,67 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
        mutex_unlock(&p->event_mutex);
        kfd_unref_process(p);
 }
+
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+                               struct kfd_vm_fault_info *info)
+{
+       struct kfd_event *ev;
+       uint32_t id;
+       struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+       struct kfd_hsa_memory_exception_data memory_exception_data;
+
+       if (!p)
+               return; /* Presumably process exited. */
+       memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+       memory_exception_data.gpu_id = dev->id;
+       memory_exception_data.failure.imprecise = 1;
+       /* Set failure reason */
+       if (info) {
+               memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
+               memory_exception_data.failure.NotPresent =
+                       info->prot_valid ? 1 : 0;
+               memory_exception_data.failure.NoExecute =
+                       info->prot_exec ? 1 : 0;
+               memory_exception_data.failure.ReadOnly =
+                       info->prot_write ? 1 : 0;
+               memory_exception_data.failure.imprecise = 0;
+       }
+       mutex_lock(&p->event_mutex);
+
+       id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+       idr_for_each_entry_continue(&p->event_idr, ev, id)
+               if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+                       ev->memory_exception_data = memory_exception_data;
+                       set_event(ev);
+               }
+
+       mutex_unlock(&p->event_mutex);
+       kfd_unref_process(p);
+}
+
+void kfd_signal_reset_event(struct kfd_dev *dev)
+{
+       struct kfd_hsa_hw_exception_data hw_exception_data;
+       struct kfd_process *p;
+       struct kfd_event *ev;
+       unsigned int temp;
+       uint32_t id, idx;
+
+       /* Whole-GPU reset caused by a GPU hang; memory contents are lost */
+       memset(&hw_exception_data, 0, sizeof(hw_exception_data));
+       hw_exception_data.gpu_id = dev->id;
+       hw_exception_data.memory_lost = 1;
+
+       idx = srcu_read_lock(&kfd_processes_srcu);
+       hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+               mutex_lock(&p->event_mutex);
+               id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+               idr_for_each_entry_continue(&p->event_idr, ev, id)
+                       if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+                               ev->hw_exception_data = hw_exception_data;
+                               set_event(ev);
+                       }
+               mutex_unlock(&p->event_mutex);
+       }
+       srcu_read_unlock(&kfd_processes_srcu, idx);
+}
index abca5bfebbff16fadd5699fff91bf0a939ba0b4b..c7ac6c73af86eb80c1f166bb96286682675a3c4a 100644 (file)
@@ -66,6 +66,7 @@ struct kfd_event {
        /* type specific data */
        union {
                struct kfd_hsa_memory_exception_data memory_exception_data;
+               struct kfd_hsa_hw_exception_data hw_exception_data;
        };
 };
 
index 37029baa334605160b42d1138e62457c555171c1..f836897bbf5833799e7cb94e45be4c10975b9c8c 100644 (file)
@@ -26,7 +26,9 @@
 
 
 static bool event_interrupt_isr_v9(struct kfd_dev *dev,
-                                       const uint32_t *ih_ring_entry)
+                                       const uint32_t *ih_ring_entry,
+                                       uint32_t *patched_ihre,
+                                       bool *patched_flag)
 {
        uint16_t source_id, client_id, pasid, vmid;
        const uint32_t *data = ih_ring_entry;
@@ -57,7 +59,9 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
        return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
                source_id == SOC15_INTSRC_SDMA_TRAP ||
                source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
-               source_id == SOC15_INTSRC_CP_BAD_OPCODE;
+               source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+               client_id == SOC15_IH_CLIENTID_VMC ||
+               client_id == SOC15_IH_CLIENTID_UTCL2;
 }
 
 static void event_interrupt_wq_v9(struct kfd_dev *dev,
@@ -82,7 +86,19 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
                kfd_signal_hw_exception_event(pasid);
        else if (client_id == SOC15_IH_CLIENTID_VMC ||
                 client_id == SOC15_IH_CLIENTID_UTCL2) {
-               /* TODO */
+               struct kfd_vm_fault_info info = {0};
+               uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+               info.vmid = vmid;
+               info.mc_id = client_id;
+               info.page_addr = ih_ring_entry[4] |
+                       (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+               info.prot_valid = ring_id & 0x08;
+               info.prot_read  = ring_id & 0x10;
+               info.prot_write = ring_id & 0x20;
+
+               kfd_process_vm_fault(dev->dqm, pasid);
+               kfd_signal_vm_fault_event(dev, pasid, &info);
        }
 }
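
The fault page address arrives split across two IH ring dwords: entry[4] carries the low 32 bits and the low nibble of entry[5] carries bits 32-35. An illustrative decode with made-up values:

	uint32_t e4 = 0x23456789, e5 = 0x00000001;	/* hypothetical entry */
	uint64_t page_addr = e4 | (uint64_t)(e5 & 0xf) << 32;
	/* page_addr == 0x123456789; the faulting byte address is
	 * page_addr << PAGE_SHIFT, as consumed by kfd_signal_vm_fault_event()
	 */
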
 
index db6d9336b80d2c7b6b12f7b131c07014cc40bd22..c56ac47cd3189779333acc9e8da89a8f9ad7c5be 100644 (file)
@@ -151,13 +151,15 @@ static void interrupt_wq(struct work_struct *work)
                                                                ih_ring_entry);
 }
 
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
+bool interrupt_is_wanted(struct kfd_dev *dev,
+                       const uint32_t *ih_ring_entry,
+                       uint32_t *patched_ihre, bool *flag)
 {
        /* integer and bitwise OR so there is no boolean short-circuiting */
        unsigned int wanted = 0;
 
        wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
-                                                               ih_ring_entry);
+                                        ih_ring_entry, patched_ihre, flag);
 
        return wanted != 0;
 }
index c71817963eea6f2921dab7aaab116283624a9eec..7a61f38c09e65bfd9b33d668799efd9d79ab7aab 100644 (file)
@@ -190,7 +190,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
 {
        struct kfd_dev *dev;
 
-       dev_warn(kfd_device,
+       dev_warn_ratelimited(kfd_device,
                        "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
                        PCI_BUS_NUM(pdev->devfn),
                        PCI_SLOT(pdev->devfn),
index 476951d8c91cc656807d2a3562890169c159a684..9f84b4d9fb884825ce6791c39bb87880d6b33c35 100644 (file)
@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        switch (type) {
        case KFD_QUEUE_TYPE_DIQ:
        case KFD_QUEUE_TYPE_HIQ:
-               kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+               kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
                                                KFD_MQD_TYPE_HIQ);
                break;
        default:
@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
                return false;
        }
 
-       if (!kq->mqd)
+       if (!kq->mqd_mgr)
                return false;
 
        prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
@@ -123,6 +123,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
        prop.eop_ring_buffer_address = kq->eop_gpu_addr;
        prop.eop_ring_buffer_size = PAGE_SIZE;
+       prop.cu_mask = NULL;
 
        if (init_queue(&kq->queue, &prop) != 0)
                goto err_init_queue;
@@ -130,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        kq->queue->device = dev;
        kq->queue->process = kfd_get_process(current);
 
-       retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+       retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
                                        &kq->queue->mqd_mem_obj,
                                        &kq->queue->gart_mqd_addr,
                                        &kq->queue->properties);
@@ -142,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
                pr_debug("Assigning hiq to hqd\n");
                kq->queue->pipe = KFD_CIK_HIQ_PIPE;
                kq->queue->queue = KFD_CIK_HIQ_QUEUE;
-               kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
-                                 kq->queue->queue, &kq->queue->properties,
-                                 NULL);
+               kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
+                               kq->queue->pipe, kq->queue->queue,
+                               &kq->queue->properties, NULL);
        } else {
                /* allocate fence for DIQ */
 
@@ -182,7 +183,7 @@ err_get_kernel_doorbell:
 static void uninitialize(struct kernel_queue *kq)
 {
        if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
-               kq->mqd->destroy_mqd(kq->mqd,
+               kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
                                        kq->queue->mqd,
                                        KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
                                        KFD_UNMAP_LATENCY_MS,
@@ -191,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
        else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
                kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
 
-       kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+       kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
+                               kq->queue->mqd_mem_obj);
 
        kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
        kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
index 97aff2041a5d16ebed8ee14e605066d451f5721d..a7116a93902951c3e3a373d98e08f0519c3d8541 100644 (file)
@@ -70,7 +70,7 @@ struct kernel_queue {
 
        /* data */
        struct kfd_dev          *dev;
-       struct mqd_manager      *mqd;
+       struct mqd_manager      *mqd_mgr;
        struct queue            *queue;
        uint64_t                pending_wptr64;
        uint32_t                pending_wptr;
index 76bf2dc8aec4398afcf5b5835650e445b528cabe..6e1f5c7c2d4be6131d1ed38cb18b38d59dc3383c 100644 (file)
@@ -47,6 +47,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
        .resume_mm      = kgd2kfd_resume_mm,
        .schedule_evict_and_restore_process =
                          kgd2kfd_schedule_evict_and_restore_process,
+       .pre_reset      = kgd2kfd_pre_reset,
+       .post_reset     = kgd2kfd_post_reset,
 };
 
 int sched_policy = KFD_SCHED_POLICY_HWS;
@@ -61,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
 
 int cwsr_enable = 1;
 module_param(cwsr_enable, int, 0444);
-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
 
 int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
 module_param(max_num_of_queues_per_device, int, 0444);
@@ -83,13 +85,19 @@ module_param(ignore_crat, int, 0444);
 MODULE_PARM_DESC(ignore_crat,
        "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
 
-int vega10_noretry;
-module_param_named(noretry, vega10_noretry, int, 0644);
+int noretry;
+module_param(noretry, int, 0644);
 MODULE_PARM_DESC(noretry,
-       "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+       "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled (default), 1 = retry disabled)");
+
+int halt_if_hws_hang;
+module_param(halt_if_hws_hang, int, 0644);
+MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
 
 static int amdkfd_init_completed;
 
 int kgd2kfd_init(unsigned int interface_version,
                const struct kgd2kfd_calls **g2f)
 {
index 4b8eb506642b816d21a3533525943205f215662e..3bc25ab84f34033c96aadbf07cec0bf8226c8c7d 100644 (file)
@@ -21,7 +21,7 @@
  *
  */
 
-#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
 
 struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
                                        struct kfd_dev *dev)
@@ -48,3 +48,42 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
 
        return NULL;
 }
+
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+               const uint32_t *cu_mask, uint32_t cu_mask_count,
+               uint32_t *se_mask)
+{
+       struct kfd_cu_info cu_info;
+       uint32_t cu_per_sh[4] = {0};
+       int i, se, cu = 0;
+
+       mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+
+       if (cu_mask_count > cu_info.cu_active_number)
+               cu_mask_count = cu_info.cu_active_number;
+
+       for (se = 0; se < cu_info.num_shader_engines; se++)
+               for (i = 0; i < 4; i++)
+                       cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+
+       /* Symmetrically map cu_mask to all SEs:
+        * cu_mask[0] bit0 -> se_mask[0] bit0;
+        * cu_mask[0] bit1 -> se_mask[1] bit0;
+        * ... (if # SE is 4)
+        * cu_mask[0] bit4 -> se_mask[0] bit1;
+        * ...
+        */
+       se = 0;
+       for (i = 0; i < cu_mask_count; i++) {
+               if (cu_mask[i / 32] & (1 << (i % 32)))
+                       se_mask[se] |= 1 << cu;
+
+               do {
+                       se++;
+                       if (se == cu_info.num_shader_engines) {
+                               se = 0;
+                               cu++;
+                       }
+               } while (cu >= cu_per_sh[se] && cu < 32);
+       }
+}
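
A worked example of the mapping above, assuming four shader engines that each expose enough active CUs: bit i of cu_mask lands in se_mask[i % 4] at bit position i / 4, so a cu_mask of 0x3f (six CUs) yields se_mask[] == { 0x3, 0x3, 0x1, 0x1 }. Sketch of that degenerate case (illustrative only):

	uint32_t cu_mask[1] = { 0x3f };
	uint32_t se_mask[4] = { 0 };
	int i;

	/* with no SE short of CUs, the loop above reduces to: */
	for (i = 0; i < 6; i++)
		if (cu_mask[i / 32] & (1u << (i % 32)))
			se_mask[i % 4] |= 1u << (i / 4);
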
index 8972bcfbf701c269c14c3bb6804d439429b2bb08..4e84052d4e210e471b326e4715903d66830b7365 100644 (file)
@@ -93,4 +93,8 @@ struct mqd_manager {
        struct kfd_dev  *dev;
 };
 
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+               const uint32_t *cu_mask, uint32_t cu_mask_count,
+               uint32_t *se_mask);
+
 #endif /* KFD_MQD_MANAGER_H_ */
index 06eaa218eba657ca505122e5054f7b997977098f..47243165a082a5221b56fa05929f4f5b46fa8081 100644 (file)
@@ -41,6 +41,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
        return (struct cik_sdma_rlc_registers *)mqd;
 }
 
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q)
+{
+       struct cik_mqd *m;
+       uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+       if (q->cu_mask_count == 0)
+               return;
+
+       mqd_symmetrically_map_cu_mask(mm,
+               q->cu_mask, q->cu_mask_count, se_mask);
+
+       m = get_mqd(mqd);
+       m->compute_static_thread_mgmt_se0 = se_mask[0];
+       m->compute_static_thread_mgmt_se1 = se_mask[1];
+       m->compute_static_thread_mgmt_se2 = se_mask[2];
+       m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+       pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+               m->compute_static_thread_mgmt_se0,
+               m->compute_static_thread_mgmt_se1,
+               m->compute_static_thread_mgmt_se2,
+               m->compute_static_thread_mgmt_se3);
+}
+
 static int init_mqd(struct mqd_manager *mm, void **mqd,
                struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
                struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
        if (q->format == KFD_QUEUE_FORMAT_AQL)
                m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
 
+       update_cu_mask(mm, mqd, q);
+
        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
@@ -408,7 +435,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;
 
-       mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+       mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
        if (!mqd)
                return NULL;
 
index 684054ff02cdc21f9b2b7e1218d2dc15cda5245a..f5fc3675f21eda3d7c769aed2130a9360382afad 100644 (file)
@@ -41,6 +41,31 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
        return (struct v9_sdma_mqd *)mqd;
 }
 
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q)
+{
+       struct v9_mqd *m;
+       uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+       if (q->cu_mask_count == 0)
+               return;
+
+       mqd_symmetrically_map_cu_mask(mm,
+               q->cu_mask, q->cu_mask_count, se_mask);
+
+       m = get_mqd(mqd);
+       m->compute_static_thread_mgmt_se0 = se_mask[0];
+       m->compute_static_thread_mgmt_se1 = se_mask[1];
+       m->compute_static_thread_mgmt_se2 = se_mask[2];
+       m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+       pr_debug("update cu mask to %#x %#x %#x %#x\n",
+               m->compute_static_thread_mgmt_se0,
+               m->compute_static_thread_mgmt_se1,
+               m->compute_static_thread_mgmt_se2,
+               m->compute_static_thread_mgmt_se3);
+}
+
 static int init_mqd(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
@@ -55,7 +80,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
         * instead of sub-allocation function.
         */
        if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
-               *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+               *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
                if (!*mqd_mem_obj)
                        return -ENOMEM;
                retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
@@ -198,6 +223,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
        if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
                m->cp_hqd_ctx_save_control = 0;
 
+       update_cu_mask(mm, mqd, q);
+
        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
@@ -393,7 +420,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;
 
-       mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+       mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
        if (!mqd)
                return NULL;
 
index 481307b8b4dbdae29c4fafd39ff577a410696410..b81fda3754dac850c112b56deef4c84c0c77a50f 100644 (file)
@@ -43,6 +43,31 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
        return (struct vi_sdma_mqd *)mqd;
 }
 
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+                       struct queue_properties *q)
+{
+       struct vi_mqd *m;
+       uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+       if (q->cu_mask_count == 0)
+               return;
+
+       mqd_symmetrically_map_cu_mask(mm,
+               q->cu_mask, q->cu_mask_count, se_mask);
+
+       m = get_mqd(mqd);
+       m->compute_static_thread_mgmt_se0 = se_mask[0];
+       m->compute_static_thread_mgmt_se1 = se_mask[1];
+       m->compute_static_thread_mgmt_se2 = se_mask[2];
+       m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+       pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+               m->compute_static_thread_mgmt_se0,
+               m->compute_static_thread_mgmt_se1,
+               m->compute_static_thread_mgmt_se2,
+               m->compute_static_thread_mgmt_se3);
+}
+
 static int init_mqd(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
                        atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
                        mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
 
+       update_cu_mask(mm, mqd, q);
+
        q->is_active = (q->queue_size > 0 &&
                        q->queue_address != 0 &&
                        q->queue_percent > 0 &&
@@ -394,7 +421,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;
 
-       mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+       mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
        if (!mqd)
                return NULL;
 
index c317feb43f69d8ce76cd1ccae55e79f361db87f7..1092631765cb5b09ef198be0caa2007407db0c13 100644 (file)
@@ -418,4 +418,30 @@ out:
        return 0;
 }
 
+int pm_debugfs_hang_hws(struct packet_manager *pm)
+{
+       uint32_t *buffer, size;
+       int r = 0;
+
+       size = pm->pmf->query_status_size;
+       mutex_lock(&pm->lock);
+       pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+                       size / sizeof(uint32_t), (unsigned int **)&buffer);
+       if (!buffer) {
+               pr_err("Failed to allocate buffer on kernel queue\n");
+               r = -ENOMEM;
+               goto out;
+       }
+       memset(buffer, 0x55, size);
+       pm->priv_queue->ops.submit_packet(pm->priv_queue);
+
+       pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+               buffer[0], buffer[1], buffer[2], buffer[3],
+               buffer[4], buffer[5], buffer[6]);
+out:
+       mutex_unlock(&pm->lock);
+       return r;
+}
+
 #endif
index 5e3990bb4c4be501723b96d3d01813d3c72d793c..f971710f1c91b2edec3670f0a36e837ef947190c 100644 (file)
@@ -73,7 +73,7 @@
 
 /*
  * When working with cp scheduler we should assign the HIQ manually or via
- * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
+ * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
  * definitions for Kaveri. In Kaveri only the first ME's queues participate
  * in the cp scheduling; with that in mind we set the HIQ slot in the
  * second ME.
@@ -142,7 +142,12 @@ extern int ignore_crat;
 /*
  * Set sh_mem_config.retry_disable on Vega10
  */
-extern int vega10_noretry;
+extern int noretry;
+
+/*
+ * Halt if HWS hang is detected
+ */
+extern int halt_if_hws_hang;
 
 /**
  * enum kfd_sched_policy
@@ -180,9 +185,10 @@ enum cache_policy {
 
 struct kfd_event_interrupt_class {
        bool (*interrupt_isr)(struct kfd_dev *dev,
-                               const uint32_t *ih_ring_entry);
+                       const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
+                       bool *patched_flag);
        void (*interrupt_wq)(struct kfd_dev *dev,
-                               const uint32_t *ih_ring_entry);
+                       const uint32_t *ih_ring_entry);
 };
 
 struct kfd_device_info {
@@ -197,6 +203,7 @@ struct kfd_device_info {
        bool supports_cwsr;
        bool needs_iommu_device;
        bool needs_pci_atomics;
+       unsigned int num_sdma_engines;
 };
 
 struct kfd_mem_obj {
@@ -415,6 +422,9 @@ struct queue_properties {
        uint32_t ctl_stack_size;
        uint64_t tba_addr;
        uint64_t tma_addr;
+       /* Relevant for CU masking */
+       uint32_t cu_mask_count; /* Must be a multiple of 32 */
+       uint32_t *cu_mask;
 };
 
 /**
@@ -806,12 +816,18 @@ int kfd_interrupt_init(struct kfd_dev *dev);
 void kfd_interrupt_exit(struct kfd_dev *dev);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
 bool enqueue_ih_ring_entry(struct kfd_dev *kfd,        const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_dev *dev,
+                               const uint32_t *ih_ring_entry,
+                               uint32_t *patched_ihre, bool *flag);
 
 /* Power Management */
 void kgd2kfd_suspend(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd);
 
+/* GPU reset */
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
 /* amdkfd Apertures */
 int kfd_init_apertures(struct kfd_process *process);
 
@@ -838,6 +854,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
                                        enum kfd_queue_type type);
 void kernel_queue_uninit(struct kernel_queue *kq);
+int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
 
 /* Process Queue Manager */
 struct process_queue_node {
@@ -858,6 +875,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
 int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
                        struct queue_properties *p);
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+                       struct queue_properties *p);
 struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
                                                unsigned int qid);
 
@@ -964,10 +983,17 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
                     uint64_t *event_page_offset, uint32_t *event_slot_index);
 int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
 
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+                               struct kfd_vm_fault_info *info);
+
+void kfd_signal_reset_event(struct kfd_dev *dev);
+
 void kfd_flush_tlb(struct kfd_process_device *pdd);
 
 int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
 
+bool kfd_is_locked(void);
+
 /* Debugfs */
 #if defined(CONFIG_DEBUG_FS)
 
@@ -980,6 +1006,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
 int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
 int pm_debugfs_runlist(struct seq_file *m, void *data);
 
+int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int pm_debugfs_hang_hws(struct packet_manager *pm);
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
+
 #else
 
 static inline void kfd_debugfs_init(void) {}
index 1d80b4f7c681cc990fc2e60cb48dc52f8758977c..4694386cc6236238df2230be18a7173ab1bd86af 100644 (file)
@@ -244,6 +244,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
                return ERR_PTR(-EINVAL);
 
        process = find_process(thread);
+       if (!process)
+               return ERR_PTR(-EINVAL);
 
        return process;
 }
index d65ce0436b31ba45ca9ef303b3970f7e5fc42dc6..c8cad9c078ae367f9096a9061b05992dc46c3615 100644 (file)
@@ -186,8 +186,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
        switch (type) {
        case KFD_QUEUE_TYPE_SDMA:
-               if (dev->dqm->queue_count >=
-                       CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+               if (dev->dqm->queue_count >= get_num_sdma_queues(dev->dqm)) {
                        pr_err("Over-subscription is not allowed for SDMA.\n");
                        retval = -EPERM;
                        goto err_create_queue;
@@ -209,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
                ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
                (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
-                       pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+                       pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
                        retval = -EPERM;
                        goto err_create_queue;
                }
@@ -326,6 +325,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
                        if (retval != -ETIME)
                                goto err_destroy_queue;
                }
+               kfree(pqn->q->properties.cu_mask);
+               pqn->q->properties.cu_mask = NULL;
                uninit_queue(pqn->q);
        }
 
@@ -366,6 +367,34 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
        return 0;
 }
 
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+                       struct queue_properties *p)
+{
+       int retval;
+       struct process_queue_node *pqn;
+
+       pqn = get_queue_by_qid(pqm, qid);
+       if (!pqn) {
+               pr_debug("No queue %d exists for update operation\n", qid);
+               return -EFAULT;
+       }
+
+       /* Free the old CU mask memory if it is already allocated, then
+        * allocate memory for the new CU mask.
+        */
+       kfree(pqn->q->properties.cu_mask);
+
+       pqn->q->properties.cu_mask_count = p->cu_mask_count;
+       pqn->q->properties.cu_mask = p->cu_mask;
+
+       retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+                                                       pqn->q);
+       if (retval != 0)
+               return retval;
+
+       return 0;
+}
+
 struct kernel_queue *pqm_get_kernel_queue(
                                        struct process_queue_manager *pqm,
                                        unsigned int qid)
@@ -387,7 +416,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
        struct process_queue_node *pqn;
        struct queue *q;
        enum KFD_MQD_TYPE mqd_type;
-       struct mqd_manager *mqd_manager;
+       struct mqd_manager *mqd_mgr;
        int r = 0;
 
        list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
@@ -410,11 +439,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
                                           q->properties.type, q->device->id);
                                continue;
                        }
-                       mqd_manager = q->device->dqm->ops.get_mqd_manager(
+                       mqd_mgr = q->device->dqm->ops.get_mqd_manager(
                                q->device->dqm, mqd_type);
                } else if (pqn->kq) {
                        q = pqn->kq->queue;
-                       mqd_manager = pqn->kq->mqd;
+                       mqd_mgr = pqn->kq->mqd_mgr;
                        switch (q->properties.type) {
                        case KFD_QUEUE_TYPE_DIQ:
                                seq_printf(m, "  DIQ on device %x\n",
@@ -434,7 +463,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
                        continue;
                }
 
-               r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+               r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
                if (r != 0)
                        break;
        }
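
Note the CU-mask ownership model implied above: pqm_set_cu_mask() kfree()s any previous buffer and takes over the one passed in via queue_properties, which is finally released in pqm_destroy_queue(). A hedged usage sketch (set_mask_example and the chosen mask are illustrative, not part of the patch):

	static int set_mask_example(struct process_queue_manager *pqm,
				    unsigned int qid)
	{
		struct queue_properties p = {};

		p.cu_mask_count = 32;	/* must be a multiple of 32 */
		p.cu_mask = kcalloc(1, sizeof(uint32_t), GFP_KERNEL);
		if (!p.cu_mask)
			return -ENOMEM;
		p.cu_mask[0] = 0x3f;	/* request six CUs */

		/* the queue owns p.cu_mask from here on; do not free it */
		return pqm_set_cu_mask(pqm, qid, &p);
	}
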
index d5d4586e617653b982d54ade172302c5294b9714..325083b0297e4a3d165a50410c3af180d987f7b2 100644 (file)
@@ -9,23 +9,6 @@ config DRM_AMD_DC
          support for AMDGPU. This adds required support for Vega and
          Raven ASICs.
 
-config DRM_AMD_DC_FBC
-       bool "AMD FBC - Enable Frame Buffer Compression"
-       depends on DRM_AMD_DC
-       help
-         Choose this option if you want to use frame buffer compression
-         support.
-         This is a power optimisation feature, check its availability
-         on your hardware before enabling this option.
-
-
-config DRM_AMD_DC_DCN1_0
-       bool "DCN 1.0 Raven family"
-       depends on DRM_AMD_DC && X86
-       help
-         Choose this option if you want to have
-         RV family for display engine
-
 config DEBUG_KERNEL_DC
        bool "Enable kgdb break in DC"
        depends on DRM_AMD_DC
index 357d596484016a8f92cd9afd655cece66857b606..a8a6c106e8c74ff8ebe7c6faff771ceb69f46036 100644 (file)
@@ -97,10 +97,10 @@ share it with drivers. But that's a very long term goal, and by far not just an
 issue with DC - other drivers, especially around DP sink handling, are equally
 guilty.
 
-19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
-stuff just isn't up to the challenges either. We need to figure out something
-that integrates better with DRM and linux debug printing, while not being
-useless with filtering output. dynamic debug printing might be an option.
+19. DONE - The DC logger is still a rather sore thing, but I know that the
+DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
+something that integrates better with DRM and Linux debug printing, while not
+being useless with filtering output. Dynamic debug printing might be an option.
 
 20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
 retimer that we need to program to pass PHY compliance. Currently that's
index af16973f2c412c901746e0e97b78ef545e637374..94911871eb9b5e4218e53f80f570786b865cc700 100644 (file)
 AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
 
 ifneq ($(CONFIG_DRM_AMD_DC),)
-AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o
 endif
 
 ifneq ($(CONFIG_DEBUG_FS),)
-AMDGPUDM += amdgpu_dm_crc.o
+AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o
 endif
 
 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
index f9add85157e7355432aab9d0d14728f8906774f9..45e062022461bfdfd1c34a149971d20fa3cbe907 100644 (file)
@@ -39,6 +39,9 @@
 #include "dm_helpers.h"
 #include "dm_services_types.h"
 #include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -57,7 +60,7 @@
 
 #include "modules/inc/mod_freesync.h"
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "ivsrcid/irqsrcs_dcn_1_0.h"
 
 #include "dcn/dcn_1_0_offset.h"
@@ -347,7 +350,6 @@ static void hotplug_notify_work_func(struct work_struct *work)
        drm_kms_helper_hotplug_event(dev);
 }
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
 /* Allocate memory for FBC compressed data  */
 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
 {
@@ -388,7 +390,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
        }
 
 }
-#endif
 
 
 /* Init display KMS
@@ -902,14 +903,14 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
                                (struct edid *) sink->dc_edid.raw_edid;
 
 
-                       drm_mode_connector_update_edid_property(connector,
+                       drm_connector_update_edid_property(connector,
                                        aconnector->edid);
                }
                amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
 
        } else {
                amdgpu_dm_remove_sink_from_freesync_module(connector);
-               drm_mode_connector_update_edid_property(connector, NULL);
+               drm_connector_update_edid_property(connector, NULL);
                aconnector->num_modes = 0;
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
@@ -1040,7 +1041,7 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);
 
-       if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+       if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
@@ -1191,7 +1192,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
        return 0;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 /* Register IRQ sources and initialize IRQ callbacks */
 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 {
@@ -1525,16 +1526,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        goto fail;
                }
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case CHIP_RAVEN:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
-               /*
-                * Temporary disable until pplib/smu interaction is implemented
-                */
-               dm->dc->debug.disable_stutter = true;
                break;
 #endif
        default:
@@ -1542,6 +1539,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                goto fail;
        }
 
+       if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+               dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
        return 0;
 fail:
        kfree(aencoder);
@@ -1573,18 +1573,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
        /* TODO: implement later */
 }
 
-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
-                                    u8 level)
-{
-       /* TODO: translate amdgpu_encoder to display_index and call DAL */
-}
-
-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-{
-       /* TODO: translate amdgpu_encoder to display_index and call DAL */
-       return 0;
-}
-
 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
                                struct drm_file *filp)
 {
@@ -1613,10 +1601,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
 static const struct amdgpu_display_funcs dm_display_funcs = {
        .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
        .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
-       .backlight_set_level =
-               dm_set_backlight_level,/* called unconditionally */
-       .backlight_get_level =
-               dm_get_backlight_level,/* called unconditionally */
+       .backlight_set_level = NULL, /* never called for DC */
+       .backlight_get_level = NULL, /* never called for DC */
        .hpd_sense = NULL,/* called unconditionally */
        .hpd_set_polarity = NULL, /* called unconditionally */
        .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
@@ -1724,7 +1710,7 @@ static int dm_early_init(void *handle)
                adev->mode_info.num_dig = 6;
                adev->mode_info.plane_type = dm_plane_type_default;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case CHIP_RAVEN:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
@@ -2175,6 +2161,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
        return color_space;
 }
 
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+               return;
+
+       timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+                                               const struct drm_display_info *info)
+{
+       int normalized_clk;
+       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+               return;
+       do {
+               normalized_clk = timing_out->pix_clk_khz;
+               /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+               if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+                       normalized_clk /= 2;
+               /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
+               switch (timing_out->display_color_depth) {
+               case COLOR_DEPTH_101010:
+                       normalized_clk = (normalized_clk * 30) / 24;
+                       break;
+               case COLOR_DEPTH_121212:
+                       normalized_clk = (normalized_clk * 36) / 24;
+                       break;
+               case COLOR_DEPTH_161616:
+                       normalized_clk = (normalized_clk * 48) / 24;
+                       break;
+               default:
+                       return;
+               }
+               if (normalized_clk <= info->max_tmds_clock)
+                       return;
+               reduce_mode_colour_depth(timing_out);
+
+       } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
 /*****************************************************************************/
 
 static void
@@ -2183,6 +2209,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
                                             const struct drm_connector *connector)
 {
        struct dc_crtc_timing *timing_out = &stream->timing;
+       const struct drm_display_info *info = &connector->display_info;
 
        memset(timing_out, 0, sizeof(struct dc_crtc_timing));
 
@@ -2191,8 +2218,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
        timing_out->v_border_top = 0;
        timing_out->v_border_bottom = 0;
        /* TODO: un-hardcode */
-
-       if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+       if (drm_mode_is_420_only(info, mode_in)
+                       && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+       else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
@@ -2228,6 +2257,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+       if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               adjust_colour_depth_from_display_info(timing_out, info);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -3048,15 +3079,25 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;
 
-       r = amdgpu_bo_pin(rbo, domain, &afb->address);
-       amdgpu_bo_unreserve(rbo);
-
+       r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+               amdgpu_bo_unreserve(rbo);
                return r;
        }
 
+       r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+       if (unlikely(r != 0)) {
+               amdgpu_bo_unpin(rbo);
+               amdgpu_bo_unreserve(rbo);
+               DRM_ERROR("%p bind failed\n", rbo);
+               return r;
+       }
+       amdgpu_bo_unreserve(rbo);
+
+       afb->address = amdgpu_bo_gpu_offset(rbo);
+
        amdgpu_bo_ref(rbo);
 
        if (dm_plane_state_new->dc_state &&
@@ -3426,12 +3467,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
        struct edid *edid = amdgpu_dm_connector->edid;
 
        encoder = helper->best_encoder(connector);
-       amdgpu_dm_connector_ddc_get_modes(connector, edid);
-       amdgpu_dm_connector_add_common_modes(encoder, connector);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+       if (!edid || !drm_edid_is_valid(edid)) {
+               drm_add_modes_noedid(connector, 640, 480);
+       } else {
+               amdgpu_dm_connector_ddc_get_modes(connector, edid);
+               amdgpu_dm_connector_add_common_modes(encoder, connector);
+       }
        amdgpu_dm_fbc_init(connector);
-#endif
+
        return amdgpu_dm_connector->num_modes;
 }
 
@@ -3450,7 +3494,6 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        aconnector->base.stereo_allowed = false;
        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
-
        mutex_init(&aconnector->hpd_lock);
 
        /* configure supported HPD hot plug: connector->polled default value is 0
@@ -3459,9 +3502,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        switch (connector_type) {
        case DRM_MODE_CONNECTOR_HDMIA:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+               aconnector->base.ycbcr_420_allowed =
+                       link->link_enc->features.ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+               aconnector->base.ycbcr_420_allowed =
+                       link->link_enc->features.ycbcr420_supported ? true : false;
                break;
        case DRM_MODE_CONNECTOR_DVID:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3614,10 +3661,17 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                link,
                link_index);
 
-       drm_mode_connector_attach_encoder(
+       drm_connector_attach_encoder(
                &aconnector->base, &aencoder->base);
 
        drm_connector_register(&aconnector->base);
+#if defined(CONFIG_DEBUG_FS)
+       res = connector_debugfs_init(aconnector);
+       if (res) {
+               DRM_ERROR("Failed to create debugfs for connector\n");
+               goto out_free;
+       }
+#endif
 
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
                || connector_type == DRM_MODE_CONNECTOR_eDP)
@@ -3914,8 +3968,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
 
        /* Flip */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
-       /* update crtc fb */
-       crtc->primary->fb = fb;
 
        WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
        WARN_ON(!acrtc_state->stream);
@@ -3928,10 +3980,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
        if (acrtc->base.state->event)
                prepare_flip_isr(acrtc);
 
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
        surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
        surface_updates->flip_addr = &addr;
 
-
        dc_commit_updates_for_stream(adev->dm.dc,
                                             surface_updates,
                                             1,
@@ -3944,9 +3997,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
                         __func__,
                         addr.address.grph.addr.high_part,
                         addr.address.grph.addr.low_part);
-
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 /*
@@ -4206,6 +4256,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+       int crtc_disable_count = 0;
 
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
@@ -4410,6 +4461,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                bool modeset_needed;
 
+               if (old_crtc_state->active && !new_crtc_state->active)
+                       crtc_disable_count++;
+
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                modeset_needed = modeset_required(
@@ -4463,11 +4517,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore
         */
+       for (i = 0; i < crtc_disable_count; i++)
+               pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (old_crtc_state->active && !new_crtc_state->active)
-                       pm_runtime_put_autosuspend(dev->dev);
-       }
 }
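
A worked example of the colour-depth fallback added in the adjust_colour_depth_from_display_info() hunk above. The sketch below is a hypothetical stand-alone model, not driver code: it scales the pixel clock by bpc*3/24 (the 30/24, 36/24 and 48/24 factors from the hunk), halves it first for YCbCr 4:2:0, and steps the depth down until the result fits the sink's maximum TMDS clock. The sample clock values are invented, and the kernel code walks the display_color_depth enum rather than a plain bpc integer.

#include <stdio.h>

/* Hypothetical model of adjust_colour_depth_from_display_info(). */
static int fit_colour_depth(int pix_clk_khz, int bpc, int max_tmds_khz,
                            int is_ycbcr420)
{
        while (bpc > 8) {
                int clk = is_ycbcr420 ? pix_clk_khz / 2 : pix_clk_khz;

                clk = clk * (bpc * 3) / 24; /* 30/24, 36/24, 48/24 */
                if (clk <= max_tmds_khz)
                        break;
                bpc -= 2; /* step down, as reduce_mode_colour_depth() does */
        }
        return bpc;
}

int main(void)
{
        /* a 297 MHz mode at 12 bpc on a 340 MHz TMDS sink:
         * 297000 * 36 / 24 = 445500 and 297000 * 30 / 24 = 371250 both
         * exceed 340000, so the mode falls back to 8 bpc */
        printf("%d bpc\n", fit_colour_depth(297000, 12, 340000, 0));
        return 0;
}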
 
 
index d5aa89ad5571d73dbf568ce3a1067f8c485b9088..a29dc35954c9a9ec09e5585b132a3098e87f4b3e 100644 (file)
@@ -72,13 +72,11 @@ struct irq_list_head {
        struct work_struct work;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
 struct dm_comressor_info {
        void *cpu_addr;
        struct amdgpu_bo *bo_ptr;
        uint64_t gpu_addr;
 };
-#endif
 
 
 struct amdgpu_display_manager {
@@ -129,9 +127,8 @@ struct amdgpu_display_manager {
         * Caches device atomic state for suspend/resume
         */
        struct drm_atomic_state *cached_state;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
        struct dm_comressor_info compressor;
-#endif
 };
 
 struct amdgpu_dm_connector {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
new file mode 100644 (file)
index 0000000..0d9e410
--- /dev/null
@@ -0,0 +1,722 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "dc.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_debugfs.h"
+
+/* function description
+ * get/set DP configuration: lane_count, link_rate, spread_spectrum
+ *
+ * valid lane count value: 1, 2, 4
+ * valid link rate value:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * --- to get dp configuration
+ *
+ * cat link_settings
+ *
+ * It will list the current, verified, reported and preferred DP configuration.
+ * current -- for the current video mode
+ * verified --- maximum configuration which passed link training
+ * reported --- caps reported by the DP RX (DPCD register offsets 0, 1, 2)
+ * preferred --- user-forced settings
+ *
+ * --- set (or force) dp configuration
+ *
+ * echo <lane_count>  <link_rate> > link_settings
+ *
+ * for example, to force 4 lanes at 2.7 Gbps per lane,
+ * echo 4 0xa > link_settings
+ *
+ * spread_spectrum cannot be changed dynamically.
+ *
+ * If an invalid lane count or link rate is forced, no HW programming will be
+ * done. Check link settings after the force operation to see whether the HW
+ * was actually programmed.
+ *
+ * cat link_settings
+ *
+ * check current and preferred settings.
+ *
+ */
+static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
+                                size_t size, loff_t *pos)
+{
+       struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+       struct dc_link *link = connector->dc_link;
+       char *rd_buf = NULL;
+       char *rd_buf_ptr = NULL;
+       const uint32_t rd_buf_size = 100;
+       uint32_t result = 0;
+       uint8_t str_len = 0;
+       int r;
+
+       if (*pos & 3 || size & 3)
+               return -EINVAL;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+       if (!rd_buf)
+               return -ENOMEM;
+
+       rd_buf_ptr = rd_buf;
+
+       str_len = strlen("Current:  %d  %d  %d  ");
+       snprintf(rd_buf_ptr, str_len, "Current:  %d  %d  %d  ",
+                       link->cur_link_settings.lane_count,
+                       link->cur_link_settings.link_rate,
+                       link->cur_link_settings.link_spread);
+       rd_buf_ptr += str_len;
+
+       str_len = strlen("Verified:  %d  %d  %d  ");
+       snprintf(rd_buf_ptr, str_len, "Verified:  %d  %d  %d  ",
+                       link->verified_link_cap.lane_count,
+                       link->verified_link_cap.link_rate,
+                       link->verified_link_cap.link_spread);
+       rd_buf_ptr += str_len;
+
+       str_len = strlen("Reported:  %d  %d  %d  ");
+       snprintf(rd_buf_ptr, str_len, "Reported:  %d  %d  %d  ",
+                       link->reported_link_cap.lane_count,
+                       link->reported_link_cap.link_rate,
+                       link->reported_link_cap.link_spread);
+       rd_buf_ptr += str_len;
+
+       str_len = strlen("Preferred:  %d  %d  %d  ");
+       snprintf(rd_buf_ptr, str_len, "Preferred:  %d  %d  %d\n",
+                       link->preferred_link_setting.lane_count,
+                       link->preferred_link_setting.link_rate,
+                       link->preferred_link_setting.link_spread);
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
+                                size_t size, loff_t *pos)
+{
+       struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+       struct dc_link *link = connector->dc_link;
+       struct dc *dc = (struct dc *)link->dc;
+       struct dc_link_settings prefer_link_settings;
+       char *wr_buf = NULL;
+       char *wr_buf_ptr = NULL;
+       const uint32_t wr_buf_size = 40;
+       int r;
+       int bytes_from_user;
+       char *sub_str;
+       /* 0: lane_count; 1: link_rate */
+       uint8_t param_index = 0;
+       long param[2];
+       const char delimiter[3] = {' ', '\n', '\0'};
+       bool valid_input = false;
+
+       if (size == 0)
+               return -EINVAL;
+
+       wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+       if (!wr_buf)
+               return -ENOMEM;
+       wr_buf_ptr = wr_buf;
+
+       r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+       /* r is the number of bytes that could not be copied */
+       if (r >= wr_buf_size) {
+               kfree(wr_buf);
+               DRM_DEBUG_DRIVER("user data not read\n");
+               return -EFAULT;
+       }
+
+       bytes_from_user = wr_buf_size - r;
+
+       while (isspace(*wr_buf_ptr))
+               wr_buf_ptr++;
+
+       while ((*wr_buf_ptr != '\0') && (param_index < 2)) {
+
+               sub_str = strsep(&wr_buf_ptr, delimiter);
+
+               r = kstrtol(sub_str, 16, &param[param_index]);
+
+               if (r)
+                       DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+               param_index++;
+               while (isspace(*wr_buf_ptr))
+                       wr_buf_ptr++;
+       }
+
+       switch (param[0]) {
+       case LANE_COUNT_ONE:
+       case LANE_COUNT_TWO:
+       case LANE_COUNT_FOUR:
+               valid_input = true;
+               break;
+       default:
+               break;
+       }
+
+       switch (param[1]) {
+       case LINK_RATE_LOW:
+       case LINK_RATE_HIGH:
+       case LINK_RATE_RBR2:
+       case LINK_RATE_HIGH2:
+       case LINK_RATE_HIGH3:
+               valid_input = true;
+               break;
+       default:
+               break;
+       }
+
+       if (!valid_input) {
+               kfree(wr_buf);
+               DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n");
+               return bytes_from_user;
+       }
+
+       /* save the user-forced lane_count and link_rate to the preferred
+        * settings; spread spectrum will not be changed
+        */
+       prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+       prefer_link_settings.lane_count = param[0];
+       prefer_link_settings.link_rate = param[1];
+
+       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+
+       kfree(wr_buf);
+       return bytes_from_user;
+}
+
+/* function: get current DP PHY settings: voltage swing, pre-emphasis,
+ * post-cursor2 (defined by VESA DP specification)
+ *
+ * valid values
+ * voltage swing: 0,1,2,3
+ * pre-emphasis : 0,1,2,3
+ * post cursor2 : 0,1,2,3
+ *
+ *
+ * how to use this debugfs
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * there will be directories like DP-1, DP-2, DP-3, etc., one per DP display
+ *
+ * To figure out which DP-x maps to the display to be checked:
+ * cd DP-x
+ * ls -ll
+ * There should be debugfs files like link_settings and phy_settings.
+ * cat link_settings
+ * and use lane_count and link_rate to figure out which DP-x belongs to the
+ * display to be worked on
+ *
+ * To get current DP PHY settings,
+ * cat phy_settings
+ *
+ * To change DP PHY settings,
+ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+ * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2
+ * to 0,
+ * echo 2 3 0 > phy_settings
+ *
+ * To check whether the change was applied, get the current phy settings with
+ * cat phy_settings
+ *
+ * If invalid values are set by the user, e.g.
+ * echo 1 4 0 > phy_settings
+ *
+ * HW will NOT be programmed by these settings.
+ * cat phy_settings will show the previous valid settings.
+ */
+static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
+                                size_t size, loff_t *pos)
+{
+       struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+       struct dc_link *link = connector->dc_link;
+       char *rd_buf = NULL;
+       const uint32_t rd_buf_size = 20;
+       uint32_t result = 0;
+       int r;
+
+       if (*pos & 3 || size & 3)
+               return -EINVAL;
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+       if (!rd_buf)
+               return -ENOMEM;
+
+       snprintf(rd_buf, rd_buf_size, "  %d  %d  %d  ",
+                       link->cur_lane_setting.VOLTAGE_SWING,
+                       link->cur_lane_setting.PRE_EMPHASIS,
+                       link->cur_lane_setting.POST_CURSOR2);
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+
+               r = put_user((*(rd_buf + result)), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
+static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
+                                size_t size, loff_t *pos)
+{
+       struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+       struct dc_link *link = connector->dc_link;
+       struct dc *dc = (struct dc *)link->dc;
+       char *wr_buf = NULL;
+       char *wr_buf_ptr = NULL;
+       uint32_t wr_buf_size = 40;
+       int r;
+       int bytes_from_user;
+       char *sub_str;
+       uint8_t param_index = 0;
+       long param[3];
+       const char delimiter[3] = {' ', '\n', '\0'};
+       bool use_prefer_link_setting;
+       struct link_training_settings link_lane_settings;
+
+       if (size == 0)
+               return 0;
+
+       wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+       if (!wr_buf)
+               return -ENOMEM;
+       wr_buf_ptr = wr_buf;
+
+       r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+       /* r is the number of bytes that could not be copied */
+       if (r >= wr_buf_size) {
+               kfree(wr_buf);
+               DRM_DEBUG_DRIVER("user data not read\n");
+               return -EFAULT;
+       }
+
+       bytes_from_user = wr_buf_size - r;
+
+       while (isspace(*wr_buf_ptr))
+               wr_buf_ptr++;
+
+       while ((*wr_buf_ptr != '\0') && (param_index < 3)) {
+
+               sub_str = strsep(&wr_buf_ptr, delimiter);
+
+               r = kstrtol(sub_str, 16, &param[param_index]);
+
+               if (r)
+                       DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+               param_index++;
+               while (isspace(*wr_buf_ptr))
+                       wr_buf_ptr++;
+       }
+
+       if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) ||
+                       (param[1] > PRE_EMPHASIS_MAX_LEVEL) ||
+                       (param[2] > POST_CURSOR2_MAX_LEVEL)) {
+               kfree(wr_buf);
+               DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n");
+               return bytes_from_user;
+       }
+
+       /* get link settings: lane count, link rate */
+       use_prefer_link_setting =
+               ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) &&
+               (link->test_pattern_enabled));
+
+       memset(&link_lane_settings, 0, sizeof(link_lane_settings));
+
+       if (use_prefer_link_setting) {
+               link_lane_settings.link_settings.lane_count =
+                               link->preferred_link_setting.lane_count;
+               link_lane_settings.link_settings.link_rate =
+                               link->preferred_link_setting.link_rate;
+               link_lane_settings.link_settings.link_spread =
+                               link->preferred_link_setting.link_spread;
+       } else {
+               link_lane_settings.link_settings.lane_count =
+                               link->cur_link_settings.lane_count;
+               link_lane_settings.link_settings.link_rate =
+                               link->cur_link_settings.link_rate;
+               link_lane_settings.link_settings.link_spread =
+                               link->cur_link_settings.link_spread;
+       }
+
+       /* apply phy settings from user */
+       for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
+               link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+                               (enum dc_voltage_swing) (param[0]);
+               link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+                               (enum dc_pre_emphasis) (param[1]);
+               link_lane_settings.lane_settings[r].POST_CURSOR2 =
+                               (enum dc_post_cursor2) (param[2]);
+       }
+
+       /* program ASIC registers and DPCD registers */
+       dc_link_set_drive_settings(dc, &link_lane_settings, link);
+
+       kfree(wr_buf);
+       return bytes_from_user;
+}
+
+/* function description
+ *
+ * set PHY layer or Link layer test pattern
+ * PHY test pattern is used for PHY SI check.
+ * Link layer test will not affect PHY SI.
+ *
+ * Reset Test Pattern:
+ * 0 = DP_TEST_PATTERN_VIDEO_MODE
+ *
+ * PHY test pattern supported:
+ * 1 = DP_TEST_PATTERN_D102
+ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ * 3 = DP_TEST_PATTERN_PRBS7
+ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ * 5 = DP_TEST_PATTERN_CP2520_1
+ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ * 7 = DP_TEST_PATTERN_CP2520_3
+ *
+ * DP PHY Link Training Patterns
+ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
+ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
+ *
+ * DP Link Layer Test pattern
+ * c = DP_TEST_PATTERN_COLOR_SQUARES
+ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ * e = DP_TEST_PATTERN_VERTICAL_BARS
+ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
+ * 10= DP_TEST_PATTERN_COLOR_RAMP
+ *
+ * the test_pattern debugfs file is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * --- set test pattern
+ * echo <test pattern #> > test_pattern
+ *
+ * If the test pattern # is not supported, NO HW programming will be done.
+ * DP_TEST_PATTERN_80BIT_CUSTOM needs an extra 10 bytes of data for the
+ * user pattern; the 10 input bytes are separated by spaces
+ *
+ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > test_pattern
+ *
+ * --- reset test pattern
+ * echo 0 > test_pattern
+ *
+ * --- HPD detection is disabled while a PHY test pattern is set
+ *
+ * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of the
+ * HW ASIC is disabled. The user can then unplug the DP display and attach a
+ * scope to check the test pattern PHY SI.
+ * To unplug the scope and plug the DP display back in, do the steps below:
+ * echo 0 > test_pattern
+ * unplug scope
+ * plug DP display.
+ *
+ * "echo 0 > phy_test_pattern" will re-enable HPD pin again so that video sw
+ * driver could detect "unplug scope" and "plug DP display"
+ */
+static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+                                size_t size, loff_t *pos)
+{
+       struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+       struct dc_link *link = connector->dc_link;
+       char *wr_buf = NULL;
+       char *wr_buf_ptr = NULL;
+       uint32_t wr_buf_size = 100;
+       uint32_t wr_buf_count = 0;
+       int r;
+       int bytes_from_user;
+       char *sub_str = NULL;
+       uint8_t param_index = 0;
+       uint8_t param_nums = 0;
+       long param[11] = {0x0};
+       const char delimiter[3] = {' ', '\n', '\0'};
+       enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+       bool disable_hpd = false;
+       bool valid_test_pattern = false;
+       /* init with the default 80-bit custom pattern */
+       uint8_t custom_pattern[10] = {
+                       0x1f, 0x7c, 0xf0, 0xc1, 0x07,
+                       0x1f, 0x7c, 0xf0, 0xc1, 0x07
+                       };
+       struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+                       LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+       struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+                       LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+       struct link_training_settings link_training_settings;
+       int i;
+
+       if (size == 0)
+               return 0;
+
+       wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+       if (!wr_buf)
+               return -ENOMEM;
+       wr_buf_ptr = wr_buf;
+
+       r = copy_from_user(wr_buf_ptr, buf, wr_buf_size);
+
+       /* r is the number of bytes that could not be copied */
+       if (r >= wr_buf_size) {
+               kfree(wr_buf);
+               DRM_DEBUG_DRIVER("user data not read\n");
+               return -EFAULT;
+       }
+
+       bytes_from_user = wr_buf_size - r;
+
+       /* count the parameters; isspace() matches both space and '\n', so scan manually */
+       while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+               /* skip spaces */
+               while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+                       wr_buf_ptr++;
+                       wr_buf_count++;
+               }
+
+               if (wr_buf_count == wr_buf_size)
+                       break;
+
+               /* skip non-space */
+               while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+                       wr_buf_ptr++;
+                       wr_buf_count++;
+               }
+
+               param_nums++;
+
+               if (wr_buf_count == wr_buf_size)
+                       break;
+       }
+
+       /* max 11 parameters */
+       if (param_nums > 11)
+               param_nums = 11;
+
+       wr_buf_ptr = wr_buf; /* reset buf pointer */
+       wr_buf_count = 0; /* number of chars already checked */
+
+       while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+               wr_buf_ptr++;
+               wr_buf_count++;
+       }
+
+       while (param_index < param_nums) {
+               /* after strsep(), wr_buf_ptr points past the delimiter */
+               sub_str = strsep(&wr_buf_ptr, delimiter);
+
+               r = kstrtol(sub_str, 16, &param[param_index]);
+
+               if (r)
+                       DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+               param_index++;
+       }
+
+       test_pattern = param[0];
+
+       switch (test_pattern) {
+       case DP_TEST_PATTERN_VIDEO_MODE:
+       case DP_TEST_PATTERN_COLOR_SQUARES:
+       case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+       case DP_TEST_PATTERN_VERTICAL_BARS:
+       case DP_TEST_PATTERN_HORIZONTAL_BARS:
+       case DP_TEST_PATTERN_COLOR_RAMP:
+               valid_test_pattern = true;
+               break;
+
+       case DP_TEST_PATTERN_D102:
+       case DP_TEST_PATTERN_SYMBOL_ERROR:
+       case DP_TEST_PATTERN_PRBS7:
+       case DP_TEST_PATTERN_80BIT_CUSTOM:
+       case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+       case DP_TEST_PATTERN_TRAINING_PATTERN4:
+               disable_hpd = true;
+               valid_test_pattern = true;
+               break;
+
+       default:
+               valid_test_pattern = false;
+               test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+               break;
+       }
+
+       if (!valid_test_pattern) {
+               kfree(wr_buf);
+               DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
+               return bytes_from_user;
+       }
+
+       if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+               for (i = 0; i < 10; i++) {
+                       if ((uint8_t) param[i + 1] != 0x0)
+                               break;
+               }
+
+               if (i < 10) {
+                       /* do not use the default value */
+                       for (i = 0; i < 10; i++)
+                               custom_pattern[i] = (uint8_t) param[i + 1];
+               }
+       }
+
+       /* Usage: set a DP physical test pattern using debugfs with a normal
+        * DP panel attached, then unplug the panel and connect a scope to
+        * measure. Normal video mode and test patterns generated from the
+        * CRTC are visible to the user, so do not disable HPD.
+        * Video Mode is also set to clear the test pattern, so enable HPD
+        * because it might have been disabled after a test pattern was set.
+        * AUX depends on HPD; this is sequence dependent, do not move!
+        */
+       if (!disable_hpd)
+               dc_link_enable_hpd(link);
+
+       prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
+       prefer_link_settings.link_rate = link->verified_link_cap.link_rate;
+       prefer_link_settings.link_spread = link->verified_link_cap.link_spread;
+
+       cur_link_settings.lane_count = link->cur_link_settings.lane_count;
+       cur_link_settings.link_rate = link->cur_link_settings.link_rate;
+       cur_link_settings.link_spread = link->cur_link_settings.link_spread;
+
+       link_training_settings.link_settings = cur_link_settings;
+
+
+       if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+               if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN &&
+                       prefer_link_settings.link_rate !=  LINK_RATE_UNKNOWN &&
+                       (prefer_link_settings.lane_count != cur_link_settings.lane_count ||
+                       prefer_link_settings.link_rate != cur_link_settings.link_rate))
+                       link_training_settings.link_settings = prefer_link_settings;
+       }
+
+       for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
+               link_training_settings.lane_settings[i] = link->cur_lane_setting;
+
+       dc_link_set_test_pattern(
+               link,
+               test_pattern,
+               &link_training_settings,
+               custom_pattern,
+               10);
+
+       /* Usage: set a DP physical test pattern using AMDDP with a normal DP
+        * panel, then unplug the DP panel and connect a scope to measure the
+        * DP PHY signal. The interrupt must be disabled so the SW driver does
+        * not disable the DP output. This is done after the test pattern is set.
+        */
+       if (valid_test_pattern && disable_hpd)
+               dc_link_disable_hpd(link);
+
+       kfree(wr_buf);
+
+       return bytes_from_user;
+}
+
+static const struct file_operations dp_link_settings_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .read = dp_link_settings_read,
+       .write = dp_link_settings_write,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_settings_debugfs_fop = {
+       .owner = THIS_MODULE,
+       .read = dp_phy_settings_read,
+       .write = dp_phy_settings_write,
+       .llseek = default_llseek
+};
+
+static const struct file_operations dp_phy_test_pattern_fops = {
+       .owner = THIS_MODULE,
+       .write = dp_phy_test_pattern_debugfs_write,
+       .llseek = default_llseek
+};
+
+static const struct {
+       char *name;
+       const struct file_operations *fops;
+} dp_debugfs_entries[] = {
+               {"link_settings", &dp_link_settings_debugfs_fops},
+               {"phy_settings", &dp_phy_settings_debugfs_fop},
+               {"test_pattern", &dp_phy_test_pattern_fops}
+};
+
+int connector_debugfs_init(struct amdgpu_dm_connector *connector)
+{
+       int i;
+       struct dentry *ent, *dir = connector->base.debugfs_entry;
+
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+               for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+                       ent = debugfs_create_file(dp_debugfs_entries[i].name,
+                                                 0644,
+                                                 dir,
+                                                 connector,
+                                                 dp_debugfs_entries[i].fops);
+                       if (IS_ERR(ent))
+                               return PTR_ERR(ent);
+               }
+       }
+
+       return 0;
+}
+
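
A usage sketch for the new debugfs entries. The DP-1 directory name and DRI minor 0 below are assumptions (pick the right DP-x as the comments above describe); note that dp_link_settings_read() rejects read sizes that are not a multiple of 4, and that debugfs access typically requires root.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/dri/0/DP-1/link_settings";
        char buf[101];
        ssize_t n;
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror(path);
                return 1;
        }

        /* force 4 lanes at 2.7 Gbps (0xa), as in the comment's example */
        if (write(fd, "4 0xa", 5) < 0)
                perror("write");

        /* read back current/verified/reported/preferred settings;
         * the size passed here must be 4-byte aligned */
        n = pread(fd, buf, 100, 0);
        if (n > 0) {
                buf[n] = '\0';
                printf("%s\n", buf);
        }
        close(fd);
        return 0;
}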
similarity index 81%
rename from drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h
rename to drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index b43315cc5d58eadd31e663b3f1e3590232805e5d..d9ed1b2aa811510be17df03565ad2718fa199675 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2018 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: AMD
+ *
  */
 
-#ifndef PP_POWERSOURCE_H
-#define PP_POWERSOURCE_H
+#ifndef __AMDGPU_DM_DEBUGFS_H__
+#define __AMDGPU_DM_DEBUGFS_H__
 
-enum pp_power_source {
-       PP_PowerSource_AC = 0,
-       PP_PowerSource_DC,
-       PP_PowerSource_LimitedPower,
-       PP_PowerSource_LimitedPower_2,
-       PP_PowerSource_Max
-};
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
 
+int connector_debugfs_init(struct amdgpu_dm_connector *connector);
 
 #endif
index ec304b1a5973d39ed859a8e4a4fbfa5dde414eb3..8403b6a9a77bd0008ab30b67153db5392206a5b6 100644 (file)
@@ -169,6 +169,11 @@ static void get_payload_table(
        mutex_unlock(&mst_mgr->payload_lock);
 }
 
+void dm_helpers_dp_update_branch_info(
+       struct dc_context *ctx,
+       const struct dc_link *link)
+{}
+
 /*
  * Writes payload allocation table in immediate downstream device.
  */
@@ -454,6 +459,22 @@ bool dm_helpers_submit_i2c(
        return result;
 }
 
+bool dm_helpers_is_dp_sink_present(struct dc_link *link)
+{
+       bool dp_sink_present;
+       struct amdgpu_dm_connector *aconnector = link->priv;
+
+       if (!aconnector) {
+               DRM_ERROR("Failed to find connector for link!\n");
+               return true;
+       }
+
+       mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
+       dp_sink_present = dc_link_is_dp_sink_present(link);
+       mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
+       return dp_sink_present;
+}
+
 enum dc_edid_status dm_helpers_read_local_edid(
                struct dc_context *ctx,
                struct dc_link *link,
@@ -497,6 +518,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
                DRM_ERROR("EDID err: %d, on connector: %s",
                                edid_status,
                                aconnector->base.name);
+       if (link->aux_mode) {
+               union test_request test_request = { {0} };
+               union test_response test_response = { {0} };
+
+               dm_helpers_dp_read_dpcd(ctx,
+                                       link,
+                                       DP_TEST_REQUEST,
+                                       &test_request.raw,
+                                       sizeof(union test_request));
+
+               if (!test_request.bits.EDID_READ)
+                       return edid_status;
+
+               test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+               dm_helpers_dp_write_dpcd(ctx,
+                                       link,
+                                       DP_TEST_EDID_CHECKSUM,
+                                       &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+                                       1);
+
+               dm_helpers_dp_write_dpcd(ctx,
+                                       link,
+                                       DP_TEST_RESPONSE,
+                                       &test_response.raw,
+                                       sizeof(test_response));
+
+       }
 
        return edid_status;
 }
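
The checksum byte written above is simply the last byte of the EDID block: every 128-byte EDID block sums to 0 mod 256, so its final byte is the checksum that the DP EDID-read compliance test wants echoed back. A minimal sketch of that invariant, with toy data rather than a real EDID:

#include <stdint.h>
#include <stdio.h>

/* The last byte of a 128-byte EDID block makes the whole block sum to 0. */
static uint8_t edid_block_checksum(const uint8_t block[128])
{
        uint8_t sum = 0;
        int i;

        for (i = 0; i < 127; i++)
                sum += block[i];
        return (uint8_t)(0x100 - sum);
}

int main(void)
{
        uint8_t block[128] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
        uint8_t sum = 0;
        int i;

        block[127] = edid_block_checksum(block);
        for (i = 0; i < 128; i++)
                sum += block[i];
        printf("sum mod 256 = %u\n", sum); /* prints 0 */
        return 0;
}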
index 4304d9e408b88d180eabac07327497fdda353b25..9a300732ba3747a86541d8b62c2562e27e96b9fc 100644 (file)
@@ -80,55 +80,72 @@ static void log_dpcd(uint8_t type,
 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
 {
-       enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
-               I2C_MOT_TRUE : I2C_MOT_FALSE;
-       enum ddc_result res;
-       uint32_t read_bytes = msg->size;
+       ssize_t result = 0;
+       enum i2caux_transaction_action action;
+       enum aux_transaction_type type;
 
        if (WARN_ON(msg->size > 16))
                return -E2BIG;
 
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_READ:
-               res = dal_ddc_service_read_dpcd_data(
-                               TO_DM_AUX(aux)->ddc_service,
-                               false,
-                               I2C_MOT_UNDEF,
-                               msg->address,
-                               msg->buffer,
-                               msg->size,
-                               &read_bytes);
+               type = AUX_TRANSACTION_TYPE_DP;
+               action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+
+               result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+                                             msg->address,
+                                             &msg->reply,
+                                             msg->buffer,
+                                             msg->size,
+                                             type,
+                                             action);
                break;
        case DP_AUX_NATIVE_WRITE:
-               res = dal_ddc_service_write_dpcd_data(
-                               TO_DM_AUX(aux)->ddc_service,
-                               false,
-                               I2C_MOT_UNDEF,
-                               msg->address,
-                               msg->buffer,
-                               msg->size);
+               type = AUX_TRANSACTION_TYPE_DP;
+               action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
+               dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+                                    msg->address,
+                                    &msg->reply,
+                                    msg->buffer,
+                                    msg->size,
+                                    type,
+                                    action);
+               result = msg->size;
                break;
        case DP_AUX_I2C_READ:
-               res = dal_ddc_service_read_dpcd_data(
-                               TO_DM_AUX(aux)->ddc_service,
-                               true,
-                               mot,
-                               msg->address,
-                               msg->buffer,
-                               msg->size,
-                               &read_bytes);
+               type = AUX_TRANSACTION_TYPE_I2C;
+               if (msg->request & DP_AUX_I2C_MOT)
+                       action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+               else
+                       action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+
+               result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+                                             msg->address,
+                                             &msg->reply,
+                                             msg->buffer,
+                                             msg->size,
+                                             type,
+                                             action);
                break;
        case DP_AUX_I2C_WRITE:
-               res = dal_ddc_service_write_dpcd_data(
-                               TO_DM_AUX(aux)->ddc_service,
-                               true,
-                               mot,
-                               msg->address,
-                               msg->buffer,
-                               msg->size);
+               type = AUX_TRANSACTION_TYPE_I2C;
+               if (msg->request & DP_AUX_I2C_MOT)
+                       action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
+               else
+                       action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+
+               dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service,
+                                    msg->address,
+                                    &msg->reply,
+                                    msg->buffer,
+                                    msg->size,
+                                    type,
+                                    action);
+               result = msg->size;
                break;
        default:
-               return 0;
+               return -EINVAL;
        }
 
 #ifdef TRACE_DPCD
@@ -139,9 +156,10 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                 r == DDC_RESULT_SUCESSFULL);
 #endif
 
-       if (res != DDC_RESULT_SUCESSFULL)
-               return -EIO;
-       return read_bytes;
+       if (result < 0) /* DC doesn't know about kernel error codes */
+               result = -EIO;
+
+       return result;
 }
 
 static enum drm_connector_status
@@ -233,7 +251,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
                if (!edid) {
-                       drm_mode_connector_update_edid_property(
+                       drm_connector_update_edid_property(
                                &aconnector->base,
                                NULL);
                        return ret;
@@ -261,7 +279,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                                        connector, aconnector->edid);
        }
 
-       drm_mode_connector_update_edid_property(
+       drm_connector_update_edid_property(
                                        &aconnector->base, aconnector->edid);
 
        ret = drm_add_edid_modes(connector, aconnector->edid);
@@ -345,7 +363,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                                                aconnector, connector->base.id, aconnector->mst_port);
 
                        aconnector->port = port;
-                       drm_mode_connector_set_path_property(connector, pathprop);
+                       drm_connector_set_path_property(connector, pathprop);
 
                        drm_connector_list_iter_end(&conn_iter);
                        aconnector->mst_connected = true;
@@ -393,7 +411,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                dev->mode_config.tile_property,
                0);
 
-       drm_mode_connector_set_path_property(connector, pathprop);
+       drm_connector_set_path_property(connector, pathprop);
 
        /*
         * Initialize connector state before adding the connectror to drm and
@@ -441,7 +459,7 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
 {
        mutex_lock(&connector->dev->mode_config.mutex);
-       drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+       drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
        mutex_unlock(&connector->dev->mode_config.mutex);
 }
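
For AUX consumers nothing changes: the DRM helpers still route native and I2C-over-AUX requests through the rewritten .transfer hook above. A hedged in-kernel sketch of the caller side (probe_sink_rev() is hypothetical; drm_dp_dpcd_read() is the standard helper, and a native read through it lands in the DP_AUX_NATIVE_READ case):

#include <drm/drm_dp_helper.h>

/* Hypothetical consumer: a one-byte native AUX read of DPCD offset 0. */
static int probe_sink_rev(struct drm_dp_aux *aux)
{
        u8 dpcd_rev;
        ssize_t ret;

        ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, &dpcd_rev, 1);
        if (ret < 0)
                return ret; /* errors now arrive as -EIO or -EINVAL */

        return dpcd_rev; /* e.g. 0x12 for DPCD 1.2 */
}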
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
new file mode 100644 (file)
index 0000000..fbe878a
--- /dev/null
@@ -0,0 +1,562 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+#include "dm_pp_smu.h"
+
+
+bool dm_pp_apply_display_requirements(
+               const struct dc_context *ctx,
+               const struct dm_pp_display_configuration *pp_display_cfg)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       int i;
+
+       if (adev->pm.dpm_enabled) {
+
+               memset(&adev->pm.pm_display_cfg, 0,
+                               sizeof(adev->pm.pm_display_cfg));
+
+               adev->pm.pm_display_cfg.cpu_cc6_disable =
+                       pp_display_cfg->cpu_cc6_disable;
+
+               adev->pm.pm_display_cfg.cpu_pstate_disable =
+                       pp_display_cfg->cpu_pstate_disable;
+
+               adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+                       pp_display_cfg->cpu_pstate_separation_time;
+
+               adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+                       pp_display_cfg->nb_pstate_switch_disable;
+
+               adev->pm.pm_display_cfg.num_display =
+                               pp_display_cfg->display_count;
+               adev->pm.pm_display_cfg.num_path_including_non_display =
+                               pp_display_cfg->display_count;
+
+               adev->pm.pm_display_cfg.min_core_set_clock =
+                               pp_display_cfg->min_engine_clock_khz/10;
+               adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+                               pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+               adev->pm.pm_display_cfg.min_mem_set_clock =
+                               pp_display_cfg->min_memory_clock_khz/10;
+
+               adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
+                               pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+               adev->pm.pm_display_cfg.min_dcef_set_clk =
+                               pp_display_cfg->min_dcfclock_khz/10;
+
+               adev->pm.pm_display_cfg.multi_monitor_in_sync =
+                               pp_display_cfg->all_displays_in_sync;
+               adev->pm.pm_display_cfg.min_vblank_time =
+                               pp_display_cfg->avail_mclk_switch_time_us;
+
+               adev->pm.pm_display_cfg.display_clk =
+                               pp_display_cfg->disp_clk_khz/10;
+
+               adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+                               pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+               adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+               adev->pm.pm_display_cfg.line_time_in_us =
+                               pp_display_cfg->line_time_in_us;
+
+               adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+               adev->pm.pm_display_cfg.crossfire_display_index = -1;
+               adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+               for (i = 0; i < pp_display_cfg->display_count; i++) {
+                       const struct dm_pp_single_disp_config *dc_cfg =
+                                               &pp_display_cfg->disp_configs[i];
+                       adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+               }
+
+               /* TODO: complete implementation of
+                * pp_display_configuration_change().
+                * Follow example of:
+                * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+                * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+               if (adev->powerplay.pp_funcs->display_configuration_change)
+                       adev->powerplay.pp_funcs->display_configuration_change(
+                               adev->powerplay.pp_handle,
+                               &adev->pm.pm_display_cfg);
+
+               /* TODO: replace by a separate call to 'apply display cfg'? */
+               amdgpu_pm_compute_clocks(adev);
+       }
+
+       return true;
+}
+
+static void get_default_clock_levels(
+               enum dm_pp_clock_type clk_type,
+               struct dm_pp_clock_levels *clks)
+{
+       uint32_t disp_clks_in_khz[6] = {
+                       300000, 400000, 496560, 626090, 685720, 757900 };
+       uint32_t sclks_in_khz[6] = {
+                       300000, 360000, 423530, 514290, 626090, 720000 };
+       uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+       switch (clk_type) {
+       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+               clks->num_levels = 6;
+               memmove(clks->clocks_in_khz, disp_clks_in_khz,
+                               sizeof(disp_clks_in_khz));
+               break;
+       case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+               clks->num_levels = 6;
+               memmove(clks->clocks_in_khz, sclks_in_khz,
+                               sizeof(sclks_in_khz));
+               break;
+       case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+               clks->num_levels = 2;
+               memmove(clks->clocks_in_khz, mclks_in_khz,
+                               sizeof(mclks_in_khz));
+               break;
+       default:
+               clks->num_levels = 0;
+               break;
+       }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+               enum dm_pp_clock_type dm_pp_clk_type)
+{
+       enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+       switch (dm_pp_clk_type) {
+       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+               amd_pp_clk_type = amd_pp_disp_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+               amd_pp_clk_type = amd_pp_sys_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+               amd_pp_clk_type = amd_pp_mem_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_DCEFCLK:
+               amd_pp_clk_type  = amd_pp_dcef_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_DCFCLK:
+               amd_pp_clk_type = amd_pp_dcf_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_PIXELCLK:
+               amd_pp_clk_type = amd_pp_pixel_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_FCLK:
+               amd_pp_clk_type = amd_pp_f_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+               amd_pp_clk_type = amd_pp_phy_clock;
+               break;
+       case DM_PP_CLOCK_TYPE_DPPCLK:
+               amd_pp_clk_type = amd_pp_dpp_clock;
+               break;
+       default:
+               DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+                               dm_pp_clk_type);
+               break;
+       }
+
+       return amd_pp_clk_type;
+}
+
+static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
+                       enum PP_DAL_POWERLEVEL max_clocks_state)
+{
+       switch (max_clocks_state) {
+       case PP_DAL_POWERLEVEL_0:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
+       case PP_DAL_POWERLEVEL_1:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
+       case PP_DAL_POWERLEVEL_2:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
+       case PP_DAL_POWERLEVEL_3:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
+       case PP_DAL_POWERLEVEL_4:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
+       case PP_DAL_POWERLEVEL_5:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
+       case PP_DAL_POWERLEVEL_6:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
+       case PP_DAL_POWERLEVEL_7:
+               return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
+       default:
+               DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
+                               max_clocks_state);
+               return DM_PP_CLOCKS_STATE_INVALID;
+       }
+}
+
+static void pp_to_dc_clock_levels(
+               const struct amd_pp_clocks *pp_clks,
+               struct dm_pp_clock_levels *dc_clks,
+               enum dm_pp_clock_type dc_clk_type)
+{
+       uint32_t i;
+
+       if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+               DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+                               DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+                               pp_clks->count,
+                               DM_PP_MAX_CLOCK_LEVELS);
+
+               dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+       } else
+               dc_clks->num_levels = pp_clks->count;
+
+       DRM_INFO("DM_PPLIB: values for %s clock\n",
+                       DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+       for (i = 0; i < dc_clks->num_levels; i++) {
+               DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+               dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
+       }
+}
+
+static void pp_to_dc_clock_levels_with_latency(
+               const struct pp_clock_levels_with_latency *pp_clks,
+               struct dm_pp_clock_levels_with_latency *clk_level_info,
+               enum dm_pp_clock_type dc_clk_type)
+{
+       uint32_t i;
+
+       if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+               DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+                               DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+                               pp_clks->num_levels,
+                               DM_PP_MAX_CLOCK_LEVELS);
+
+               clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+       } else
+               clk_level_info->num_levels = pp_clks->num_levels;
+
+       DRM_DEBUG("DM_PPLIB: values for %s clock\n",
+                       DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+       for (i = 0; i < clk_level_info->num_levels; i++) {
+               DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+               clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
+       }
+}
+
+static void pp_to_dc_clock_levels_with_voltage(
+               const struct pp_clock_levels_with_voltage *pp_clks,
+               struct dm_pp_clock_levels_with_voltage *clk_level_info,
+               enum dm_pp_clock_type dc_clk_type)
+{
+       uint32_t i;
+
+       if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
+               DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+                               DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+                               pp_clks->num_levels,
+                               DM_PP_MAX_CLOCK_LEVELS);
+
+               clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+       } else
+               clk_level_info->num_levels = pp_clks->num_levels;
+
+       DRM_INFO("DM_PPLIB: values for %s clock\n",
+                       DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+       for (i = 0; i < clk_level_info->num_levels; i++) {
+               DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+               clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
+       }
+}
+
+bool dm_pp_get_clock_levels_by_type(
+               const struct dc_context *ctx,
+               enum dm_pp_clock_type clk_type,
+               struct dm_pp_clock_levels *dc_clks)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       struct amd_pp_clocks pp_clks = { 0 };
+       struct amd_pp_simple_clock_info validation_clks = { 0 };
+       uint32_t i;
+
+       if (adev->powerplay.pp_funcs->get_clock_by_type) {
+               if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
+                       dc_to_pp_clock_type(clk_type), &pp_clks)) {
+                       /* Error in pplib. Provide default values. */
+                       get_default_clock_levels(clk_type, dc_clks);
+                       return true;
+               }
+       }
+
+       pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+       if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+               if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
+                                               pp_handle, &validation_clks)) {
+                       /* Error in pplib. Provide default values. */
+                       DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+                       validation_clks.engine_max_clock = 72000;
+                       validation_clks.memory_max_clock = 80000;
+                       validation_clks.level = 0;
+               }
+       }
+
+       DRM_INFO("DM_PPLIB: Validation clocks:\n");
+       DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
+                       validation_clks.engine_max_clock);
+       DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
+                       validation_clks.memory_max_clock);
+       DRM_INFO("DM_PPLIB:    level           : %d\n",
+                       validation_clks.level);
+
+       /* Translate 10 kHz to kHz. */
+       validation_clks.engine_max_clock *= 10;
+       validation_clks.memory_max_clock *= 10;
+
+       /* Determine the highest non-boosted level from the Validation Clocks */
+       if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+               for (i = 0; i < dc_clks->num_levels; i++) {
+                       if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+                               /* This clock is higher than the validation
+                                * clock. That means the previous one is the
+                                * highest non-boosted one. */
+                               DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+                                               dc_clks->num_levels, i);
+                               dc_clks->num_levels = i > 0 ? i : 1;
+                               break;
+                       }
+               }
+       } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+               for (i = 0; i < dc_clks->num_levels; i++) {
+                       if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+                               DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+                                               dc_clks->num_levels, i);
+                               dc_clks->num_levels = i > 0 ? i : 1;
+                               break;
+                       }
+               }
+       }
+
+       return true;
+}
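
The loops above clamp num_levels to the highest level that still fits under
the validation clock, always keeping at least one level. The same logic on
plain arrays, runnable stand-alone with the default sclk table and a made-up
600 MHz limit:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t clamp_levels(const uint32_t *khz, uint32_t n,
                                 uint32_t max_khz)
    {
            uint32_t i;

            for (i = 0; i < n; i++) {
                    if (khz[i] > max_khz)
                            /* everything below index i fits; keep at
                             * least one level */
                            return i > 0 ? i : 1;
            }
            return n;
    }

    int main(void)
    {
            const uint32_t sclk[6] = {
                    300000, 360000, 423530, 514290, 626090, 720000 };

            /* with a 600 MHz validation limit, four levels remain */
            printf("%u levels\n", (unsigned)clamp_levels(sclk, 6, 600000));
            return 0;
    }
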
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+       const struct dc_context *ctx,
+       enum dm_pp_clock_type clk_type,
+       struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       struct pp_clock_levels_with_latency pp_clks = { 0 };
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
+               return false;
+
+       if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
+                                                    dc_to_pp_clock_type(clk_type),
+                                                    &pp_clks))
+               return false;
+
+       pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
+
+       return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+       const struct dc_context *ctx,
+       enum dm_pp_clock_type clk_type,
+       struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       struct pp_clock_levels_with_voltage pp_clk_info = {0};
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+                                                    dc_to_pp_clock_type(clk_type),
+                                                    &pp_clk_info))
+               return false;
+
+       pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
+
+       return true;
+}
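
Note that, unlike dm_pp_get_clock_levels_by_type_with_latency above, this
path invokes pp_funcs->get_clock_by_type_with_voltage without first checking
either the ops table or the hook. A stand-alone sketch of the guard pattern
the latency variant uses, with invented struct and function names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pm_funcs {
            int (*get_clocks)(void *handle);        /* optional hook */
    };

    static bool query(const struct pm_funcs *funcs, void *handle)
    {
            /* guard both the ops table and the individual hook, as the
             * _with_latency variant does */
            if (!funcs || !funcs->get_clocks)
                    return false;
            return funcs->get_clocks(handle) == 0;
    }

    int main(void)
    {
            struct pm_funcs empty = { NULL };

            printf("%d\n", query(&empty, NULL));    /* prints 0 */
            return 0;
    }
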
+
+bool dm_pp_notify_wm_clock_changes(
+       const struct dc_context *ctx,
+       struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+       /* TODO: to be implemented */
+       return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+       const struct dc_context *ctx,
+       struct dm_pp_power_level_change_request *level_change_req)
+{
+       /* TODO: to be implemented */
+       return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+       const struct dc_context *ctx,
+       struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct pp_display_clock_request pp_clock_request = {0};
+       int ret = 0;
+
+       pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
+       pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
+
+       if (!pp_clock_request.clock_type)
+               return false;
+
+       if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+               ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
+                       adev->powerplay.pp_handle,
+                       &pp_clock_request);
+       if (ret)
+               return false;
+       return true;
+}
+
+bool dm_pp_get_static_clocks(
+       const struct dc_context *ctx,
+       struct dm_pp_static_clock_info *static_clk_info)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct amd_pp_clock_info pp_clk_info = {0};
+       int ret = 0;
+
+       if (adev->powerplay.pp_funcs->get_current_clocks)
+               ret = adev->powerplay.pp_funcs->get_current_clocks(
+                       adev->powerplay.pp_handle,
+                       &pp_clk_info);
+       if (ret)
+               return false;
+
+       static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
+       static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+       static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
+
+       return true;
+}
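
pplib reports max_engine_clock and max_memory_clock in 10 kHz units, hence
the *10 scaling before handing the values to DC; the same convention appears
in the validation-clock handling above. A trivial stand-alone illustration
of the conversion, with sample values:

    #include <stdio.h>

    int main(void)
    {
            /* sample values in pplib's 10 kHz units */
            unsigned int max_engine_clock = 72000;  /* 720 MHz */
            unsigned int max_memory_clock = 80000;  /* 800 MHz */

            printf("sclk: %u kHz\n", max_engine_clock * 10); /* 720000 */
            printf("mclk: %u kHz\n", max_memory_clock * 10); /* 800000 */
            return 0;
    }
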
+
+void pp_rv_set_display_requirement(struct pp_smu *pp,
+               struct pp_smu_display_requirement_rv *req)
+{
+       struct dc_context *ctx = pp->ctx;
+       struct amdgpu_device *adev = ctx->driver_context;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs || !pp_funcs->display_configuration_changed)
+               return;
+
+       amdgpu_dpm_display_configuration_changed(adev);
+}
+
+void pp_rv_set_wm_ranges(struct pp_smu *pp,
+               struct pp_smu_wm_range_sets *ranges)
+{
+       struct dc_context *ctx = pp->ctx;
+       struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+       struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
+       struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
+       int32_t i;
+
+       wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+       wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
+       for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+               if (ranges->reader_wm_sets[i].wm_inst > 3)
+                       wm_dce_clocks[i].wm_set_id = WM_SET_A;
+               else
+                       wm_dce_clocks[i].wm_set_id =
+                                       ranges->reader_wm_sets[i].wm_inst;
+               wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+                               ranges->reader_wm_sets[i].max_drain_clk_khz;
+               wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+                               ranges->reader_wm_sets[i].min_drain_clk_khz;
+               wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+                               ranges->reader_wm_sets[i].max_fill_clk_khz;
+               wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+                               ranges->reader_wm_sets[i].min_fill_clk_khz;
+       }
+
+       for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+               if (ranges->writer_wm_sets[i].wm_inst > 3)
+                       wm_soc_clocks[i].wm_set_id = WM_SET_A;
+               else
+                       wm_soc_clocks[i].wm_set_id =
+                                       ranges->writer_wm_sets[i].wm_inst;
+               wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+                               ranges->writer_wm_sets[i].max_fill_clk_khz;
+               wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+                               ranges->writer_wm_sets[i].min_fill_clk_khz;
+               wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+                               ranges->writer_wm_sets[i].max_drain_clk_khz;
+               wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+                               ranges->writer_wm_sets[i].min_drain_clk_khz;
+       }
+
+       pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+}
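
In both loops, watermark-set instances above 3 fall back to set A; note also
that pp_funcs is fetched but set_watermarks_for_clocks_ranges is called
without a NULL check, unlike the sibling hooks in this file. A stand-alone
sketch of the clamp; WM_SET_A's numeric value is assumed to be 0 here purely
for illustration:

    #include <stdio.h>

    #define WM_SET_A 0      /* assumed value, for illustration only */

    static unsigned int wm_set_id(unsigned int wm_inst)
    {
            /* instances above 3 collapse onto set A */
            return wm_inst > 3 ? WM_SET_A : wm_inst;
    }

    int main(void)
    {
            printf("%u %u\n", wm_set_id(2), wm_set_id(7)); /* "2 0" */
            return 0;
    }
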
+
+void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
+{
+       struct dc_context *ctx = pp->ctx;
+       struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
+               return;
+
+       pp_funcs->notify_smu_enable_pwe(pp_handle);
+}
+
+void dm_pp_get_funcs_rv(
+               struct dc_context *ctx,
+               struct pp_smu_funcs_rv *funcs)
+{
+       funcs->pp_smu.ctx = ctx;
+       funcs->set_display_requirement = pp_rv_set_display_requirement;
+       funcs->set_wm_ranges = pp_rv_set_wm_ranges;
+       funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+}
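
For context, a hypothetical caller-side sketch of how this vtable is
consumed: the structure is filled once, then the hooks are invoked through
it. The struct layout is abbreviated to a single hook; field names follow
the diff, everything else is invented:

    #include <stddef.h>

    struct pp_smu { void *ctx; };

    struct pp_smu_funcs_rv {
            struct pp_smu pp_smu;
            void (*set_pme_wa_enable)(struct pp_smu *pp);
    };

    /* stand-in for pp_rv_set_pme_wa_enable from the diff */
    static void fake_pme_wa(struct pp_smu *pp) { (void)pp; }

    static void get_funcs_rv(void *ctx, struct pp_smu_funcs_rv *funcs)
    {
            funcs->pp_smu.ctx = ctx;
            funcs->set_pme_wa_enable = fake_pme_wa;
    }

    int main(void)
    {
            struct pp_smu_funcs_rv funcs;

            get_funcs_rv(NULL, &funcs);
            if (funcs.set_pme_wa_enable)    /* hooks may be left unset */
                    funcs.set_pme_wa_enable(&funcs.pp_smu);
            return 0;
    }
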
index 5a3346124a0177da27c6d205559a2f363f5aa40d..516795342dd2815629e0876031fa47070c9ec12a 100644 (file)
 #include "amdgpu_dm_irq.h"
 #include "amdgpu_pm.h"
 
-unsigned long long dm_get_timestamp(struct dc_context *ctx)
-{
-       struct timespec64 time;
 
-       getrawmonotonic64(&time);
-       return timespec64_to_ns(&time);
-}
 
 unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
                unsigned long long current_time_stamp,
@@ -80,326 +74,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
 
 /**** power component interfaces ****/
 
-bool dm_pp_apply_display_requirements(
-               const struct dc_context *ctx,
-               const struct dm_pp_display_configuration *pp_display_cfg)
-{
-       struct amdgpu_device *adev = ctx->driver_context;
-
-       if (adev->pm.dpm_enabled) {
-
-               memset(&adev->pm.pm_display_cfg, 0,
-                               sizeof(adev->pm.pm_display_cfg));
-
-               adev->pm.pm_display_cfg.cpu_cc6_disable =
-                       pp_display_cfg->cpu_cc6_disable;
-
-               adev->pm.pm_display_cfg.cpu_pstate_disable =
-                       pp_display_cfg->cpu_pstate_disable;
-
-               adev->pm.pm_display_cfg.cpu_pstate_separation_time =
-                       pp_display_cfg->cpu_pstate_separation_time;
-
-               adev->pm.pm_display_cfg.nb_pstate_switch_disable =
-                       pp_display_cfg->nb_pstate_switch_disable;
-
-               adev->pm.pm_display_cfg.num_display =
-                               pp_display_cfg->display_count;
-               adev->pm.pm_display_cfg.num_path_including_non_display =
-                               pp_display_cfg->display_count;
-
-               adev->pm.pm_display_cfg.min_core_set_clock =
-                               pp_display_cfg->min_engine_clock_khz/10;
-               adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
-                               pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
-               adev->pm.pm_display_cfg.min_mem_set_clock =
-                               pp_display_cfg->min_memory_clock_khz/10;
-
-               adev->pm.pm_display_cfg.multi_monitor_in_sync =
-                               pp_display_cfg->all_displays_in_sync;
-               adev->pm.pm_display_cfg.min_vblank_time =
-                               pp_display_cfg->avail_mclk_switch_time_us;
-
-               adev->pm.pm_display_cfg.display_clk =
-                               pp_display_cfg->disp_clk_khz/10;
-
-               adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
-                               pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
-
-               adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
-               adev->pm.pm_display_cfg.line_time_in_us =
-                               pp_display_cfg->line_time_in_us;
-
-               adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
-               adev->pm.pm_display_cfg.crossfire_display_index = -1;
-               adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
-
-               /* TODO: complete implementation of
-                * pp_display_configuration_change().
-                * Follow example of:
-                * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
-                * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
-               if (adev->powerplay.pp_funcs->display_configuration_change)
-                       adev->powerplay.pp_funcs->display_configuration_change(
-                               adev->powerplay.pp_handle,
-                               &adev->pm.pm_display_cfg);
-
-               /* TODO: replace by a separate call to 'apply display cfg'? */
-               amdgpu_pm_compute_clocks(adev);
-       }
-
-       return true;
-}
-
-static void get_default_clock_levels(
-               enum dm_pp_clock_type clk_type,
-               struct dm_pp_clock_levels *clks)
-{
-       uint32_t disp_clks_in_khz[6] = {
-                       300000, 400000, 496560, 626090, 685720, 757900 };
-       uint32_t sclks_in_khz[6] = {
-                       300000, 360000, 423530, 514290, 626090, 720000 };
-       uint32_t mclks_in_khz[2] = { 333000, 800000 };
-
-       switch (clk_type) {
-       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-               clks->num_levels = 6;
-               memmove(clks->clocks_in_khz, disp_clks_in_khz,
-                               sizeof(disp_clks_in_khz));
-               break;
-       case DM_PP_CLOCK_TYPE_ENGINE_CLK:
-               clks->num_levels = 6;
-               memmove(clks->clocks_in_khz, sclks_in_khz,
-                               sizeof(sclks_in_khz));
-               break;
-       case DM_PP_CLOCK_TYPE_MEMORY_CLK:
-               clks->num_levels = 2;
-               memmove(clks->clocks_in_khz, mclks_in_khz,
-                               sizeof(mclks_in_khz));
-               break;
-       default:
-               clks->num_levels = 0;
-               break;
-       }
-}
-
-static enum amd_pp_clock_type dc_to_pp_clock_type(
-               enum dm_pp_clock_type dm_pp_clk_type)
-{
-       enum amd_pp_clock_type amd_pp_clk_type = 0;
-
-       switch (dm_pp_clk_type) {
-       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-               amd_pp_clk_type = amd_pp_disp_clock;
-               break;
-       case DM_PP_CLOCK_TYPE_ENGINE_CLK:
-               amd_pp_clk_type = amd_pp_sys_clock;
-               break;
-       case DM_PP_CLOCK_TYPE_MEMORY_CLK:
-               amd_pp_clk_type = amd_pp_mem_clock;
-               break;
-       default:
-               DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
-                               dm_pp_clk_type);
-               break;
-       }
-
-       return amd_pp_clk_type;
-}
-
-static void pp_to_dc_clock_levels(
-               const struct amd_pp_clocks *pp_clks,
-               struct dm_pp_clock_levels *dc_clks,
-               enum dm_pp_clock_type dc_clk_type)
-{
-       uint32_t i;
-
-       if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
-               DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
-                               DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
-                               pp_clks->count,
-                               DM_PP_MAX_CLOCK_LEVELS);
-
-               dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
-       } else
-               dc_clks->num_levels = pp_clks->count;
-
-       DRM_INFO("DM_PPLIB: values for %s clock\n",
-                       DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
-       for (i = 0; i < dc_clks->num_levels; i++) {
-               DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
-               /* translate 10kHz to kHz */
-               dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
-       }
-}
-
-static void pp_to_dc_clock_levels_with_latency(
-               const struct pp_clock_levels_with_latency *pp_clks,
-               struct dm_pp_clock_levels_with_latency *clk_level_info,
-               enum dm_pp_clock_type dc_clk_type)
-{
-       uint32_t i;
-
-       if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
-               DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
-                               DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
-                               pp_clks->num_levels,
-                               DM_PP_MAX_CLOCK_LEVELS);
-
-               clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
-       } else
-               clk_level_info->num_levels = pp_clks->num_levels;
-
-       DRM_DEBUG("DM_PPLIB: values for %s clock\n",
-                       DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
-
-       for (i = 0; i < clk_level_info->num_levels; i++) {
-               DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
-               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
-               clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
-       }
-}
-
-bool dm_pp_get_clock_levels_by_type(
-               const struct dc_context *ctx,
-               enum dm_pp_clock_type clk_type,
-               struct dm_pp_clock_levels *dc_clks)
-{
-       struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       struct amd_pp_clocks pp_clks = { 0 };
-       struct amd_pp_simple_clock_info validation_clks = { 0 };
-       uint32_t i;
-
-       if (adev->powerplay.pp_funcs->get_clock_by_type) {
-               if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
-                       dc_to_pp_clock_type(clk_type), &pp_clks)) {
-               /* Error in pplib. Provide default values. */
-                       get_default_clock_levels(clk_type, dc_clks);
-                       return true;
-               }
-       }
-
-       pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
-
-       if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
-               if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
-                                               pp_handle, &validation_clks)) {
-                       /* Error in pplib. Provide default values. */
-                       DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
-                       validation_clks.engine_max_clock = 72000;
-                       validation_clks.memory_max_clock = 80000;
-                       validation_clks.level = 0;
-               }
-       }
-
-       DRM_INFO("DM_PPLIB: Validation clocks:\n");
-       DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
-                       validation_clks.engine_max_clock);
-       DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
-                       validation_clks.memory_max_clock);
-       DRM_INFO("DM_PPLIB:    level           : %d\n",
-                       validation_clks.level);
-
-       /* Translate 10 kHz to kHz. */
-       validation_clks.engine_max_clock *= 10;
-       validation_clks.memory_max_clock *= 10;
-
-       /* Determine the highest non-boosted level from the Validation Clocks */
-       if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
-               for (i = 0; i < dc_clks->num_levels; i++) {
-                       if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
-                               /* This clock is higher the validation clock.
-                                * Than means the previous one is the highest
-                                * non-boosted one. */
-                               DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
-                                               dc_clks->num_levels, i);
-                               dc_clks->num_levels = i > 0 ? i : 1;
-                               break;
-                       }
-               }
-       } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
-               for (i = 0; i < dc_clks->num_levels; i++) {
-                       if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
-                               DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
-                                               dc_clks->num_levels, i);
-                               dc_clks->num_levels = i > 0 ? i : 1;
-                               break;
-                       }
-               }
-       }
-
-       return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_latency(
-       const struct dc_context *ctx,
-       enum dm_pp_clock_type clk_type,
-       struct dm_pp_clock_levels_with_latency *clk_level_info)
-{
-       struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       struct pp_clock_levels_with_latency pp_clks = { 0 };
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-       if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
-               return false;
-
-       if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
-                                                    dc_to_pp_clock_type(clk_type),
-                                                    &pp_clks))
-               return false;
-
-       pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
-
-       return true;
-}
-
-bool dm_pp_get_clock_levels_by_type_with_voltage(
-       const struct dc_context *ctx,
-       enum dm_pp_clock_type clk_type,
-       struct dm_pp_clock_levels_with_voltage *clk_level_info)
-{
-       /* TODO: to be implemented */
-       return false;
-}
-
-bool dm_pp_notify_wm_clock_changes(
-       const struct dc_context *ctx,
-       struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
-{
-       /* TODO: to be implemented */
-       return false;
-}
-
-bool dm_pp_apply_power_level_change_request(
-       const struct dc_context *ctx,
-       struct dm_pp_power_level_change_request *level_change_req)
-{
-       /* TODO: to be implemented */
-       return false;
-}
-
-bool dm_pp_apply_clock_for_voltage_request(
-       const struct dc_context *ctx,
-       struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
-{
-       /* TODO: to be implemented */
-       return false;
-}
-
-bool dm_pp_get_static_clocks(
-       const struct dc_context *ctx,
-       struct dm_pp_static_clock_info *static_clk_info)
-{
-       /* TODO: to be implemented */
-       return false;
-}
-
-void dm_pp_get_funcs_rv(
-               struct dc_context *ctx,
-               struct pp_smu_funcs_rv *funcs)
-{}
-
-/**** end of power component interfaces ****/
index aed538a4d1bace016fb37526d099656227b81348..532a515fda9a11a0f03cb3c700152712ba4a7023 100644 (file)
@@ -25,7 +25,7 @@
 
 DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
 
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 DC_LIBS += dcn10 dml
 endif
 
index b49ea96b5daeef2a2ec7e51262f22ca69877e22a..a50a76471107eb768613239f984cbe9384ed628a 100644 (file)
@@ -25,7 +25,7 @@
 # subcomponents.
 
 BASICS = conversion.o fixpt31_32.o \
-       logger.o log_helpers.o vector.o
+       log_helpers.o vector.o
 
 AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
 
index 021451549ff781ea69609dbb5b4c5443161acf3e..26583f346c3957d74da49e75ce4ed677a5958b40 100644 (file)
 #include "include/logger_interface.h"
 #include "dm_helpers.h"
 
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-struct dc_signal_type_info {
-       enum signal_type type;
-       char name[MAX_NAME_LEN];
-};
-
-static const struct dc_signal_type_info signal_type_info_tbl[] = {
-               {SIGNAL_TYPE_NONE,             "NC"},
-               {SIGNAL_TYPE_DVI_SINGLE_LINK,  "DVI"},
-               {SIGNAL_TYPE_DVI_DUAL_LINK,    "DDVI"},
-               {SIGNAL_TYPE_HDMI_TYPE_A,      "HDMIA"},
-               {SIGNAL_TYPE_LVDS,             "LVDS"},
-               {SIGNAL_TYPE_RGB,              "VGA"},
-               {SIGNAL_TYPE_DISPLAY_PORT,     "DP"},
-               {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
-               {SIGNAL_TYPE_EDP,              "eDP"},
-               {SIGNAL_TYPE_VIRTUAL,          "Virtual"}
-};
-
-void dc_conn_log(struct dc_context *ctx,
-               const struct dc_link *link,
-               uint8_t *hex_data,
-               int hex_data_count,
-               enum dc_log_type event,
-               const char *msg,
-               ...)
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
 {
        int i;
-       va_list args;
-       struct log_entry entry = { 0 };
-       enum signal_type signal;
-
-       if (link->local_sink)
-               signal = link->local_sink->sink_signal;
-       else
-               signal = link->connector_signal;
-
-       if (link->type == dc_connection_mst_branch)
-               signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
-
-       dm_logger_open(ctx->logger, &entry, event);
-
-       for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
-               if (signal == signal_type_info_tbl[i].type)
-                       break;
-
-       if (i == NUM_ELEMENTS(signal_type_info_tbl))
-               goto fail;
-
-       dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
-                       signal_type_info_tbl[i].name,
-                       link->link_index);
-
-       va_start(args, msg);
-       dm_logger_append_va(&entry, msg, args);
-
-       if (entry.buf_offset > 0 &&
-           entry.buf[entry.buf_offset - 1] == '\n')
-               entry.buf_offset--;
 
        if (hex_data)
                for (i = 0; i < hex_data_count; i++)
-                       dm_logger_append(&entry, "%2.2X ", hex_data[i]);
-
-       dm_logger_append(&entry, "^\n");
-
-fail:
-       dm_logger_close(&entry);
-
-       va_end(args);
+                       DC_LOG_DEBUG("%2.2X ", hex_data[i]);
 }
+
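
A stand-alone sketch of the hex-dump loop above. printf stands in for
DC_LOG_DEBUG, which in the kernel presumably adds its own prefix per
invocation, so the real log output may not be one contiguous line:

    #include <stdint.h>
    #include <stdio.h>

    static void log_hex(const uint8_t *hex_data, int hex_data_count)
    {
            int i;

            if (hex_data)
                    for (i = 0; i < hex_data_count; i++)
                            printf("%2.2X ", hex_data[i]);
            printf("\n");
    }

    int main(void)
    {
            const uint8_t dpcd[4] = { 0x11, 0x0a, 0x84, 0x01 };

            log_hex(dpcd, 4);       /* prints "11 0A 84 01 " */
            return 0;
    }
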
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
deleted file mode 100644 (file)
index 0866874..0000000
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "dm_services.h"
-#include "include/logger_interface.h"
-#include "logger.h"
-
-
-#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
-
-static const struct dc_log_type_info log_type_info_tbl[] = {
-               {LOG_ERROR,                 "Error"},
-               {LOG_WARNING,               "Warning"},
-               {LOG_DEBUG,                 "Debug"},
-               {LOG_DC,                    "DC_Interface"},
-               {LOG_SURFACE,               "Surface"},
-               {LOG_HW_HOTPLUG,            "HW_Hotplug"},
-               {LOG_HW_LINK_TRAINING,      "HW_LKTN"},
-               {LOG_HW_SET_MODE,           "HW_Mode"},
-               {LOG_HW_RESUME_S3,          "HW_Resume"},
-               {LOG_HW_AUDIO,              "HW_Audio"},
-               {LOG_HW_HPD_IRQ,            "HW_HPDIRQ"},
-               {LOG_MST,                   "MST"},
-               {LOG_SCALER,                "Scaler"},
-               {LOG_BIOS,                  "BIOS"},
-               {LOG_BANDWIDTH_CALCS,       "BWCalcs"},
-               {LOG_BANDWIDTH_VALIDATION,  "BWValidation"},
-               {LOG_I2C_AUX,               "I2C_AUX"},
-               {LOG_SYNC,                  "Sync"},
-               {LOG_BACKLIGHT,             "Backlight"},
-               {LOG_FEATURE_OVERRIDE,      "Override"},
-               {LOG_DETECTION_EDID_PARSER, "Edid"},
-               {LOG_DETECTION_DP_CAPS,     "DP_Caps"},
-               {LOG_RESOURCE,              "Resource"},
-               {LOG_DML,                   "DML"},
-               {LOG_EVENT_MODE_SET,        "Mode"},
-               {LOG_EVENT_DETECTION,       "Detect"},
-               {LOG_EVENT_LINK_TRAINING,   "LKTN"},
-               {LOG_EVENT_LINK_LOSS,       "LinkLoss"},
-               {LOG_EVENT_UNDERFLOW,       "Underflow"},
-               {LOG_IF_TRACE,              "InterfaceTrace"},
-               {LOG_DTN,                   "DTN"},
-               {LOG_DISPLAYSTATS,          "DisplayStats"}
-};
-
-
-/* ----------- Object init and destruction ----------- */
-static bool construct(struct dc_context *ctx, struct dal_logger *logger,
-                     uint32_t log_mask)
-{
-       /* malloc buffer and init offsets */
-       logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
-       logger->log_buffer = kcalloc(logger->log_buffer_size, sizeof(char),
-                                    GFP_KERNEL);
-       if (!logger->log_buffer)
-               return false;
-
-       /* Initialize both offsets to start of buffer (empty) */
-       logger->buffer_read_offset = 0;
-       logger->buffer_write_offset = 0;
-
-       logger->open_count = 0;
-
-       logger->flags.bits.ENABLE_CONSOLE = 1;
-       logger->flags.bits.ENABLE_BUFFER = 0;
-
-       logger->ctx = ctx;
-
-       logger->mask = log_mask;
-
-       return true;
-}
-
-static void destruct(struct dal_logger *logger)
-{
-       if (logger->log_buffer) {
-               kfree(logger->log_buffer);
-               logger->log_buffer = NULL;
-       }
-}
-
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
-{
-       /* malloc struct */
-       struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
-                                           GFP_KERNEL);
-
-       if (!logger)
-               return NULL;
-       if (!construct(ctx, logger, log_mask)) {
-               kfree(logger);
-               return NULL;
-       }
-
-       return logger;
-}
-
-uint32_t dal_logger_destroy(struct dal_logger **logger)
-{
-       if (logger == NULL || *logger == NULL)
-               return 1;
-       destruct(*logger);
-       kfree(*logger);
-       *logger = NULL;
-
-       return 0;
-}
-
-/* ------------------------------------------------------------------------ */
-
-
-static bool dal_logger_should_log(
-       struct dal_logger *logger,
-       enum dc_log_type log_type)
-{
-       if (logger->mask & (1 << log_type))
-               return true;
-
-       return false;
-}
-
-static void log_to_debug_console(struct log_entry *entry)
-{
-       struct dal_logger *logger = entry->logger;
-
-       if (logger->flags.bits.ENABLE_CONSOLE == 0)
-               return;
-
-       if (entry->buf_offset) {
-               switch (entry->type) {
-               case LOG_ERROR:
-                       dm_error("%s", entry->buf);
-                       break;
-               default:
-                       dm_output_to_console("%s", entry->buf);
-                       break;
-               }
-       }
-}
-
-/* Print everything unread existing in log_buffer to debug console*/
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
-{
-       char *string_start = &logger->log_buffer[logger->buffer_read_offset];
-
-       if (should_warn)
-               dm_output_to_console(
-                       "---------------- FLUSHING LOG BUFFER ----------------\n");
-       while (logger->buffer_read_offset < logger->buffer_write_offset) {
-
-               if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
-                       dm_output_to_console("%s", string_start);
-                       string_start = logger->log_buffer + logger->buffer_read_offset + 1;
-               }
-               logger->buffer_read_offset++;
-       }
-       if (should_warn)
-               dm_output_to_console(
-                       "-------------- END FLUSHING LOG BUFFER --------------\n\n");
-}
-
-static void log_to_internal_buffer(struct log_entry *entry)
-{
-
-       uint32_t size = entry->buf_offset;
-       struct dal_logger *logger = entry->logger;
-
-       if (logger->flags.bits.ENABLE_BUFFER == 0)
-               return;
-
-       if (logger->log_buffer == NULL)
-               return;
-
-       if (size > 0 && size < logger->log_buffer_size) {
-
-               int buffer_space = logger->log_buffer_size -
-                               logger->buffer_write_offset;
-
-               if (logger->buffer_write_offset == logger->buffer_read_offset) {
-                       /* Buffer is empty, start writing at beginning */
-                       buffer_space = logger->log_buffer_size;
-                       logger->buffer_write_offset = 0;
-                       logger->buffer_read_offset = 0;
-               }
-
-               if (buffer_space > size) {
-                       /* No wrap around, copy 'size' bytes
-                        * from 'entry->buf' to 'log_buffer'
-                        */
-                       memmove(logger->log_buffer +
-                                       logger->buffer_write_offset,
-                                       entry->buf, size);
-                       logger->buffer_write_offset += size;
-
-               } else {
-                       /* Not enough room remaining, we should flush
-                        * existing logs */
-
-                       /* Flush existing unread logs to console */
-                       dm_logger_flush_buffer(logger, true);
-
-                       /* Start writing to beginning of buffer */
-                       memmove(logger->log_buffer, entry->buf, size);
-                       logger->buffer_write_offset = size;
-                       logger->buffer_read_offset = 0;
-               }
-
-       }
-}
-
-static void log_heading(struct log_entry *entry)
-{
-       int j;
-
-       for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
-
-               const struct dc_log_type_info *info = &log_type_info_tbl[j];
-
-               if (info->type == entry->type)
-                       dm_logger_append(entry, "[%s]\t", info->name);
-       }
-}
-
-static void append_entry(
-               struct log_entry *entry,
-               char *buffer,
-               uint32_t buf_size)
-{
-       if (!entry->buf ||
-               entry->buf_offset + buf_size > entry->max_buf_bytes
-       ) {
-               BREAK_TO_DEBUGGER();
-               return;
-       }
-
-       /* Todo: check if off by 1 byte due to \0 anywhere */
-       memmove(entry->buf + entry->buf_offset, buffer, buf_size);
-       entry->buf_offset += buf_size;
-}
-
-/* ------------------------------------------------------------------------ */
-
-/* Warning: Be careful that 'msg' is null terminated and the total size is
- * less than DAL_LOGGER_BUFFER_MAX_LOG_LINE_SIZE (256) including '\0'
- */
-void dm_logger_write(
-       struct dal_logger *logger,
-       enum dc_log_type log_type,
-       const char *msg,
-       ...)
-{
-       if (logger && dal_logger_should_log(logger, log_type)) {
-               uint32_t size;
-               va_list args;
-               char buffer[LOG_MAX_LINE_SIZE];
-               struct log_entry entry;
-
-               va_start(args, msg);
-
-               entry.logger = logger;
-
-               entry.buf = buffer;
-
-               entry.buf_offset = 0;
-               entry.max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
-               entry.type = log_type;
-
-               log_heading(&entry);
-
-               size = dm_log_to_buffer(
-                       buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
-
-               buffer[entry.buf_offset + size] = '\0';
-               entry.buf_offset += size + 1;
-
-               /* --Flush log_entry buffer-- */
-               /* print to kernel console */
-               log_to_debug_console(&entry);
-               /* log internally for dsat */
-               log_to_internal_buffer(&entry);
-
-               va_end(args);
-       }
-}
-
-/* Same as dm_logger_write, except without open() and close(), which must
- * be done separately.
- */
-void dm_logger_append(
-       struct log_entry *entry,
-       const char *msg,
-       ...)
-{
-       va_list args;
-
-       va_start(args, msg);
-       dm_logger_append_va(entry, msg, args);
-       va_end(args);
-}
-
-void dm_logger_append_va(
-       struct log_entry *entry,
-       const char *msg,
-       va_list args)
-{
-       struct dal_logger *logger;
-
-       if (!entry) {
-               BREAK_TO_DEBUGGER();
-               return;
-       }
-
-       logger = entry->logger;
-
-       if (logger && logger->open_count > 0 &&
-               dal_logger_should_log(logger, entry->type)) {
-
-               uint32_t size;
-               char buffer[LOG_MAX_LINE_SIZE];
-
-               size = dm_log_to_buffer(
-                       buffer, LOG_MAX_LINE_SIZE, msg, args);
-
-               if (size < LOG_MAX_LINE_SIZE - 1) {
-                       append_entry(entry, buffer, size);
-               } else {
-                       append_entry(entry, "LOG_ERROR, line too long\n", 27);
-               }
-       }
-}
-
-void dm_logger_open(
-               struct dal_logger *logger,
-               struct log_entry *entry, /* out */
-               enum dc_log_type log_type)
-{
-       if (!entry) {
-               BREAK_TO_DEBUGGER();
-               return;
-       }
-
-       entry->type = log_type;
-       entry->logger = logger;
-
-       entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE,
-                            GFP_KERNEL);
-
-       entry->buf_offset = 0;
-       entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
-
-       logger->open_count++;
-
-       log_heading(entry);
-}
-
-void dm_logger_close(struct log_entry *entry)
-{
-       struct dal_logger *logger = entry->logger;
-
-       if (logger && logger->open_count > 0) {
-               logger->open_count--;
-       } else {
-               BREAK_TO_DEBUGGER();
-               goto cleanup;
-       }
-
-       /* --Flush log_entry buffer-- */
-       /* print to kernel console */
-       log_to_debug_console(entry);
-       /* log internally for dsat */
-       log_to_internal_buffer(entry);
-
-       /* TODO: Write end heading */
-
-cleanup:
-       if (entry->buf) {
-               kfree(entry->buf);
-               entry->buf = NULL;
-               entry->buf_offset = 0;
-               entry->max_buf_bytes = 0;
-       }
-}
-
index c7f0b27e457e433588897bc9027947e11c92859c..be8a2494355a45e20fb329f81cdccd341e19b0d8 100644 (file)
@@ -3762,6 +3762,200 @@ static struct integrated_info *bios_parser_create_integrated_info(
        return NULL;
 }
 
+enum bp_result update_slot_layout_info(
+       struct dc_bios *dcb,
+       unsigned int i,
+       struct slot_layout_info *slot_layout_info,
+       unsigned int record_offset)
+{
+       unsigned int j;
+       struct bios_parser *bp;
+       ATOM_BRACKET_LAYOUT_RECORD *record;
+       ATOM_COMMON_RECORD_HEADER *record_header;
+       enum bp_result result = BP_RESULT_NORECORD;
+
+       bp = BP_FROM_DCB(dcb);
+       record = NULL;
+       record_header = NULL;
+
+       for (;;) {
+
+               record_header = (ATOM_COMMON_RECORD_HEADER *)
+                       GET_IMAGE(ATOM_COMMON_RECORD_HEADER, record_offset);
+               if (record_header == NULL) {
+                       result = BP_RESULT_BADBIOSTABLE;
+                       break;
+               }
+
+               /* the end of the list */
+               if (record_header->ucRecordType == 0xff ||
+                       record_header->ucRecordSize == 0) {
+                       break;
+               }
+
+               if (record_header->ucRecordType ==
+                       ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+                       sizeof(ATOM_BRACKET_LAYOUT_RECORD)
+                       <= record_header->ucRecordSize) {
+                       record = (ATOM_BRACKET_LAYOUT_RECORD *)
+                               (record_header);
+                       result = BP_RESULT_OK;
+                       break;
+               }
+
+               record_offset += record_header->ucRecordSize;
+       }
+
+       /* return if the record was not found */
+       if (result != BP_RESULT_OK)
+               return result;
+
+       /* get slot sizes */
+       slot_layout_info->length = record->ucLength;
+       slot_layout_info->width = record->ucWidth;
+
+       /* get info for each connector in the slot */
+       slot_layout_info->num_of_connectors = record->ucConnNum;
+       for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+               slot_layout_info->connectors[j].connector_type =
+                       (enum connector_layout_type)
+                       (record->asConnInfo[j].ucConnectorType);
+               switch (record->asConnInfo[j].ucConnectorType) {
+               case CONNECTOR_TYPE_DVI_D:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_DVI_D;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_DVI;
+                       break;
+
+               case CONNECTOR_TYPE_HDMI:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_HDMI;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_HDMI;
+                       break;
+
+               case CONNECTOR_TYPE_DISPLAY_PORT:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_DP;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_DP;
+                       break;
+
+               case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_MINI_DP;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_MINI_DP;
+                       break;
+
+               default:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_UNKNOWN;
+               }
+
+               slot_layout_info->connectors[j].position =
+                       record->asConnInfo[j].ucPosition;
+               slot_layout_info->connectors[j].connector_id =
+                       object_id_from_bios_object_id(
+                               record->asConnInfo[j].usConnectorObjectId);
+       }
+       return result;
+}
+
+
+enum bp_result get_bracket_layout_record(
+       struct dc_bios *dcb,
+       unsigned int bracket_layout_id,
+       struct slot_layout_info *slot_layout_info)
+{
+       unsigned int i;
+       unsigned int record_offset;
+       struct bios_parser *bp;
+       enum bp_result result;
+       ATOM_OBJECT *object;
+       ATOM_OBJECT_TABLE *object_table;
+       unsigned int genericTableOffset;
+
+       bp = BP_FROM_DCB(dcb);
+       object = NULL;
+       if (slot_layout_info == NULL) {
+               DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+               return BP_RESULT_BADINPUT;
+       }
+
+
+       genericTableOffset = bp->object_info_tbl_offset +
+               bp->object_info_tbl.v1_3->usMiscObjectTableOffset;
+       object_table = (ATOM_OBJECT_TABLE *)
+               GET_IMAGE(ATOM_OBJECT_TABLE, genericTableOffset);
+       if (!object_table)
+               return BP_RESULT_FAILURE;
+
+       result = BP_RESULT_NORECORD;
+       for (i = 0; i < object_table->ucNumberOfObjects; ++i) {
+
+               if (bracket_layout_id ==
+                       object_table->asObjects[i].usObjectID) {
+
+                       object = &object_table->asObjects[i];
+                       record_offset = object->usRecordOffset +
+                               bp->object_info_tbl_offset;
+
+                       result = update_slot_layout_info(dcb, i,
+                               slot_layout_info, record_offset);
+                       break;
+               }
+       }
+       return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+       struct dc_bios *dcb,
+       struct board_layout_info *board_layout_info)
+{
+       unsigned int i;
+       struct bios_parser *bp;
+       enum bp_result record_result;
+
+       const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+               GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+               GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+               0, 0
+       };
+
+       bp = BP_FROM_DCB(dcb);
+       if (board_layout_info == NULL) {
+               DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+               return BP_RESULT_BADINPUT;
+       }
+
+       board_layout_info->num_of_slots = 0;
+
+       for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+               record_result = get_bracket_layout_record(dcb,
+                       slot_index_to_vbios_id[i],
+                       &board_layout_info->slots[i]);
+
+               if (record_result == BP_RESULT_NORECORD && i > 0)
+                       break; /* no more slots present in bios */
+               else if (record_result != BP_RESULT_OK)
+                       return record_result;  /* fail */
+
+               ++board_layout_info->num_of_slots;
+       }
+
+       /* all data is valid */
+       board_layout_info->is_number_of_slots_valid = 1;
+       board_layout_info->is_slots_size_valid = 1;
+       board_layout_info->is_connector_offsets_valid = 1;
+       board_layout_info->is_connector_lengths_valid = 1;
+
+       return BP_RESULT_OK;
+}
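
Probing stops at the first missing record after slot 0, so num_of_slots
counts only the leading populated slots. The loop shape, runnable with a
mocked record lookup (the two-populated-slot scenario is invented):

    #include <stdio.h>

    #define MAX_BOARD_SLOTS 4

    enum bp_result { BP_RESULT_OK, BP_RESULT_NORECORD };

    /* pretend only the first two slot records exist in the VBIOS */
    static enum bp_result get_record(unsigned int vbios_id)
    {
            return vbios_id ? BP_RESULT_OK : BP_RESULT_NORECORD;
    }

    int main(void)
    {
            const unsigned int slot_ids[MAX_BOARD_SLOTS] = { 1, 2, 0, 0 };
            unsigned int i, num_of_slots = 0;

            for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
                    if (get_record(slot_ids[i]) == BP_RESULT_NORECORD &&
                        i > 0)
                            break;  /* no more slots present */
                    ++num_of_slots;
            }
            printf("%u slots\n", num_of_slots); /* prints "2 slots" */
            return 0;
    }
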
+
 /******************************************************************************/
 
 static const struct dc_vbios_funcs vbios_funcs = {
@@ -3836,6 +4030,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
        .post_init = bios_parser_post_init,  /* patch vbios table for mxm module by reading i2c */
 
        .bios_parser_destroy = bios_parser_destroy,
+
+       .get_board_layout_info = bios_get_board_layout_info,
 };
 
 static bool bios_parser_construct(
index b8cef7af3c4afb4515757b09a3e3a6e521f6c3a0..eab007e1793c290ddb17d42688d4a9ba45eab9e4 100644 (file)
 #include "bios_parser_interface.h"
 
 #include "bios_parser_common.h"
+
+/* Temporarily add in defines until ObjectID.h patch is updated in a few days */
+#ifndef GENERIC_OBJECT_ID_BRACKET_LAYOUT
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT          0x05
+#endif /* GENERIC_OBJECT_ID_BRACKET_LAYOUT */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1  \
+       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+       GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+       GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 */
+
+#ifndef GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2  \
+       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+       GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+       GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+#endif /* GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 */
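
A worked example of how these bracket-layout IDs pack together. The shift
amounts (12/8/0) and the field values below are assumptions made purely to
keep the arithmetic concrete; the real constants live in ObjectID.h:

    #include <stdio.h>

    /* shift amounts and field values assumed for illustration */
    #define OBJECT_TYPE_SHIFT       12
    #define ENUM_ID_SHIFT            8
    #define OBJECT_ID_SHIFT          0

    #define GRAPH_OBJECT_TYPE_GENERIC               0x07
    #define GRAPH_OBJECT_ENUM_ID1                   0x01
    #define GENERIC_OBJECT_ID_BRACKET_LAYOUT        0x05

    #define BRACKET_LAYOUT_ENUM_ID1 \
            (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT | \
             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT | \
             GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)

    int main(void)
    {
            /* 0x7 above bit 12, 0x1 in bits 8..11, 0x5 in bits 0..7 */
            printf("0x%04X\n", BRACKET_LAYOUT_ENUM_ID1); /* 0x7105 */
            return 0;
    }
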
+
+#define DC_LOGGER \
+       bp->base.ctx->logger
+
 #define LAST_RECORD_TYPE 0xff
 #define SMU9_SYSPLL0_ID  0
 
@@ -86,7 +109,6 @@ static struct atom_encoder_caps_record *get_encoder_cap_record(
 
 #define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
 
-
 static void destruct(struct bios_parser *bp)
 {
        kfree(bp->base.bios_local_image);
@@ -656,7 +678,7 @@ static enum bp_result bios_parser_get_gpio_pin_info(
                return BP_RESULT_BADBIOSTABLE;
 
        if (sizeof(struct atom_common_table_header) +
-                       sizeof(struct atom_gpio_pin_lut_v2_1)
+                       sizeof(struct atom_gpio_pin_assignment)
                        > le16_to_cpu(header->table_header.structuresize))
                return BP_RESULT_BADBIOSTABLE;
 
@@ -1854,6 +1876,198 @@ static struct integrated_info *bios_parser_create_integrated_info(
        return NULL;
 }
 
+static enum bp_result update_slot_layout_info(
+       struct dc_bios *dcb,
+       unsigned int i,
+       struct slot_layout_info *slot_layout_info)
+{
+       unsigned int record_offset;
+       unsigned int j;
+       struct atom_display_object_path_v2 *object;
+       struct atom_bracket_layout_record *record;
+       struct atom_common_record_header *record_header;
+       enum bp_result result;
+       struct bios_parser *bp;
+       struct object_info_table *tbl;
+       struct display_object_info_table_v1_4 *v1_4;
+
+       record = NULL;
+       record_header = NULL;
+       result = BP_RESULT_NORECORD;
+
+       bp = BP_FROM_DCB(dcb);
+       tbl = &bp->object_info_tbl;
+       v1_4 = tbl->v1_4;
+
+       object = &v1_4->display_path[i];
+       record_offset = (unsigned int)
+               (object->disp_recordoffset) +
+               (unsigned int)(bp->object_info_tbl_offset);
+
+       for (;;) {
+
+               record_header = (struct atom_common_record_header *)
+                       GET_IMAGE(struct atom_common_record_header,
+                       record_offset);
+               if (record_header == NULL) {
+                       result = BP_RESULT_BADBIOSTABLE;
+                       break;
+               }
+
+               /* the end of the list */
+               if (record_header->record_type == 0xff ||
+                       record_header->record_size == 0)        {
+                       break;
+               }
+
+               if (record_header->record_type ==
+                       ATOM_BRACKET_LAYOUT_RECORD_TYPE &&
+                       sizeof(struct atom_bracket_layout_record)
+                       <= record_header->record_size) {
+                       record = (struct atom_bracket_layout_record *)
+                               (record_header);
+                       result = BP_RESULT_OK;
+                       break;
+               }
+
+               record_offset += record_header->record_size;
+       }
+
+       /* return if the record was not found */
+       if (result != BP_RESULT_OK)
+               return result;
+
+       /* get slot sizes */
+       slot_layout_info->length = record->bracketlen;
+       slot_layout_info->width = record->bracketwidth;
+
+       /* get info for each connector in the slot */
+       slot_layout_info->num_of_connectors = record->conn_num;
+       for (j = 0; j < slot_layout_info->num_of_connectors; ++j) {
+               slot_layout_info->connectors[j].connector_type =
+                       (enum connector_layout_type)
+                       (record->conn_info[j].connector_type);
+               switch (record->conn_info[j].connector_type) {
+               case CONNECTOR_TYPE_DVI_D:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_DVI_D;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_DVI;
+                       break;
+
+               case CONNECTOR_TYPE_HDMI:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_HDMI;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_HDMI;
+                       break;
+
+               case CONNECTOR_TYPE_DISPLAY_PORT:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_DP;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_DP;
+                       break;
+
+               case CONNECTOR_TYPE_MINI_DISPLAY_PORT:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_MINI_DP;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_MINI_DP;
+                       break;
+
+               default:
+                       slot_layout_info->connectors[j].connector_type =
+                               CONNECTOR_LAYOUT_TYPE_UNKNOWN;
+                       slot_layout_info->connectors[j].length =
+                               CONNECTOR_SIZE_UNKNOWN;
+               }
+
+               slot_layout_info->connectors[j].position =
+                       record->conn_info[j].position;
+               slot_layout_info->connectors[j].connector_id =
+                       object_id_from_bios_object_id(
+                               record->conn_info[j].connectorobjid);
+       }
+       return result;
+}
+
+static enum bp_result get_bracket_layout_record(
+       struct dc_bios *dcb,
+       unsigned int bracket_layout_id,
+       struct slot_layout_info *slot_layout_info)
+{
+       unsigned int i;
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result;
+       struct object_info_table *tbl;
+       struct display_object_info_table_v1_4 *v1_4;
+
+       if (slot_layout_info == NULL) {
+               DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
+               return BP_RESULT_BADINPUT;
+       }
+       tbl = &bp->object_info_tbl;
+       v1_4 = tbl->v1_4;
+
+       result = BP_RESULT_NORECORD;
+       for (i = 0; i < v1_4->number_of_path; ++i)      {
+
+               if (bracket_layout_id ==
+                       v1_4->display_path[i].display_objid) {
+                       result = update_slot_layout_info(dcb, i,
+                               slot_layout_info);
+                       break;
+               }
+       }
+       return result;
+}
+
+static enum bp_result bios_get_board_layout_info(
+       struct dc_bios *dcb,
+       struct board_layout_info *board_layout_info)
+{
+       unsigned int i;
+       struct bios_parser *bp;
+       enum bp_result record_result;
+
+       const unsigned int slot_index_to_vbios_id[MAX_BOARD_SLOTS] = {
+               GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1,
+               GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2,
+               0, 0
+       };
+
+       bp = BP_FROM_DCB(dcb);
+       if (board_layout_info == NULL) {
+               DC_LOG_DETECTION_EDID_PARSER("Invalid board_layout_info\n");
+               return BP_RESULT_BADINPUT;
+       }
+
+       board_layout_info->num_of_slots = 0;
+
+       for (i = 0; i < MAX_BOARD_SLOTS; ++i) {
+               record_result = get_bracket_layout_record(dcb,
+                       slot_index_to_vbios_id[i],
+                       &board_layout_info->slots[i]);
+
+               if (record_result == BP_RESULT_NORECORD && i > 0)
+                       break; /* no more slots present in bios */
+               else if (record_result != BP_RESULT_OK)
+                       return record_result;  /* fail */
+
+               ++board_layout_info->num_of_slots;
+       }
+
+       /* all data is valid */
+       board_layout_info->is_number_of_slots_valid = 1;
+       board_layout_info->is_slots_size_valid = 1;
+       board_layout_info->is_connector_offsets_valid = 1;
+       board_layout_info->is_connector_lengths_valid = 1;
+
+       return BP_RESULT_OK;
+}
+
 static const struct dc_vbios_funcs vbios_funcs = {
        .get_connectors_number = bios_parser_get_connectors_number,
 
@@ -1925,6 +2139,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
        .bios_parser_destroy = firmware_parser_destroy,
 
        .get_smu_clock_info = bios_parser_get_smu_clock_info,
+
+       .get_board_layout_info = bios_get_board_layout_info,
 };
 
 static bool bios_parser_construct(
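Note: a hedged usage sketch for the get_board_layout_info hook registered in both vbios function tables above. The dcb->funcs indirection and the choice of DC_LOG_DETECTION_EDID_PARSER are assumptions; the struct fields are the ones populated by update_slot_layout_info/bios_get_board_layout_info in this patch.

        /* illustrative consumer, not part of the patch */
        static void log_board_layout(struct dc_bios *dcb)
        {
                struct board_layout_info info;
                unsigned int i;

                if (!dcb->funcs->get_board_layout_info ||
                    dcb->funcs->get_board_layout_info(dcb, &info) != BP_RESULT_OK)
                        return;

                for (i = 0; i < info.num_of_slots; i++)
                        DC_LOG_DETECTION_EDID_PARSER("slot %u: %u connector(s)\n",
                                        i, (unsigned int)info.slots[i].num_of_connectors);
        }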
index 752b08a42d3ec238cc2a02f2298458ed8273d348..2b5dc499a35edafd6cc4f37b37d9d16ed69ff6c6 100644 (file)
        bios_cmd_table_para_revision(bp->base.ctx->driver_context, \
                        GET_INDEX_INTO_MASTER_TABLE(command, fname))
 
-static void init_dig_encoder_control(struct bios_parser *bp);
-static void init_transmitter_control(struct bios_parser *bp);
-static void init_set_pixel_clock(struct bios_parser *bp);
 
-static void init_set_crtc_timing(struct bios_parser *bp);
-
-static void init_select_crtc_source(struct bios_parser *bp);
-static void init_enable_crtc(struct bios_parser *bp);
-
-static void init_external_encoder_control(struct bios_parser *bp);
-static void init_enable_disp_power_gating(struct bios_parser *bp);
-static void init_set_dce_clock(struct bios_parser *bp);
-static void init_get_smu_clock_info(struct bios_parser *bp);
-
-void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
-{
-       init_dig_encoder_control(bp);
-       init_transmitter_control(bp);
-       init_set_pixel_clock(bp);
-
-       init_set_crtc_timing(bp);
-
-       init_select_crtc_source(bp);
-       init_enable_crtc(bp);
-
-       init_external_encoder_control(bp);
-       init_enable_disp_power_gating(bp);
-       init_set_dce_clock(bp);
-       init_get_smu_clock_info(bp);
-}
 
 static uint32_t bios_cmd_table_para_revision(void *dev,
                                             uint32_t index)
@@ -829,3 +800,20 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
        return 0;
 }
 
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+       init_dig_encoder_control(bp);
+       init_transmitter_control(bp);
+       init_set_pixel_clock(bp);
+
+       init_set_crtc_timing(bp);
+
+       init_select_crtc_source(bp);
+       init_enable_crtc(bp);
+
+       init_external_encoder_control(bp);
+       init_enable_disp_power_gating(bp);
+       init_set_dce_clock(bp);
+       init_get_smu_clock_info(bp);
+}
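Note: the init_* helpers called above all follow the same dispatch shape: look up the command-table revision and bind a matching implementation, or NULL when the revision is unknown. A hedged sketch of the pattern, assuming the revision macro defined at the top of this file is named BIOS_CMD_TABLE_PARA_REVISION; set_dce_clock_v2_1 is a placeholder symbol, not taken from this diff.

        /* illustrative dispatch pattern; the v2_1 symbol is a placeholder */
        static void init_set_dce_clock(struct bios_parser *bp)
        {
                switch (BIOS_CMD_TABLE_PARA_REVISION(setdceclock)) {
                case 1:
                        bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
                        break;
                default:
                        bp->cmd_tbl.set_dce_clock = NULL;
                        break;
                }
        }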
index bbbcef566c551908d42c0c6fda57083c1bdd455e..770ff89ba7e173c273006f122ba77622cde7559f 100644 (file)
@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
        case DCE_VERSION_11_22:
                *h = dal_cmd_tbl_helper_dce112_get_table2();
                return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
                *h = dal_cmd_tbl_helper_dce112_get_table2();
                return true;
index 95f332ee3e7e6e3858294d485c03c23c1c25f613..416500e51b8dd2f20da95ec9b0a7730363d56f50 100644 (file)
@@ -38,7 +38,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
 
 BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
 
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
 endif
 
index fc3f98fb09ead42defe0ffa82b3fc92af18f2375..62435bfc274dafe62a1b45fc2be1c7de47024d1b 100644 (file)
 
 #ifndef _CALCS_CALCS_LOGGER_H_
 #define _CALCS_CALCS_LOGGER_H_
-#define DC_LOGGER \
-       logger
+#define DC_LOGGER ctx->logger
 
-static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip)
+static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip)
 {
 
        DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -122,7 +121,7 @@ static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calc
 
 }
 
-static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios)
+static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios)
 {
 
        DC_LOG_BANDWIDTH_CALCS("#####################################################################");
@@ -181,7 +180,7 @@ static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calc
 
 }
 
-static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data)
+static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data)
 {
 
        int i, j, k;
index 2c4e8f0cb2dcdb7fedbf133b8121bcfd55085edf..160d11a15eac7dca953fd71fdac4b84c9445ca98 100644 (file)
@@ -3010,9 +3010,9 @@ bool bw_calcs(struct dc_context *ctx,
                struct bw_fixed low_yclk = vbios->low_yclk;
 
                if (ctx->dc->debug.bandwidth_calcs_trace) {
-                       print_bw_calcs_dceip(ctx->logger, dceip);
-                       print_bw_calcs_vbios(ctx->logger, vbios);
-                       print_bw_calcs_data(ctx->logger, data);
+                       print_bw_calcs_dceip(ctx, dceip);
+                       print_bw_calcs_vbios(ctx, vbios);
+                       print_bw_calcs_data(ctx, data);
                }
                calculate_bandwidth(dceip, vbios, data);
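Note: the signature change above works because the DC_LOG_* macros resolve their logger through the per-file DC_LOGGER macro, which calcs_logger.h now defines as ctx->logger. A minimal sketch of the convention, assuming DC_LOG_BANDWIDTH_CALCS expands through DC_LOGGER:

        /* any function with a struct dc_context *ctx in scope can log,
         * since DC_LOGGER supplies the logger to the DC_LOG_* macros
         */
        #define DC_LOGGER ctx->logger

        static void trace_example(struct dc_context *ctx)
        {
                DC_LOG_BANDWIDTH_CALCS("tracing via the ctx-resolved logger");
        }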
 
index 49a4ea45466d33bd3fb5be6e1ed7dc4d3b42bc7b..bd039322f697b4727a8ef985416d3f5d0355de7b 100644 (file)
@@ -31,6 +31,8 @@
 
 #include "resource.h"
 #include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_hubbub.h"
+
 #include "dcn_calc_math.h"
 
 #define DC_LOGGER \
@@ -248,7 +250,24 @@ static void pipe_ctx_to_e2e_pipe_params (
        else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
                input->src.is_hsplit = true;
 
-       input->src.dcc                 = pipe->plane_state->dcc.enable;
+       if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) {
+               /*
+                * this method requires us to always re-calculate the watermark
+                * when DCC changes between flips.
+                */
+               input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0;
+       } else {
+               /*
+                * this allows us to disable DCC on the fly without re-calculating
+                * the WM
+                *
+                * the extra overhead for DCC is quite small: for 1080p the WM
+                * without DCC is only 0.417us lower (urgent goes from 6.979us
+                * to 6.562us)
+                */
+               unsigned int bpe;
+
+               input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs->
+                       dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0;
+       }
        input->src.dcc_rate            = 1;
        input->src.meta_pitch          = pipe->plane_state->dcc.grph.meta_pitch;
        input->src.source_scan         = dm_horz;
@@ -423,6 +442,10 @@ static void dcn_bw_calc_rq_dlg_ttu(
        int total_flip_bytes = 0;
        int i;
 
+       memset(dlg_regs, 0, sizeof(*dlg_regs));
+       memset(ttu_regs, 0, sizeof(*ttu_regs));
+       memset(rq_regs, 0, sizeof(*rq_regs));
+
        for (i = 0; i < number_of_planes; i++) {
                total_active_bw += v->read_bandwidth[i];
                total_prefetch_bw += v->prefetch_bandwidth[i];
@@ -501,6 +524,7 @@ static void split_stream_across_pipes(
        resource_build_scaling_params(secondary_pipe);
 }
 
+#if 0
 static void calc_wm_sets_and_perf_params(
                struct dc_state *context,
                struct dcn_bw_internal_vars *v)
@@ -582,6 +606,7 @@ static void calc_wm_sets_and_perf_params(
        if (v->voltage_level >= 3)
                context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
 }
+#endif
 
 static bool dcn_bw_apply_registry_override(struct dc *dc)
 {
@@ -651,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
 }
 
 static void hack_bounding_box(struct dcn_bw_internal_vars *v,
-               struct dc_debug *dbg,
+               struct dc_debug_options *dbg,
                struct dc_state *context)
 {
        if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
@@ -883,7 +908,26 @@ bool dcn_validate_bandwidth(
                                ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value
                                        || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
                        }
-                       v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+
+                       if (dc->debug.optimized_watermark) {
+                               /*
+                                * this method requires us to always re-calculate watermark when dcc change
+                                * between flip.
+                                */
+                               v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+                       } else {
+                               /*
+                                * this allows us to disable DCC on the fly without
+                                * re-calculating the WM
+                                *
+                                * the extra overhead for DCC is quite small: for 1080p
+                                * the WM without DCC is only 0.417us lower (urgent
+                                * goes from 6.979us to 6.562us)
+                                */
+                               unsigned int bpe;
+
+                               v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format(
+                                               pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no;
+                       }
+
                        v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
                                        pipe->plane_state->format);
                        v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
@@ -976,43 +1020,60 @@ bool dcn_validate_bandwidth(
                                bw_consumed = v->fabric_and_dram_bandwidth;
 
                display_pipe_configuration(v);
-               calc_wm_sets_and_perf_params(context, v);
-               context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+               /*calc_wm_sets_and_perf_params(context, v);*/
+               /* Only 1 set is used by dcn since no noticeable performance
+                * improvement was measured over multiple sets, and due to
+                * hw bug DEGVIDCN10-254
+                */
+               dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+               context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+                       v->stutter_exit_watermark * 1000;
+               context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+                               v->stutter_enter_plus_exit_watermark * 1000;
+               context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+                               v->dram_clock_change_watermark * 1000;
+               context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+               context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+               context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+               context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+               context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+
+               context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
                                (ddr4_dram_factor_single_Channel * v->number_of_channels));
                if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
-                       context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+                       context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
                }
 
-               context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
-               context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+               context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+               context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
 
-               context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+               context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
                if (dc->debug.max_disp_clk == true)
-                       context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+                       context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
 
-               if (context->bw.dcn.calc_clk.dispclk_khz <
+               if (context->bw.dcn.clk.dispclk_khz <
                                dc->debug.min_disp_clk_khz) {
-                       context->bw.dcn.calc_clk.dispclk_khz =
+                       context->bw.dcn.clk.dispclk_khz =
                                        dc->debug.min_disp_clk_khz;
                }
 
-               context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
-
+               context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+               context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
                switch (v->voltage_level) {
                case 0:
-                       context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+                       context->bw.dcn.clk.max_supported_dppclk_khz =
                                        (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
                        break;
                case 1:
-                       context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+                       context->bw.dcn.clk.max_supported_dppclk_khz =
                                        (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
                        break;
                case 2:
-                       context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+                       context->bw.dcn.clk.max_supported_dppclk_khz =
                                        (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
                        break;
                default:
-                       context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+                       context->bw.dcn.clk.max_supported_dppclk_khz =
                                        (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
                        break;
                }
@@ -1225,27 +1286,27 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
 
 unsigned int dcn_find_dcfclk_suits_all(
        const struct dc *dc,
-       struct clocks_value *clocks)
+       struct dc_clocks *clocks)
 {
        unsigned vdd_level, vdd_level_temp;
        unsigned dcf_clk;
 
        /*find a common supported voltage level*/
        vdd_level = dcn_find_normalized_clock_vdd_Level(
-               dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+               dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz);
        vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
-               dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+               dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz);
 
        vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
        vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
-               dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+               dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz);
        vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
 
        vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
-               dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+               dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz);
        vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
        vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
-               dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+               dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz);
 
        /*find the dcfclk corresponding to that level*/
        vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
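Note: a hedged caller-side sketch for the retyped helper above; it hands in the per-state dc_clocks snapshot and receives a DCF clock that satisfies the highest voltage level demanded by any listed clock (the wrapper name is hypothetical):

        static unsigned int pick_dcfclk(struct dc *dc, struct dc_state *context)
        {
                /* finds the common vdd level for dispclk, phyclk, dppclk,
                 * fclk and dcfclk, then returns that level's dcfclk
                 */
                return dcn_find_dcfclk_suits_all(dc, &context->bw.dcn.clk);
        }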
@@ -1331,21 +1392,14 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
 {
        struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
        struct pp_smu_wm_range_sets ranges = {0};
-       int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
-       int max_dcfclk_khz, min_dcfclk_khz;
-       int socclk_khz;
+       int min_fclk_khz, min_dcfclk_khz, socclk_khz;
        const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
-       unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
 
        if (!pp->set_wm_ranges)
                return;
 
        kernel_fpu_begin();
-       max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
-       nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
-       mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
        min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
-       max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
        min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
        socclk_khz = dc->dcn_soc->socclk * 1000;
        kernel_fpu_end();
@@ -1353,105 +1407,46 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
        /* Now notify PPLib/SMU about which Watermarks sets they should select
         * depending on DPM state they are in. And update BW MGR GFX Engine and
         * Memory clock member variables for Watermarks calculations for each
-        * Watermark Set
+        * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254.
         */
        /* SOCCLK does not affect anything but writeback for DCN so for now we don't
         * care what the value is, hence min to overdrive level
         */
-       ranges.num_reader_wm_sets = WM_COUNT;
-       ranges.num_writer_wm_sets = WM_COUNT;
+       ranges.num_reader_wm_sets = WM_SET_COUNT;
+       ranges.num_writer_wm_sets = WM_SET_COUNT;
        ranges.reader_wm_sets[0].wm_inst = WM_A;
        ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
-       ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+       ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
        ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
-       ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+       ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
        ranges.writer_wm_sets[0].wm_inst = WM_A;
        ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
        ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
        ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
-       ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
-
-       ranges.reader_wm_sets[1].wm_inst = WM_B;
-       ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
-       ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
-       ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
-       ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
-       ranges.writer_wm_sets[1].wm_inst = WM_B;
-       ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
-       ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
-       ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
-       ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
-
-
-       ranges.reader_wm_sets[2].wm_inst = WM_C;
-       ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
-       ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
-       ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
-       ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
-       ranges.writer_wm_sets[2].wm_inst = WM_C;
-       ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
-       ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
-       ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
-       ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
-
-       ranges.reader_wm_sets[3].wm_inst = WM_D;
-       ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
-       ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
-       ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
-       ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
-       ranges.writer_wm_sets[3].wm_inst = WM_D;
-       ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
-       ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
-       ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
-       ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+       ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
 
        if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
                ranges.reader_wm_sets[0].wm_inst = WM_A;
                ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
-               ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+               ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
                ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
-               ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+               ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
                ranges.writer_wm_sets[0].wm_inst = WM_A;
                ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
-               ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+               ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
                ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
-               ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
-
-               ranges.reader_wm_sets[1].wm_inst = WM_B;
-               ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
-               ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
-               ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
-               ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
-               ranges.writer_wm_sets[1].wm_inst = WM_B;
-               ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
-               ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
-               ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
-               ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
-
-
-               ranges.reader_wm_sets[2].wm_inst = WM_C;
-               ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
-               ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
-               ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
-               ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
-               ranges.writer_wm_sets[2].wm_inst = WM_C;
-               ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
-               ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
-               ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
-               ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
-
-               ranges.reader_wm_sets[3].wm_inst = WM_D;
-               ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
-               ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
-               ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
-               ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
-               ranges.writer_wm_sets[3].wm_inst = WM_D;
-               ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
-               ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
-               ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
-               ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+               ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
        }
 
+       ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
+       ranges.reader_wm_sets[1].wm_inst = WM_B;
+
+       ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0];
+       ranges.reader_wm_sets[2].wm_inst = WM_C;
+
+       ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0];
+       ranges.reader_wm_sets[3].wm_inst = WM_D;
+
        /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
        pp->set_wm_ranges(&pp->pp_smu, &ranges);
 }
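Note on the kernel_fpu_begin()/kernel_fpu_end() bracket above: the dcn_soc bounding-box members are floating-point, and kernel code may only execute FP instructions inside such a bracket. A minimal sketch of the rule (the helper name is hypothetical):

        static int read_socclk_khz(struct dc *dc)
        {
                int khz;

                /* dcn_soc fields are floats; FP math needs the FPU bracket */
                kernel_fpu_begin();
                khz = dc->dcn_soc->socclk * 1000;
                kernel_fpu_end();

                return khz;
        }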
index 644b2187507b9845e125214f9e941e9a124a33ef..733ac224e7fdc6839438a1089ec93b5d1402dcaf 100644 (file)
@@ -169,6 +169,22 @@ failed_alloc:
        return false;
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_stream_adjust_vmin_vmax
+ *
+ *  @brief
+ *     Looks up the pipe context for each given dc_stream_state and updates
+ *     its vertical_total_min and vertical_total_max for DRR (Dynamic
+ *     Refresh Rate), a power-saving feature that reduces the panel refresh
+ *     rate while the screen is static
+ *
+ *  @param [in] dc: dc reference
+ *  @param [in] streams: array of dc stream states to adjust
+ *  @param [in] num_streams: number of streams in the array
+ *  @param [in] vmin: updated value for vertical_total_min
+ *  @param [in] vmax: updated value for vertical_total_max
+ *****************************************************************************
+ */
 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                int vmin, int vmax)
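Note: a hedged caller-side sketch for the entry point documented above, adjusting a single stream; the vmin/vmax values would come from the panel's DRR range in practice, and DC_LOG_WARNING is assumed available:

        /* illustrative: request a DRR adjustment on one stream */
        struct dc_stream_state *streams[1] = { stream };

        if (!dc_stream_adjust_vmin_vmax(dc, streams, 1, vmin, vmax))
                DC_LOG_WARNING("vmin/vmax adjustment rejected\n");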
@@ -368,6 +384,71 @@ void dc_stream_set_static_screen_events(struct dc *dc,
        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
 }
 
+void dc_link_set_drive_settings(struct dc *dc,
+                               struct link_training_settings *lt_settings,
+                               const struct dc_link *link)
+{
+       int i;
+
+       for (i = 0; i < dc->link_count; i++) {
+               if (dc->links[i] == link)
+                       break;
+       }
+
+       if (i >= dc->link_count) {
+               ASSERT_CRITICAL(false);
+               return;
+       }
+
+       dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+void dc_link_perform_link_training(struct dc *dc,
+                                  struct dc_link_settings *link_setting,
+                                  bool skip_video_pattern)
+{
+       int i;
+
+       for (i = 0; i < dc->link_count; i++)
+               dc_link_dp_perform_link_training(
+                       dc->links[i],
+                       link_setting,
+                       skip_video_pattern);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+                                        struct dc_link_settings *link_setting,
+                                        struct dc_link *link)
+{
+       link->preferred_link_setting = *link_setting;
+       dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+       dc_link_dp_enable_hpd(link);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+       dc_link_dp_disable_hpd(link);
+}
+
+void dc_link_set_test_pattern(struct dc_link *link,
+                             enum dp_test_pattern test_pattern,
+                             const struct link_training_settings *p_link_settings,
+                             const unsigned char *p_custom_pattern,
+                             unsigned int cust_pattern_size)
+{
+       if (link != NULL)
+               dc_link_dp_set_test_pattern(
+                       link,
+                       test_pattern,
+                       p_link_settings,
+                       p_custom_pattern,
+                       cust_pattern_size);
+}
+
 static void destruct(struct dc *dc)
 {
        dc_release_state(dc->current_state);
@@ -386,9 +467,6 @@ static void destruct(struct dc *dc)
        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);
 
-       if (dc->ctx->logger)
-               dal_logger_destroy(&dc->ctx->logger);
-
        kfree(dc->ctx);
        dc->ctx = NULL;
 
@@ -398,7 +476,7 @@ static void destruct(struct dc *dc)
        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;
 
@@ -411,11 +489,10 @@ static void destruct(struct dc *dc)
 static bool construct(struct dc *dc,
                const struct dc_init_data *init_params)
 {
-       struct dal_logger *logger;
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;
 #endif
@@ -437,7 +514,7 @@ static bool construct(struct dc *dc,
        }
 
        dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
@@ -465,6 +542,7 @@ static bool construct(struct dc *dc,
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
+       dc_ctx->dc_sink_id_count = 0;
        dc->ctx = dc_ctx;
 
        dc->current_state = dc_create_state();
@@ -475,14 +553,7 @@ static bool construct(struct dc *dc,
        }
 
        /* Create logger */
-       logger = dal_logger_create(dc_ctx, init_params->log_mask);
 
-       if (!logger) {
-               /* can *not* call logger. call base driver 'print error' */
-               dm_error("%s: failed to create Logger!\n", __func__);
-               goto fail;
-       }
-       dc_ctx->logger = logger;
        dc_ctx->dce_environment = init_params->dce_environment;
 
        dc_version = resource_parse_asic_id(init_params->asic_id);
@@ -901,9 +972,7 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
        for (i = 0; i < context->stream_count; i++) {
                struct dc_stream_state *stream = context->streams[i];
 
-               dc_stream_log(stream,
-                               dc->ctx->logger,
-                               LOG_DC);
+               dc_stream_log(dc, stream);
        }
 
        result = dc_commit_state_no_check(dc, context);
@@ -927,12 +996,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
        dc->optimized_required = false;
 
-       /* 3rd param should be true, temp w/a for RV*/
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-       dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
-#else
        dc->hwss.set_bandwidth(dc, context, true);
-#endif
        return true;
 }
 
@@ -1548,7 +1612,7 @@ struct dc_sink *dc_link_add_remote_sink(
        struct dc_sink *dc_sink;
        enum dc_edid_status edid_status;
 
-       if (len > MAX_EDID_BUFFER_SIZE) {
+       if (len > DC_MAX_EDID_BUFFER_SIZE) {
                dm_error("Max EDID buffer size breached!\n");
                return NULL;
        }
index 267c76766deaec45caac0e792dd698ffdff10159..caece7c13bc642114d06779b12e613cdc1c6b8d7 100644 (file)
@@ -348,23 +348,23 @@ void context_clock_trace(
                struct dc *dc,
                struct dc_state *context)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        DC_LOGGER_INIT(dc->ctx->logger);
        CLOCK_TRACE("Current: dispclk_khz:%d  max_dppclk_khz:%d  dcfclk_khz:%d\n"
                        "dcfclk_deep_sleep_khz:%d  fclk_khz:%d  socclk_khz:%d\n",
-                       context->bw.dcn.calc_clk.dispclk_khz,
-                       context->bw.dcn.calc_clk.dppclk_khz,
-                       context->bw.dcn.calc_clk.dcfclk_khz,
-                       context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
-                       context->bw.dcn.calc_clk.fclk_khz,
-                       context->bw.dcn.calc_clk.socclk_khz);
+                       context->bw.dcn.clk.dispclk_khz,
+                       context->bw.dcn.clk.dppclk_khz,
+                       context->bw.dcn.clk.dcfclk_khz,
+                       context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+                       context->bw.dcn.clk.fclk_khz,
+                       context->bw.dcn.clk.socclk_khz);
        CLOCK_TRACE("Calculated: dispclk_khz:%d  max_dppclk_khz:%d  dcfclk_khz:%d\n"
                        "dcfclk_deep_sleep_khz:%d  fclk_khz:%d  socclk_khz:%d\n",
-                       context->bw.dcn.calc_clk.dispclk_khz,
-                       context->bw.dcn.calc_clk.dppclk_khz,
-                       context->bw.dcn.calc_clk.dcfclk_khz,
-                       context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
-                       context->bw.dcn.calc_clk.fclk_khz,
-                       context->bw.dcn.calc_clk.socclk_khz);
+                       context->bw.dcn.clk.dispclk_khz,
+                       context->bw.dcn.clk.dppclk_khz,
+                       context->bw.dcn.clk.dcfclk_khz,
+                       context->bw.dcn.clk.dcfclk_deep_sleep_khz,
+                       context->bw.dcn.clk.fclk_khz,
+                       context->bw.dcn.clk.socclk_khz);
 #endif
 }
index 2fa521812d23bda9406485591741cd185d9bd293..966d2f9c8c995836e6db51a16893753ed53c0ed6 100644 (file)
@@ -33,6 +33,7 @@
 #include "dc_link_dp.h"
 #include "dc_link_ddc.h"
 #include "link_hwss.h"
+#include "opp.h"
 
 #include "link_encoder.h"
 #include "hw_sequencer.h"
 
 enum {
        LINK_RATE_REF_FREQ_IN_MHZ = 27,
-       PEAK_FACTOR_X1000 = 1006
+       PEAK_FACTOR_X1000 = 1006,
+       /*
+        * Some receivers fail to train on the first try and are good
+        * on subsequent tries. 2 retries should be plenty. If we
+        * don't get a successful training pass then we don't expect
+        * to ever get one.
+        */
+       LINK_TRAINING_MAX_VERIFY_RETRY = 2
 };
 
 /*******************************************************************************
@@ -312,7 +320,7 @@ static enum signal_type get_basic_signal_type(
  * @brief
  * Check whether there is a dongle on DP connector
  */
-static bool is_dp_sink_present(struct dc_link *link)
+bool dc_link_is_dp_sink_present(struct dc_link *link)
 {
        enum gpio_result gpio_result;
        uint32_t clock_pin = 0;
@@ -405,7 +413,7 @@ static enum signal_type link_detect_sink(
                         * we assume signal is DVI; it could be corrected
                         * to HDMI after dongle detection
                         */
-                       if (!is_dp_sink_present(link))
+                       if (!dm_helpers_is_dp_sink_present(link))
                                result = SIGNAL_TYPE_DVI_SINGLE_LINK;
                }
        }
@@ -497,6 +505,10 @@ static bool detect_dp(
                        sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
                        link->type = dc_connection_mst_branch;
 
+                       dal_ddc_service_set_transaction_type(
+                                                       link->ddc,
+                                                       sink_caps->transaction_type);
+
                        /*
                         * This call will initiate MST topology discovery. Which
                         * will detect MST ports and add new DRM connector DRM
@@ -524,6 +536,10 @@ static bool detect_dp(
                        if (reason == DETECT_REASON_BOOT)
                                boot = true;
 
+                       dm_helpers_dp_update_branch_info(
+                               link->ctx,
+                               link);
+
                        if (!dm_helpers_dp_mst_start_top_mgr(
                                link->ctx,
                                link, boot)) {
@@ -751,7 +767,16 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
                                 */
 
                                /* deal with non-mst cases */
-                               dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+                               for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+                                       int fail_count = 0;
+
+                                       dp_verify_link_cap(link,
+                                                         &link->reported_link_cap,
+                                                         &fail_count);
+
+                                       if (fail_count == 0)
+                                               break;
+                               }
                        }
 
                        /* HDMI-DVI Dongle */
@@ -1284,29 +1309,15 @@ static enum dc_status enable_link_dp(
                max_link_rate = LINK_RATE_HIGH3;
 
        if (link_settings.link_rate == max_link_rate) {
-               if (state->dis_clk->funcs->set_min_clocks_state) {
-                       if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
-                               state->dis_clk->funcs->set_min_clocks_state(
-                                       state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
-               } else {
-                       uint32_t dp_phyclk_in_khz;
-                       const struct clocks_value clocks_value =
-                                       state->dis_clk->cur_clocks_value;
-
-                       /* 27mhz = 27000000hz= 27000khz */
-                       dp_phyclk_in_khz = link_settings.link_rate * 27000;
-
-                       if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
-                               (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
-                               (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
-                               state->dis_clk->funcs->apply_clock_voltage_request(
-                                               state->dis_clk,
-                                               DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
-                                               dp_phyclk_in_khz,
-                                               false,
-                                               true);
-                       }
-               }
+               struct dc_clocks clocks = state->bw.dcn.clk;
+
+               /* dce/dcn compat, do not update dispclk */
+               clocks.dispclk_khz = 0;
+               /* 27mhz = 27000000hz= 27000khz */
+               clocks.phyclk_khz = link_settings.link_rate * 27000;
+
+               state->dis_clk->funcs->update_clocks(
+                               state->dis_clk, &clocks, false);
        }
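Note on the 27000 factor above: DPCD link-rate codes are multiples of 0.27 Gbps and the PHY symbol clock is the bit rate divided by 10, so multiplying the code by 27000 yields the symbol clock in kHz. A hypothetical helper making the arithmetic explicit:

        /* e.g. LINK_RATE_HIGH (0x0a, 2.7 Gbps) -> 0x0a * 27000 = 270000 kHz */
        static inline uint32_t dp_link_rate_to_phyclk_khz(enum dc_link_rate rate)
        {
                return (uint32_t)rate * 27000;
        }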
 
        dp_enable_link_phy(
@@ -1861,28 +1872,6 @@ static enum dc_status enable_link(
                break;
        }
 
-       if (pipe_ctx->stream_res.audio && status == DC_OK) {
-               struct dc *core_dc = pipe_ctx->stream->ctx->dc;
-               /* notify audio driver for audio modes of monitor */
-               struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
-               unsigned int i, num_audio = 1;
-               for (i = 0; i < MAX_PIPES; i++) {
-                       /*current_state not updated yet*/
-                       if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
-                               num_audio++;
-               }
-
-               pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
-
-               if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
-                       /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
-                       pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
-               /* un-mute audio */
-               /* TODO: audio should be per stream rather than per link */
-               pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                       pipe_ctx->stream_res.stream_enc, false);
-       }
-
        return status;
 }
 
@@ -2415,10 +2404,13 @@ void core_link_enable_stream(
                        }
        }
 
+       core_dc->hwss.enable_audio_stream(pipe_ctx);
+
        /* turn off otg test pattern if enable */
-       pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
-                       CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
-                       COLOR_DEPTH_UNDEFINED);
+       if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+               pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+                               CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+                               COLOR_DEPTH_UNDEFINED);
 
        core_dc->hwss.enable_stream(pipe_ctx);
 
@@ -2453,6 +2445,22 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
        core_dc->hwss.set_avmute(pipe_ctx, enable);
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_link_enable_hpd_filter
+ *
+ *  @brief
+ *     If enable is true, programs HPD filter on associated HPD line using
+ *     delay_on_disconnect/delay_on_connect values dependent on
+ *     link->connector_signal
+ *
+ *     If enable is false, programs HPD filter on associated HPD line with no
+ *     delays on connect or disconnect
+ *
+ *  @param [in] link: pointer to the dc link
+ *  @param [in] enable: boolean specifying whether to enable the hpd filter
+ *****************************************************************************
+ */
 void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
 {
        struct gpio *hpd;
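Note: a hedged usage sketch for dc_link_enable_hpd_filter as documented above; run_link_test is a hypothetical call site, not part of this patch:

        /* illustrative: drop the debounce delays while running link tests,
         * then restore the connector-specific filtering afterwards
         */
        dc_link_enable_hpd_filter(link, false);
        run_link_test(link);
        dc_link_enable_hpd_filter(link, true);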
index ae48d603ebd6ca73c289c71f50795f5d3bd6f65e..8def0d9fa0ff0586343a13d08a9bea59c1a2da32 100644 (file)
@@ -33,6 +33,7 @@
 #include "include/vector.h"
 #include "core_types.h"
 #include "dc_link_ddc.h"
+#include "aux_engine.h"
 
 #define AUX_POWER_UP_WA_DELAY 500
 #define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -629,83 +630,61 @@ bool dal_ddc_service_query_ddc_data(
        return ret;
 }
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
-       struct ddc_service *ddc,
-       bool i2c,
-       enum i2c_mot_mode mot,
-       uint32_t address,
-       uint8_t *data,
-       uint32_t len,
-       uint32_t *read)
+int dc_link_aux_transfer(struct ddc_service *ddc,
+                            unsigned int address,
+                            uint8_t *reply,
+                            void *buffer,
+                            unsigned int size,
+                            enum aux_transaction_type type,
+                            enum i2caux_transaction_action action)
 {
-       struct aux_payload read_payload = {
-               .i2c_over_aux = i2c,
-               .write = false,
-               .address = address,
-               .length = len,
-               .data = data,
-       };
-       struct aux_command command = {
-               .payloads = &read_payload,
-               .number_of_payloads = 1,
-               .defer_delay = 0,
-               .max_defer_write_retry = 0,
-               .mot = mot
-       };
-
-       *read = 0;
-
-       if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
-               BREAK_TO_DEBUGGER();
-               return DDC_RESULT_FAILED_INVALID_OPERATION;
-       }
+       struct ddc *ddc_pin = ddc->ddc_pin;
+       struct aux_engine *aux_engine;
+       enum aux_channel_operation_result operation_result;
+       struct aux_request_transaction_data aux_req;
+       struct aux_reply_transaction_data aux_rep;
+       uint8_t returned_bytes = 0;
+       int res = -1;
+       uint32_t status;
 
-       if (dal_i2caux_submit_aux_command(
-               ddc->ctx->i2caux,
-               ddc->ddc_pin,
-               &command)) {
-               *read = command.payloads->length;
-               return DDC_RESULT_SUCESSFULL;
-       }
+       memset(&aux_req, 0, sizeof(aux_req));
+       memset(&aux_rep, 0, sizeof(aux_rep));
 
-       return DDC_RESULT_FAILED_OPERATION;
-}
+       aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+       aux_engine->funcs->acquire(aux_engine, ddc_pin);
 
-enum ddc_result dal_ddc_service_write_dpcd_data(
-       struct ddc_service *ddc,
-       bool i2c,
-       enum i2c_mot_mode mot,
-       uint32_t address,
-       const uint8_t *data,
-       uint32_t len)
-{
-       struct aux_payload write_payload = {
-               .i2c_over_aux = i2c,
-               .write = true,
-               .address = address,
-               .length = len,
-               .data = (uint8_t *)data,
-       };
-       struct aux_command command = {
-               .payloads = &write_payload,
-               .number_of_payloads = 1,
-               .defer_delay = 0,
-               .max_defer_write_retry = 0,
-               .mot = mot
-       };
-
-       if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
-               BREAK_TO_DEBUGGER();
-               return DDC_RESULT_FAILED_INVALID_OPERATION;
-       }
+       aux_req.type = type;
+       aux_req.action = action;
+
+       aux_req.address = address;
+       aux_req.delay = 0;
+       aux_req.length = size;
+       aux_req.data = buffer;
 
-       if (dal_i2caux_submit_aux_command(
-               ddc->ctx->i2caux,
-               ddc->ddc_pin,
-               &command))
-               return DDC_RESULT_SUCESSFULL;
+       aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+       operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
 
-       return DDC_RESULT_FAILED_OPERATION;
+       switch (operation_result) {
+       case AUX_CHANNEL_OPERATION_SUCCEEDED:
+               res = returned_bytes;
+
+               if (res <= size && res >= 0)
+                       res = aux_engine->funcs->read_channel_reply(aux_engine, size,
+                                                               buffer, reply,
+                                                               &status);
+
+               break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               res = 0;
+               break;
+       case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+       case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+       case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+               res = -1;
+               break;
+       }
+       aux_engine->funcs->release_engine(aux_engine);
+       return res;
 }
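Note: a hedged sketch of wrapping the new dc_link_aux_transfer above for a one-byte native-AUX DPCD read. The wrapper name is hypothetical; AUX_TRANSACTION_TYPE_DP and I2CAUX_TRANSACTION_ACTION_DP_READ are assumed to be the enum values used by this tree.

        /* returns bytes read on success, 0 on HPD disconnect, -1 on error,
         * mirroring the switch on operation_result above
         */
        static int read_dpcd_byte(struct ddc_service *ddc,
                                  unsigned int address, uint8_t *value)
        {
                uint8_t reply = 0;

                return dc_link_aux_transfer(ddc, address, &reply, value, 1,
                                AUX_TRANSACTION_TYPE_DP,
                                I2CAUX_TRANSACTION_ACTION_DP_READ);
        }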
 
 /*test only function*/
index 7857cb42b3e62e5603af2408128e4cefb6531a0c..160841da72a752b815d4b0d35070f07e6629e0bd 100644 (file)
@@ -3,6 +3,7 @@
 #include "dc.h"
 #include "dc_link_dp.h"
 #include "dm_helpers.h"
+#include "opp.h"
 
 #include "inc/core_types.h"
 #include "link_hwss.h"
@@ -38,7 +39,7 @@ static bool decide_fallback_link_setting(
                struct dc_link_settings initial_link_settings,
                struct dc_link_settings *current_link_setting,
                enum link_training_result training_result);
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
                struct dc_link_settings link_setting_a,
                struct dc_link_settings link_setting_b);
 
@@ -93,8 +94,8 @@ static void dpcd_set_link_settings(
        uint8_t rate = (uint8_t)
        (lt_settings->link_settings.link_rate);
 
-       union down_spread_ctrl downspread = {{0}};
-       union lane_count_set lane_count_set = {{0}};
+       union down_spread_ctrl downspread = { {0} };
+       union lane_count_set lane_count_set = { {0} };
        uint8_t link_set_buffer[2];
 
        downspread.raw = (uint8_t)
@@ -164,11 +165,11 @@ static void dpcd_set_lt_pattern_and_lane_settings(
        const struct link_training_settings *lt_settings,
        enum hw_dp_training_pattern pattern)
 {
-       union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+       union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
        const uint32_t dpcd_base_lt_offset =
        DP_TRAINING_PATTERN_SET;
        uint8_t dpcd_lt_buffer[5] = {0};
-       union dpcd_training_pattern dpcd_pattern = {{0}};
+       union dpcd_training_pattern dpcd_pattern = { {0} };
        uint32_t lane;
        uint32_t size_in_bytes;
        bool edp_workaround = false; /* TODO link_prop.INTERNAL */
@@ -232,7 +233,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                        link,
                        DP_TRAINING_PATTERN_SET,
                        &dpcd_pattern.raw,
-                       sizeof(dpcd_pattern.raw) );
+                       sizeof(dpcd_pattern.raw));
 
                core_link_write_dpcd(
                        link,
@@ -246,7 +247,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                                link,
                                dpcd_base_lt_offset,
                                dpcd_lt_buffer,
-                               size_in_bytes + sizeof(dpcd_pattern.raw) );
+                               size_in_bytes + sizeof(dpcd_pattern.raw));
 
        link->cur_lane_setting = lt_settings->lane_settings[0];
 }
@@ -428,8 +429,8 @@ static void get_lane_status_and_drive_settings(
        struct link_training_settings *req_settings)
 {
        uint8_t dpcd_buf[6] = {0};
-       union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
-       struct link_training_settings request_settings = {{0}};
+       union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+       struct link_training_settings request_settings = { {0} };
        uint32_t lane;
 
        memset(req_settings, '\0', sizeof(struct link_training_settings));
@@ -651,7 +652,7 @@ static bool perform_post_lt_adj_req_sequence(
 
                        if (req_drv_setting_changed) {
                                update_drive_settings(
-                                       lt_settings,req_settings);
+                                       lt_settings, req_settings);
 
                                dc_link_dp_set_drive_settings(link,
                                                lt_settings);
@@ -724,8 +725,8 @@ static enum link_training_result perform_channel_equalization_sequence(
        enum hw_dp_training_pattern hw_tr_pattern;
        uint32_t retries_ch_eq;
        enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
-       union lane_align_status_updated dpcd_lane_status_updated = {{0}};
-       union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+       union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+       union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
 
        hw_tr_pattern = get_supported_tp(link);
 
@@ -1027,6 +1028,9 @@ enum link_training_result dc_link_dp_perform_link_training(
                        lt_settings.lane_settings[0].VOLTAGE_SWING,
                        lt_settings.lane_settings[0].PRE_EMPHASIS);
 
+       if (status != LINK_TRAINING_SUCCESS)
+               link->ctx->dc->debug_data.ltFailCount++;
+
        return status;
 }
 
@@ -1082,9 +1086,10 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
        return max_link_cap;
 }
 
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
        struct dc_link *link,
-       struct dc_link_settings *known_limit_link_setting)
+       struct dc_link_settings *known_limit_link_setting,
+       int *fail_count)
 {
        struct dc_link_settings max_link_cap = {0};
        struct dc_link_settings cur_link_setting = {0};
@@ -1097,6 +1102,11 @@ bool dp_hbr_verify_link_cap(
        enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
        enum link_training_result status;
 
+       if (link->dc->debug.skip_detection_link_training) {
+               link->verified_link_cap = *known_limit_link_setting;
+               return true;
+       }
+
        success = false;
        skip_link_training = false;
 
@@ -1151,6 +1161,8 @@ bool dp_hbr_verify_link_cap(
                                                        skip_video_pattern);
                        if (status == LINK_TRAINING_SUCCESS)
                                success = true;
+                       else
+                               (*fail_count)++;
                }
 
                if (success)
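
Note: dp_verify_link_cap (renamed from dp_hbr_verify_link_cap) additionally
reports how many training attempts failed via the new fail_count out-parameter.
A minimal caller sketch, assuming the caller just wants to log the count
(illustrative, not part of this patch):

	int fail_count = 0;

	if (!dp_verify_link_cap(link, &link->reported_link_cap, &fail_count) ||
			fail_count > 0)
		DC_LOG_WARNING("%d DP link training attempt(s) failed",
				fail_count);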
@@ -1182,7 +1194,7 @@ bool dp_hbr_verify_link_cap(
        return success;
 }
 
-static struct dc_link_settings get_common_supported_link_settings (
+static struct dc_link_settings get_common_supported_link_settings(
                struct dc_link_settings link_setting_a,
                struct dc_link_settings link_setting_b)
 {
@@ -1428,6 +1440,7 @@ static uint32_t bandwidth_in_kbps_from_link_settings(
 
        uint32_t lane_count  = link_setting->lane_count;
        uint32_t kbps = link_rate_in_kbps;
+
        kbps *= lane_count;
        kbps *= 8;   /* 8 bits per byte*/
 
@@ -1445,9 +1458,9 @@ bool dp_validate_mode_timing(
        const struct dc_link_settings *link_setting;
 
        /*always DP fail safe mode*/
-       if (timing->pix_clk_khz == (uint32_t)25175 &&
-               timing->h_addressable == (uint32_t)640 &&
-               timing->v_addressable == (uint32_t)480)
+       if (timing->pix_clk_khz == (uint32_t) 25175 &&
+               timing->h_addressable == (uint32_t) 640 &&
+               timing->v_addressable == (uint32_t) 480)
                return true;
 
        /* We always use verified link settings */
@@ -1647,22 +1660,26 @@ static enum dc_status read_hpd_rx_irq_data(
                        irq_data->raw,
                        sizeof(union hpd_irq_data));
        else {
-               /* Read 2 bytes at this location,... */
+               /* Read 14 bytes in a single read and then copy only the required fields.
+                * This is more efficient than doing it in two separate AUX reads. */
+
+               uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
                retval = core_link_read_dpcd(
                        link,
                        DP_SINK_COUNT_ESI,
-                       irq_data->raw,
-                       2);
+                       tmp,
+                       sizeof(tmp));
 
                if (retval != DC_OK)
                        return retval;
 
-               /* ... then read remaining 4 at the other location */
-               retval = core_link_read_dpcd(
-                       link,
-                       DP_LANE0_1_STATUS_ESI,
-                       &irq_data->raw[2],
-                       4);
+               irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+               irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+               irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
        }
 
        return retval;
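
Note: the single 14-byte read above works because the DPCD ESI registers from
DP_SINK_COUNT_ESI (0x2002) through DP_SINK_STATUS_ESI (0x200f) are contiguous,
so each field's index into tmp[] is just its address minus the base: for
example DP_LANE0_1_STATUS_ESI (0x200c) lands at tmp[0x200c - 0x2002] = tmp[10],
and sizeof(tmp) works out to 0x200f - 0x2002 + 1 = 14 bytes.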
@@ -1767,12 +1784,10 @@ static void dp_test_send_link_training(struct dc_link *link)
        dp_retrain_link_dp_test(link, &link_settings, false);
 }
 
-/* TODO hbr2 compliance eye output is unstable
+/* TODO Raven hbr2 compliance eye output is unstable
  * (toggling on and off) with debugger break
  * This causes intermittent PHY automation failure
  * Need to look into the root cause */
-static uint8_t force_tps4_for_cp2520 = 1;
-
 static void dp_test_send_phy_test_pattern(struct dc_link *link)
 {
        union phy_test_pattern dpcd_test_pattern;
@@ -1832,13 +1847,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
                break;
        case PHY_TEST_PATTERN_CP2520_1:
                /* CP2520 pattern is unstable, temporarily use TPS4 instead */
-               test_pattern = (force_tps4_for_cp2520 == 1) ?
+               test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
                                DP_TEST_PATTERN_TRAINING_PATTERN4 :
                                DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
                break;
        case PHY_TEST_PATTERN_CP2520_2:
                /* CP2520 pattern is unstable, temporarily use TPS4 instead */
-               test_pattern = (force_tps4_for_cp2520 == 1) ?
+               test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
                                DP_TEST_PATTERN_TRAINING_PATTERN4 :
                                DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
                break;
@@ -1991,12 +2006,16 @@ static void handle_automated_test(struct dc_link *link)
                        sizeof(test_response));
 }
 
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
 {
-       union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+       union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
        union device_service_irq device_service_clear = { { 0 } };
-       enum dc_status result = DDC_RESULT_UNKNOWN;
+       enum dc_status result;
+
        bool status = false;
+
+       if (out_link_loss)
+               *out_link_loss = false;
        /* For use cases related to down stream connection status change,
         * PSR and device auto test, refer to function handle_sst_hpd_irq
         * in DAL2.1*/
@@ -2071,6 +2090,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                        true, LINK_TRAINING_ATTEMPTS);
 
                status = false;
+               if (out_link_loss)
+                       *out_link_loss = true;
        }
 
        if (link->type == dc_connection_active_dongle &&
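
Note: the new out_link_loss flag lets callers distinguish "short pulse fully
handled" from "link was lost and retrained". A hedged caller sketch (the
hotplug reaction is an assumption about the display-manager layer, not part of
this patch):

	union hpd_irq_data irq_data;
	bool link_loss = false;

	if (dc_link_handle_hpd_rx_irq(link, &irq_data, &link_loss))
		return;	/* IRQ fully handled by DC */

	if (link_loss) {
		/* assumed DM reaction: re-validate/re-enable the stream */
	}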
@@ -2257,6 +2278,11 @@ static void get_active_converter_info(
 
                link->dpcd_caps.branch_hw_revision =
                        dp_hw_fw_revision.ieee_hw_rev;
+
+               memmove(
+                       link->dpcd_caps.branch_fw_revision,
+                       dp_hw_fw_revision.ieee_fw_rev,
+                       sizeof(dp_hw_fw_revision.ieee_fw_rev));
        }
 }
 
@@ -2305,12 +2331,14 @@ static bool retrieve_link_cap(struct dc_link *link)
 {
        uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
 
+       struct dp_device_vendor_id sink_id;
        union down_stream_port_count down_strm_port_count;
        union edp_configuration_cap edp_config_cap;
        union dp_downstream_port_present ds_port = { 0 };
        enum dc_status status = DC_ERROR_UNEXPECTED;
        uint32_t read_dpcd_retry_cnt = 3;
        int i;
+       struct dp_sink_hw_fw_revision dp_hw_fw_revision;
 
        memset(dpcd_data, '\0', sizeof(dpcd_data));
        memset(&down_strm_port_count,
@@ -2391,6 +2419,36 @@ static bool retrieve_link_cap(struct dc_link *link)
                        &link->dpcd_caps.sink_count.raw,
                        sizeof(link->dpcd_caps.sink_count.raw));
 
+       /* read sink ieee oui */
+       core_link_read_dpcd(link,
+                       DP_SINK_OUI,
+                       (uint8_t *)(&sink_id),
+                       sizeof(sink_id));
+
+       link->dpcd_caps.sink_dev_id =
+                       (sink_id.ieee_oui[0] << 16) +
+                       (sink_id.ieee_oui[1] << 8) +
+                       (sink_id.ieee_oui[2]);
+
+       memmove(
+               link->dpcd_caps.sink_dev_id_str,
+               sink_id.ieee_device_id,
+               sizeof(sink_id.ieee_device_id));
+
+       core_link_read_dpcd(
+               link,
+               DP_SINK_HW_REVISION_START,
+               (uint8_t *)&dp_hw_fw_revision,
+               sizeof(dp_hw_fw_revision));
+
+       link->dpcd_caps.sink_hw_revision =
+               dp_hw_fw_revision.ieee_hw_rev;
+
+       memmove(
+               link->dpcd_caps.sink_fw_revision,
+               dp_hw_fw_revision.ieee_fw_rev,
+               sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
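
Note: the OUI bytes read from DP_SINK_OUI are assembled most-significant byte
first, so e.g. ieee_oui = {0x00, 0x50, 0xC2} yields
sink_dev_id = (0x00 << 16) + (0x50 << 8) + 0xC2 = 0x0050C2.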
        /* Connectivity log: detection */
        CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
 
@@ -2495,8 +2553,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
                pipe_ctx->stream->bit_depth_params = params;
                pipe_ctx->stream_res.opp->funcs->
                        opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
-               pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+               if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+                       pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
                                controller_test_pattern, color_depth);
        }
        break;
@@ -2508,8 +2566,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
                pipe_ctx->stream->bit_depth_params = params;
                pipe_ctx->stream_res.opp->funcs->
                        opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
-
-               pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+               if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+                       pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
                                CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                color_depth);
        }
index 751f3ac9d92146c4bfd368d67e487efde7541d46..2e65715f76a1ce0ad95f89a7fe47ba672320add3 100644 (file)
@@ -41,7 +41,7 @@
 #include "dce100/dce100_resource.h"
 #include "dce110/dce110_resource.h"
 #include "dce112/dce112_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/dcn10_resource.h"
 #endif
 #include "dce120/dce120_resource.h"
@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
        case FAMILY_AI:
                dc_version = DCE_VERSION_12_0;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case FAMILY_RV:
                dc_version = DCN_VERSION_1_0;
                break;
@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
                        num_virtual_links, dc);
                break;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
                res_pool = dcn10_create_resource_pool(
                                num_virtual_links, dc);
@@ -522,13 +522,12 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
        }
 }
 
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        const struct dc_stream_state *stream = pipe_ctx->stream;
        struct rect surf_src = plane_state->src_rect;
        struct rect surf_clip = plane_state->clip_rect;
-       int recout_full_x, recout_full_y;
        bool pri_split = pipe_ctx->bottom_pipe &&
                        pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
        bool sec_split = pipe_ctx->top_pipe &&
@@ -597,20 +596,22 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
                }
        }
        /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
-        *                              * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
-        *                              ratio)
+        *                      * 1 / stream scaling ratio) - (surf src offset * 1 / full scl
+        *                      ratio)
         */
-       recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+       recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
                                        * stream->dst.width / stream->src.width -
                        surf_src.x * plane_state->dst_rect.width / surf_src.width
                                        * stream->dst.width / stream->src.width;
-       recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+       recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
                                        * stream->dst.height / stream->src.height -
                        surf_src.y * plane_state->dst_rect.height / surf_src.height
                                        * stream->dst.height / stream->src.height;
 
-       recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
-       recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+       recout_full->width = plane_state->dst_rect.width
+                                       * stream->dst.width / stream->src.width;
+       recout_full->height = plane_state->dst_rect.height
+                                       * stream->dst.height / stream->src.height;
 }
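
Note: as a worked example of the new recout_full output, take a stream
downscaled 2:1 horizontally (stream->src.width = 1920, stream->dst.width = 960,
both x offsets 0) with plane_state->dst_rect.x = 100, dst_rect.width = 1920 and
surf_src.x = 0: recout_full->x = 0 + (100 - 0) * 960 / 1920 - 0 = 50 and
recout_full->width = 1920 * 960 / 1920 = 960. The init code below can then
derive the old "recout skip" on demand as data->recout.x - recout_full->x
instead of receiving it precomputed.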
 
 static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -662,7 +663,7 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
 }
 
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
 {
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
        struct rect src = pipe_ctx->plane_state->src_rect;
@@ -680,15 +681,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
                flip_vert_scan_dir = true;
        else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
                flip_horz_scan_dir = true;
-       if (pipe_ctx->plane_state->horizontal_mirror)
-               flip_horz_scan_dir = !flip_horz_scan_dir;
 
        if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
                        pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
                rect_swap_helper(&src);
                rect_swap_helper(&data->viewport_c);
                rect_swap_helper(&data->viewport);
-       }
+       } else if (pipe_ctx->plane_state->horizontal_mirror)
+                       flip_horz_scan_dir = !flip_horz_scan_dir;
 
        /*
         * Init calculated according to formula:
@@ -708,127 +708,286 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
        data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
                        dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
 
+       if (!flip_horz_scan_dir) {
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+                       int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h, data->ratios.horz));
 
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+               }
+               if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+                       int vp_clip = (src.x + src.width) / vpc_div -
+                                       data->viewport_c.width - data->viewport_c.x;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+               }
 
-       /* Adjust for viewport end clip-off */
-       if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) {
-               int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
-       }
-       if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) {
-               int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
-       }
-       if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) {
-               int vp_clip = (src.x + src.width) / vpc_div -
-                               data->viewport_c.width - data->viewport_c.x;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
-       }
-       if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) {
-               int vp_clip = (src.y + src.height) / vpc_div -
-                               data->viewport_c.height - data->viewport_c.y;
-               int int_part = dc_fixpt_floor(
-                               dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
-               int_part = int_part > 0 ? int_part : 0;
-               data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
-       }
-
-       /* Adjust for non-0 viewport offset */
-       if (data->viewport.x && !flip_horz_scan_dir) {
-               int int_part;
-
-               data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
-                               data->ratios.horz, recout_skip->width));
-               int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
-               if (int_part < data->taps.h_taps) {
-                       int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
-                                               (data->taps.h_taps - int_part) : data->viewport.x;
-                       data->viewport.x -= int_adj;
-                       data->viewport.width += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.h_taps) {
-                       data->viewport.x += int_part - data->taps.h_taps;
-                       data->viewport.width -= int_part - data->taps.h_taps;
-                       int_part = data->taps.h_taps;
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.x) {
+                       int int_part;
+
+                       data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+                                       data->ratios.horz, data->recout.x - recout_full->x));
+                       int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
+                       if (int_part < data->taps.h_taps) {
+                               int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+                                                       (data->taps.h_taps - int_part) : data->viewport.x;
+                               data->viewport.x -= int_adj;
+                               data->viewport.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps) {
+                               data->viewport.x += int_part - data->taps.h_taps;
+                               data->viewport.width -= int_part - data->taps.h_taps;
+                               int_part = data->taps.h_taps;
+                       }
+                       data->inits.h.value &= 0xffffffff;
+                       data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
                }
-               data->inits.h.value &= 0xffffffff;
-               data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
-       }
-
-       if (data->viewport_c.x && !flip_horz_scan_dir) {
-               int int_part;
-
-               data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
-                               data->ratios.horz_c, recout_skip->width));
-               int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
-               if (int_part < data->taps.h_taps_c) {
-                       int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
-                                       (data->taps.h_taps_c - int_part) : data->viewport_c.x;
-                       data->viewport_c.x -= int_adj;
-                       data->viewport_c.width += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.h_taps_c) {
-                       data->viewport_c.x += int_part - data->taps.h_taps_c;
-                       data->viewport_c.width -= int_part - data->taps.h_taps_c;
-                       int_part = data->taps.h_taps_c;
+
+               if (data->viewport_c.x) {
+                       int int_part;
+
+                       data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+                                       data->ratios.horz_c, data->recout.x - recout_full->x));
+                       int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
+                       if (int_part < data->taps.h_taps_c) {
+                               int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+                                               (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+                               data->viewport_c.x -= int_adj;
+                               data->viewport_c.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps_c) {
+                               data->viewport_c.x += int_part - data->taps.h_taps_c;
+                               data->viewport_c.width -= int_part - data->taps.h_taps_c;
+                               int_part = data->taps.h_taps_c;
+                       }
+                       data->inits.h_c.value &= 0xffffffff;
+                       data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
                }
-               data->inits.h_c.value &= 0xffffffff;
-               data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
-       }
-
-       if (data->viewport.y && !flip_vert_scan_dir) {
-               int int_part;
-
-               data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
-                               data->ratios.vert, recout_skip->height));
-               int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
-               if (int_part < data->taps.v_taps) {
-                       int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
-                                               (data->taps.v_taps - int_part) : data->viewport.y;
-                       data->viewport.y -= int_adj;
-                       data->viewport.height += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.v_taps) {
-                       data->viewport.y += int_part - data->taps.v_taps;
-                       data->viewport.height -= int_part - data->taps.v_taps;
-                       int_part = data->taps.v_taps;
+       } else {
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.x) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h, data->ratios.horz));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
+                       data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
+               }
+               if (data->viewport_c.x) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+                       data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
                }
-               data->inits.v.value &= 0xffffffff;
-               data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
-       }
-
-       if (data->viewport_c.y && !flip_vert_scan_dir) {
-               int int_part;
-
-               data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
-                               data->ratios.vert_c, recout_skip->height));
-               int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
-               if (int_part < data->taps.v_taps_c) {
-                       int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
-                                       (data->taps.v_taps_c - int_part) : data->viewport_c.y;
-                       data->viewport_c.y -= int_adj;
-                       data->viewport_c.height += int_adj;
-                       int_part += int_adj;
-               } else if (int_part > data->taps.v_taps_c) {
-                       data->viewport_c.y += int_part - data->taps.v_taps_c;
-                       data->viewport_c.height -= int_part - data->taps.v_taps_c;
-                       int_part = data->taps.v_taps_c;
+
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+                       int int_part;
+                       int end_offset = src.x + src.width
+                                       - data->viewport.x - data->viewport.width;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * right side of vp due to scan direction
+                        */
+                       data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
+                                       data->ratios.horz, data->recout.x - recout_full->x));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.h) - end_offset;
+                       if (int_part < data->taps.h_taps) {
+                               int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
+                                                       (data->taps.h_taps - int_part) : end_offset;
+                               data->viewport.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps) {
+                               data->viewport.width += int_part - data->taps.h_taps;
+                               int_part = data->taps.h_taps;
+                       }
+                       data->inits.h.value &= 0xffffffff;
+                       data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+               }
+
+               if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+                       int int_part;
+                       int end_offset = (src.x + src.width) / vpc_div
+                                       - data->viewport_c.x - data->viewport_c.width;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * right side of vp due to scan direction
+                        */
+                       data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
+                                       data->ratios.horz_c, data->recout.x - recout_full->x));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
+                       if (int_part < data->taps.h_taps_c) {
+                               int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
+                                                       (data->taps.h_taps_c - int_part) : end_offset;
+                               data->viewport_c.width += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.h_taps_c) {
+                               data->viewport_c.width += int_part - data->taps.h_taps_c;
+                               int_part = data->taps.h_taps_c;
+                       }
+                       data->inits.h_c.value &= 0xffffffff;
+                       data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+               }
+
+       }
+       if (!flip_vert_scan_dir) {
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+                       int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+               }
+               if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+                       int vp_clip = (src.y + src.height) / vpc_div -
+                                       data->viewport_c.height - data->viewport_c.y;
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+               }
+
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.y) {
+                       int int_part;
+
+                       data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+                                       data->ratios.vert, data->recout.y - recout_full->y));
+                       int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
+                       if (int_part < data->taps.v_taps) {
+                               int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+                                                       (data->taps.v_taps - int_part) : data->viewport.y;
+                               data->viewport.y -= int_adj;
+                               data->viewport.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps) {
+                               data->viewport.y += int_part - data->taps.v_taps;
+                               data->viewport.height -= int_part - data->taps.v_taps;
+                               int_part = data->taps.v_taps;
+                       }
+                       data->inits.v.value &= 0xffffffff;
+                       data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+               }
+
+               if (data->viewport_c.y) {
+                       int int_part;
+
+                       data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+                                       data->ratios.vert_c, data->recout.y - recout_full->y));
+                       int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
+                       if (int_part < data->taps.v_taps_c) {
+                               int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+                                               (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+                               data->viewport_c.y -= int_adj;
+                               data->viewport_c.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps_c) {
+                               data->viewport_c.y += int_part - data->taps.v_taps_c;
+                               data->viewport_c.height -= int_part - data->taps.v_taps_c;
+                               int_part = data->taps.v_taps_c;
+                       }
+                       data->inits.v_c.value &= 0xffffffff;
+                       data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
+               }
+       } else {
+               /* Adjust for non-0 viewport offset */
+               if (data->viewport.y) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v, data->ratios.vert));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
+                       data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
+               }
+               if (data->viewport_c.y) {
+                       int int_part = dc_fixpt_floor(
+                                       dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+
+                       int_part = int_part > 0 ? int_part : 0;
+                       data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+                       data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
+               }
+
+               /* Adjust for viewport end clip-off */
+               if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+                       int int_part;
+                       int end_offset = src.y + src.height
+                                       - data->viewport.y - data->viewport.height;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * bottom of vp due to scan direction
+                        */
+                       data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
+                                       data->ratios.vert, data->recout.y - recout_full->y));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.v) - end_offset;
+                       if (int_part < data->taps.v_taps) {
+                               int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
+                                                       (data->taps.v_taps - int_part) : end_offset;
+                               data->viewport.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps) {
+                               data->viewport.height += int_part - data->taps.v_taps;
+                               int_part = data->taps.v_taps;
+                       }
+                       data->inits.v.value &= 0xffffffff;
+                       data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
+               }
+
+               if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+                       int int_part;
+                       int end_offset = (src.y + src.height) / vpc_div
+                                       - data->viewport_c.y - data->viewport_c.height;
+
+                       /*
+                        * this is init if vp had no offset, keep in mind this is from the
+                        * bottom of vp due to scan direction
+                        */
+                       data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
+                                       data->ratios.vert_c, data->recout.y - recout_full->y));
+                       /*
+                        * this is the difference between first pixel of viewport available to read
+                        * and init position, taking into account scan direction
+                        */
+                       int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
+                       if (int_part < data->taps.v_taps_c) {
+                               int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
+                                                       (data->taps.v_taps_c - int_part) : end_offset;
+                               data->viewport_c.height += int_adj;
+                               int_part += int_adj;
+                       } else if (int_part > data->taps.v_taps_c) {
+                               data->viewport_c.height += int_part - data->taps.v_taps_c;
+                               int_part = data->taps.v_taps_c;
+                       }
+                       data->inits.v_c.value &= 0xffffffff;
+                       data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
                }
-               data->inits.v_c.value &= 0xffffffff;
-               data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
        }
 
        /* Interlaced inits based on final vert inits */
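
Note: in the flipped-scan-direction branches above, the scaler init anchors at
the far edge of the viewport, which is why the offset and end-clip adjustments
swap roles relative to the normal-scan branches and work from end_offset. As a
worked example, with src.y = 0, src.height = 1080, viewport.y = 0 and
viewport.height = 1072, end_offset = 0 + 1080 - 0 - 1072 = 8: eight source
lines past the viewport end remain available to extend it for the filter taps,
mirroring what the non-flipped path does at the near edge.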
@@ -846,7 +1005,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
-       struct view recout_skip = { 0 };
+       struct rect recout_full = { 0 };
        bool res = false;
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
        /* Important: scaling ratio calculation requires pixel format,
@@ -866,7 +1025,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
                return false;
 
-       calculate_recout(pipe_ctx, &recout_skip);
+       calculate_recout(pipe_ctx, &recout_full);
 
        /**
         * Setting line buffer pixel depth to 24bpp yields banding
@@ -910,7 +1069,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
        if (res)
                /* May need to re-check lb size after this in some obscure scenario */
-               calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+               calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
 
        DC_LOG_SCALER(
                                "%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1054,7 +1213,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
 
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 static int acquire_first_split_pipe(
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
@@ -1125,7 +1284,7 @@ bool dc_add_plane_to_context(
 
        free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        if (!free_pipe) {
                int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
                if (pipe_idx >= 0)
@@ -1546,8 +1705,8 @@ enum dc_status dc_add_stream_to_ctx(
        struct dc_context *dc_ctx = dc->ctx;
        enum dc_status res;
 
-       if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
-               DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+       if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
+               DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
                return DC_ERROR_UNEXPECTED;
        }
 
@@ -1723,7 +1882,7 @@ enum dc_status resource_map_pool_resources(
        /* acquire new resources */
        pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        if (pipe_idx < 0)
                pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
 #endif
@@ -1789,7 +1948,7 @@ void dc_resource_state_construct(
                const struct dc *dc,
                struct dc_state *dst_ctx)
 {
-       dst_ctx->dis_clk = dc->res_pool->display_clock;
+       dst_ctx->dis_clk = dc->res_pool->dccg;
 }
 
 enum dc_status dc_validate_global_state(
@@ -2347,7 +2506,8 @@ static void set_hdr_static_info_packet(
 {
        /* HDR Static Metadata info packet for HDR10 */
 
-       if (!stream->hdr_static_metadata.valid)
+       if (!stream->hdr_static_metadata.valid ||
+                       stream->use_dynamic_meta)
                return;
 
        *info_packet = stream->hdr_static_metadata;
index 25fae38409aba5f72b0ced6458ea1ef3481e7f13..9971b515c3eb3d81f95a0b6d9bee85e7f29b2267 100644 (file)
@@ -53,6 +53,10 @@ static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init
        sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
        sink->converter_disable_audio = init_params->converter_disable_audio;
        sink->dc_container_id = NULL;
+       sink->sink_id = init_params->link->ctx->dc_sink_id_count;
+       // increment dc_sink_id_count because we don't want two sinks with the same ID
+       // unless they are actually the same
+       init_params->link->ctx->dc_sink_id_count++;
 
        return true;
 }
index 3732a1de9d6c2852f862dc14bbf1571ab7db2055..fdcc8ab19bf3f3eb979ab643f7b1d217551f6900 100644 (file)
@@ -30,6 +30,8 @@
 #include "ipp.h"
 #include "timing_generator.h"
 
+#define DC_LOGGER dc->ctx->logger
+
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
@@ -212,6 +214,8 @@ bool dc_stream_set_cursor_attributes(
                }
 
                core_dc->hwss.set_cursor_attribute(pipe_ctx);
+               if (core_dc->hwss.set_cursor_sdr_white_level)
+                       core_dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
        }
 
        if (pipe_to_program)
@@ -317,16 +321,10 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
        return ret;
 }
 
-
-void dc_stream_log(
-       const struct dc_stream_state *stream,
-       struct dal_logger *dm_logger,
-       enum dc_log_type log_type)
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
 {
-
-       dm_logger_write(dm_logger,
-                       log_type,
-                       "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+       DC_LOG_DC(
+                       "core_stream 0x%p: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
                        stream,
                        stream->src.x,
                        stream->src.y,
@@ -337,21 +335,18 @@ void dc_stream_log(
                        stream->dst.width,
                        stream->dst.height,
                        stream->output_color_space);
-       dm_logger_write(dm_logger,
-                       log_type,
+       DC_LOG_DC(
                        "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
                        stream->timing.pix_clk_khz,
                        stream->timing.h_total,
                        stream->timing.v_total,
                        stream->timing.pixel_encoding,
                        stream->timing.display_color_depth);
-       dm_logger_write(dm_logger,
-                       log_type,
+       DC_LOG_DC(
                        "\tsink name: %s, serial: %d\n",
                        stream->sink->edid_caps.display_name,
                        stream->sink->edid_caps.serial_number);
-       dm_logger_write(dm_logger,
-                       log_type,
+       DC_LOG_DC(
                        "\tlink: %d\n",
                        stream->sink->link->link_index);
 }
index 68a71adeb12e4ca915a5200588f490b146c330f9..8fb3aefd195ca3e384babcac8e002106f5cb9e1a 100644 (file)
@@ -84,6 +84,17 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
        return plane_state;
 }
 
+/**
+ *****************************************************************************
+ *  Function: dc_plane_get_status
+ *
+ *  @brief
+ *     Looks up the pipe context of plane_state and updates the pending status
+ *     of the pipe context. Then returns plane_state->status
+ *
+ *  @param [in] plane_state: pointer to the plane_state to get the status of
+ *****************************************************************************
+ */
 const struct dc_plane_status *dc_plane_get_status(
                const struct dc_plane_state *plane_state)
 {
@@ -181,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
        kref_put(&tf->refcount, dc_transfer_func_free);
 }
 
-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
 {
        struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
 
index 9cfde0ccf4e9d44fb241ec6536d5543856af9924..55bcc3bdc6a3b448d872cc9e5a2484aab02f8775 100644 (file)
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.44"
+#define DC_VER "3.1.59"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
@@ -68,6 +68,7 @@ struct dc_caps {
        uint32_t max_planes;
        uint32_t max_downscale_ratio;
        uint32_t i2c_speed_in_khz;
+       uint32_t dmdata_alloc_size;
        unsigned int max_cursor_size;
        unsigned int max_video_width;
        int linear_pitch_alignment;
@@ -76,6 +77,7 @@ struct dc_caps {
        bool is_apu;
        bool dual_link_dvi;
        bool post_blend_color_processing;
+       bool force_dp_tps4_for_cp2520;
 };
 
 struct dc_dcc_surface_param {
@@ -168,6 +170,12 @@ struct dc_config {
        bool disable_disp_pll_sharing;
 };
 
+enum visual_confirm {
+       VISUAL_CONFIRM_DISABLE = 0,
+       VISUAL_CONFIRM_SURFACE = 1,
+       VISUAL_CONFIRM_HDR = 2,
+};
+
 enum dcc_option {
        DCC_ENABLE = 0,
        DCC_DISABLE = 1,
@@ -185,6 +193,10 @@ enum wm_report_mode {
        WM_REPORT_OVERRIDE = 1,
 };
 
+/*
+ * For any clocks that may differ per pipe,
+ * only the max is stored in this structure
+ */
 struct dc_clocks {
        int dispclk_khz;
        int max_supported_dppclk_khz;
@@ -193,10 +205,11 @@ struct dc_clocks {
        int socclk_khz;
        int dcfclk_deep_sleep_khz;
        int fclk_khz;
+       int phyclk_khz;
 };
 
-struct dc_debug {
-       bool surface_visual_confirm;
+struct dc_debug_options {
+       enum visual_confirm visual_confirm;
        bool sanity_checks;
        bool max_disp_clk;
        bool surface_trace;
@@ -227,6 +240,7 @@ struct dc_debug {
        int urgent_latency_ns;
        int percent_of_ideal_drambw;
        int dram_clock_change_latency_ns;
+       bool optimized_watermark;
        int always_scale;
        bool disable_pplib_clock_request;
        bool disable_clock_gate;
@@ -242,8 +256,19 @@ struct dc_debug {
        bool always_use_regamma;
        bool p010_mpo_support;
        bool recovery_enabled;
+       bool avoid_vbios_exec_table;
+       bool scl_reset_length10;
+       bool hdmi20_disable;
+       bool skip_detection_link_training;
+};
 
+struct dc_debug_data {
+       uint32_t ltFailCount;
+       uint32_t i2cErrorCount;
+       uint32_t auxErrorCount;
 };
+
+
 struct dc_state;
 struct resource_pool;
 struct dce_hwseq;
@@ -252,8 +277,7 @@ struct dc {
        struct dc_caps caps;
        struct dc_cap_funcs cap_funcs;
        struct dc_config config;
-       struct dc_debug debug;
-
+       struct dc_debug_options debug;
        struct dc_context *ctx;
 
        uint8_t link_count;
@@ -268,7 +292,7 @@ struct dc {
        /* Inputs into BW and WM calculations. */
        struct bw_calcs_dceip *bw_dceip;
        struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;
        struct display_mode_lib dml;
@@ -288,9 +312,9 @@ struct dc {
        bool apply_edp_fast_boot_optimization;
 
        /* FBC compressor */
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        struct compressor *fbc_compressor;
-#endif
+
+       struct dc_debug_data debug_data;
 };
 
 enum frame_buffer_mode {
@@ -358,6 +382,7 @@ enum dc_transfer_func_type {
        TF_TYPE_PREDEFINED,
        TF_TYPE_DISTRIBUTED_POINTS,
        TF_TYPE_BYPASS,
+       TF_TYPE_HWPWL
 };
 
 struct dc_transfer_func_distributed_points {
@@ -377,16 +402,22 @@ enum dc_transfer_func_predefined {
        TRANSFER_FUNCTION_PQ,
        TRANSFER_FUNCTION_LINEAR,
        TRANSFER_FUNCTION_UNITY,
+       TRANSFER_FUNCTION_HLG,
+       TRANSFER_FUNCTION_HLG12,
+       TRANSFER_FUNCTION_GAMMA22
 };
 
 struct dc_transfer_func {
        struct kref refcount;
-       struct dc_transfer_func_distributed_points tf_pts;
        enum dc_transfer_func_type type;
        enum dc_transfer_func_predefined tf;
        /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
        uint32_t sdr_ref_white_level;
        struct dc_context *ctx;
+       union {
+               struct pwl_params pwl;
+               struct dc_transfer_func_distributed_points tf_pts;
+       };
 };
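
Note: pwl and tf_pts now share storage because a transfer function carries only
one representation at a time, selected by type. An illustrative accessor under
that assumption (not part of the patch):

	static const struct pwl_params *tf_hw_pwl(const struct dc_transfer_func *tf)
	{
		/* TF_TYPE_HWPWL means the pwl union member is the live one */
		return tf->type == TF_TYPE_HWPWL ? &tf->pwl : NULL;
	}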
 
 /*
@@ -616,9 +647,14 @@ struct dpcd_caps {
        struct dc_dongle_caps dongle_caps;
 
        uint32_t sink_dev_id;
+       int8_t sink_dev_id_str[6];
+       int8_t sink_hw_revision;
+       int8_t sink_fw_revision[2];
+
        uint32_t branch_dev_id;
        int8_t branch_dev_name[6];
        int8_t branch_hw_revision;
+       int8_t branch_fw_revision[2];
 
        bool allow_invalid_MSA_timing_param;
        bool panel_mode_edp;
@@ -661,9 +697,13 @@ struct dc_sink {
        struct dc_link *link;
        struct dc_context *ctx;
 
+       uint32_t sink_id;
+
        /* private to dc_sink.c */
+       // refcount must be the last member in dc_sink, since we want the
+       // sink structure to be logically cloneable up to (but not including)
+       // refcount
        struct kref refcount;
-
 };
 
 void dc_sink_retain(struct dc_sink *sink);
index d9b84ec7954c511d599683772cf28e1be02e974b..90082bab71f072237c8e4dd63b2783c56f3aa91a 100644 (file)
@@ -198,6 +198,10 @@ struct dc_vbios_funcs {
        void (*post_init)(struct dc_bios *bios);
 
        void (*bios_parser_destroy)(struct dc_bios **dcb);
+
+       enum bp_result (*get_board_layout_info)(
+               struct dc_bios *dcb,
+               struct board_layout_info *board_layout_info);
 };
 
 struct bios_registers {
index e1affeb5cc512eadcd32344e9ac6a273241dc2ae..05c8c31d8b310c1324dbe69c31638a991c1e8a4d 100644 (file)
 #ifndef DC_DDC_TYPES_H_
 #define DC_DDC_TYPES_H_
 
+enum aux_transaction_type {
+       AUX_TRANSACTION_TYPE_DP,
+       AUX_TRANSACTION_TYPE_I2C
+};
+
+
+enum i2caux_transaction_action {
+       I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+       I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+       I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+       I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+       I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+       I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+enum aux_channel_operation_result {
+       AUX_CHANNEL_OPERATION_SUCCEEDED,
+       AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+       AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+       AUX_CHANNEL_OPERATION_FAILED_TIMEOUT,
+       AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON
+};
+
+
+struct aux_request_transaction_data {
+       enum aux_transaction_type type;
+       enum i2caux_transaction_action action;
+       /* 20-bit AUX channel transaction address */
+       uint32_t address;
+       /* delay, in 100-microsecond units */
+       uint8_t delay;
+       uint32_t length;
+       uint8_t *data;
+};
+
+enum aux_transaction_reply {
+       AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+       AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+       AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+       AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+       AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+       AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+       AUX_TRANSACTION_REPLY_HPD_DISCON = 0x40,
+
+       AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+       enum aux_transaction_reply status;
+       uint32_t length;
+       uint8_t *data;
+};
+
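
Note: these newly shared types mirror native-AUX request/reply framing; the
action values are the 4-bit AUX command placed in the high nibble. A minimal
sketch of a one-byte native DPCD read request (the buffer is illustrative;
DP_SINK_COUNT is the standard DPCD address 0x200):

	uint8_t buf[1];
	struct aux_request_transaction_data req = {
		.type    = AUX_TRANSACTION_TYPE_DP,
		.action  = I2CAUX_TRANSACTION_ACTION_DP_READ,
		.address = DP_SINK_COUNT,	/* 20-bit DPCD address */
		.delay   = 0,			/* 100 us units */
		.length  = sizeof(buf),
		.data    = buf,
	};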
 struct i2c_payload {
        bool write;
        uint8_t address;
@@ -109,7 +168,7 @@ struct ddc_service {
 
        uint32_t address;
        uint32_t edid_buf_len;
-       uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+       uint8_t edid_buf[DC_MAX_EDID_BUFFER_SIZE];
 };
 
 #endif /* DC_DDC_TYPES_H_ */
index 90bccd5ccaa2b754a739095e9e36b732c4eeaf23..da93ab43f2d8a8c2c7910309fcd7bbf2300f13cd 100644 (file)
@@ -430,7 +430,7 @@ union test_request {
        struct {
        uint8_t LINK_TRAINING         :1;
        uint8_t LINK_TEST_PATTRN      :1;
-       uint8_t EDID_REAT             :1;
+       uint8_t EDID_READ             :1;
        uint8_t PHY_TEST_PATTERN      :1;
        uint8_t AUDIO_TEST_PATTERN    :1;
        uint8_t RESERVED              :1;
@@ -443,7 +443,8 @@ union test_response {
        struct {
                uint8_t ACK         :1;
                uint8_t NO_ACK      :1;
-               uint8_t RESERVED    :6;
+               uint8_t EDID_CHECKSUM_WRITE:1;
+               uint8_t RESERVED    :5;
        } bits;
        uint8_t raw;
 };
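
Note: the new EDID_CHECKSUM_WRITE bit corresponds to TEST_EDID_CHECKSUM_WRITE
in the DisplayPort TEST_RESPONSE register: after an EDID_READ automated test,
the source sets it alongside ACK to indicate it has written the received EDID
checksum back to TEST_EDID_CHECKSUM.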
index bd0fda0ceb919d3d159db4af83409404e8ef873b..e68077e655658b374cdede4c2338ee60d2d5abb5 100644 (file)
@@ -255,3 +255,54 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
 
        return reg_val;
 }
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+               uint32_t addr_index, uint32_t addr_data,
+               uint32_t index, uint32_t data)
+{
+       dm_write_reg(ctx, addr_index, index);
+       dm_write_reg(ctx, addr_data, data);
+}
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+               uint32_t addr_index, uint32_t addr_data,
+               uint32_t index)
+{
+       uint32_t value = 0;
+
+       dm_write_reg(ctx, addr_index, index);
+       value = dm_read_reg(ctx, addr_data);
+
+       return value;
+}
+
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+               uint32_t addr_index, uint32_t addr_data,
+               uint32_t index, uint32_t reg_val, int n,
+               uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+               ...)
+{
+       uint32_t shift, mask, field_value;
+       int i = 1;
+
+       va_list ap;
+
+       va_start(ap, field_value1);
+
+       reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+       while (i < n) {
+               shift = va_arg(ap, uint32_t);
+               mask = va_arg(ap, uint32_t);
+               field_value = va_arg(ap, uint32_t);
+
+               reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+               i++;
+       }
+
+       generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
+       va_end(ap);
+
+       return reg_val;
+}
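[editor's note] The variadic updater consumes n shift/mask/value triples: the first comes from the named parameters, the remaining n - 1 from the va_list. A sketch updating two fields in one call (the field names are hypothetical):

	reg_val = generic_indirect_reg_update_ex(ctx, mmINDEX, mmDATA,
			0x10, reg_val, 2,
			FIELD_A__SHIFT, FIELD_A_MASK, 1,
			FIELD_B__SHIFT, FIELD_B_MASK, 0);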
index b1f70579d61b8c7fbf454bb318109239767731db..1d1f2d5ece519f511cb75b7c2f3c83778eb1135a 100644 (file)
@@ -192,13 +192,14 @@ enum surface_pixel_format {
        /*swapped & float*/
        SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
        /*grow graphics here if necessary */
-
+       SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
        SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
        SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
                SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
        SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
        SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
        SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+       SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
        SURFACE_PIXEL_FORMAT_INVALID
 
        /*grow 444 video here if necessary */
@@ -403,9 +404,11 @@ struct dc_cursor_position {
 struct dc_cursor_mi_param {
        unsigned int pixel_clk_khz;
        unsigned int ref_clk_khz;
-       unsigned int viewport_x_start;
-       unsigned int viewport_width;
+       struct rect viewport;
        struct fixed31_32 h_scale_ratio;
+       struct fixed31_32 v_scale_ratio;
+       enum dc_rotation_angle rotation;
+       bool mirror;
 };
 
 /* IPP related types */
@@ -489,6 +492,7 @@ struct dc_cursor_attributes {
        uint32_t height;
 
        enum dc_cursor_color_format color_format;
+       uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
 
        /* In case we support HW Cursor rotation in the future */
        enum dc_rotation_angle rotation_angle;
@@ -496,6 +500,11 @@ struct dc_cursor_attributes {
        union dc_cursor_attribute_flags attribute_flags;
 };
 
+struct dpp_cursor_attributes {
+       int bias;
+       int scale;
+};
+
 /* OPP */
 
 enum dc_color_space {
@@ -567,25 +576,25 @@ struct scaling_taps {
 };
 
 enum dc_timing_standard {
-       TIMING_STANDARD_UNDEFINED,
-       TIMING_STANDARD_DMT,
-       TIMING_STANDARD_GTF,
-       TIMING_STANDARD_CVT,
-       TIMING_STANDARD_CVT_RB,
-       TIMING_STANDARD_CEA770,
-       TIMING_STANDARD_CEA861,
-       TIMING_STANDARD_HDMI,
-       TIMING_STANDARD_TV_NTSC,
-       TIMING_STANDARD_TV_NTSC_J,
-       TIMING_STANDARD_TV_PAL,
-       TIMING_STANDARD_TV_PAL_M,
-       TIMING_STANDARD_TV_PAL_CN,
-       TIMING_STANDARD_TV_SECAM,
-       TIMING_STANDARD_EXPLICIT,
+       DC_TIMING_STANDARD_UNDEFINED,
+       DC_TIMING_STANDARD_DMT,
+       DC_TIMING_STANDARD_GTF,
+       DC_TIMING_STANDARD_CVT,
+       DC_TIMING_STANDARD_CVT_RB,
+       DC_TIMING_STANDARD_CEA770,
+       DC_TIMING_STANDARD_CEA861,
+       DC_TIMING_STANDARD_HDMI,
+       DC_TIMING_STANDARD_TV_NTSC,
+       DC_TIMING_STANDARD_TV_NTSC_J,
+       DC_TIMING_STANDARD_TV_PAL,
+       DC_TIMING_STANDARD_TV_PAL_M,
+       DC_TIMING_STANDARD_TV_PAL_CN,
+       DC_TIMING_STANDARD_TV_SECAM,
+       DC_TIMING_STANDARD_EXPLICIT,
        /*!< For explicit timings from EDID, VBIOS, etc.*/
-       TIMING_STANDARD_USER_OVERRIDE,
+       DC_TIMING_STANDARD_USER_OVERRIDE,
        /*!< For mode timing override by user*/
-       TIMING_STANDARD_MAX
+       DC_TIMING_STANDARD_MAX
 };
 
 enum dc_color_depth {
index 8a716baa1203bdcd7b85f912aa6a22d523f57a5e..070a56926308ac4d314792a39fb3642371301e82 100644 (file)
@@ -172,7 +172,7 @@ bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
  * false - no change in Downstream port status. No further action required
  * from DM. */
 bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
-               union hpd_irq_data *hpd_irq_dpcd_data);
+               union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
 
 struct dc_sink_init_data;
 
@@ -210,10 +210,29 @@ bool dc_link_dp_set_test_pattern(
 
 void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
 
+bool dc_link_is_dp_sink_present(struct dc_link *link);
+
 /*
  * DPCD access interfaces
  */
 
+void dc_link_set_drive_settings(struct dc *dc,
+                               struct link_training_settings *lt_settings,
+                               const struct dc_link *link);
+void dc_link_perform_link_training(struct dc *dc,
+                                  struct dc_link_settings *link_setting,
+                                  bool skip_video_pattern);
+void dc_link_set_preferred_link_settings(struct dc *dc,
+                                        struct dc_link_settings *link_setting,
+                                        struct dc_link *link);
+void dc_link_enable_hpd(const struct dc_link *link);
+void dc_link_disable_hpd(const struct dc_link *link);
+void dc_link_set_test_pattern(struct dc_link *link,
+                       enum dp_test_pattern test_pattern,
+                       const struct link_training_settings *p_link_settings,
+                       const unsigned char *p_custom_pattern,
+                       unsigned int cust_pattern_size);
+
 bool dc_submit_i2c(
                struct dc *dc,
                uint32_t link_index,
index d7e6d53bb3834828fd36877fff40fb736b24c2c0..cbfe418006cbade10cbfc58313fcfee45d1482f6 100644 (file)
@@ -59,6 +59,9 @@ struct dc_stream_state {
        struct freesync_context freesync_ctx;
 
        struct dc_info_packet hdr_static_metadata;
+       PHYSICAL_ADDRESS_LOC dmdata_address;
+       bool   use_dynamic_meta;
+
        struct dc_transfer_func *out_transfer_func;
        struct colorspace_transform gamut_remap_matrix;
        struct dc_csc_transform csc_color_matrix;
@@ -97,6 +100,7 @@ struct dc_stream_state {
 
        struct dc_cursor_attributes cursor_attributes;
        struct dc_cursor_position cursor_position;
+       uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
 
        /* from stream struct */
        struct kref refcount;
@@ -144,10 +148,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
 /*
  * Log the current stream state.
  */
-void dc_stream_log(
-       const struct dc_stream_state *stream,
-       struct dal_logger *dc_logger,
-       enum dc_log_type log_type);
+void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream);
 
 uint8_t dc_get_current_stream_count(struct dc *dc);
 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
@@ -255,6 +256,7 @@ bool dc_stream_set_cursor_position(
        struct dc_stream_state *stream,
        const struct dc_cursor_position *position);
 
+
 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                                struct dc_stream_state **stream,
                                int num_streams,
@@ -299,9 +301,4 @@ bool dc_stream_get_crtc_position(struct dc *dc,
                                 unsigned int *v_pos,
                                 unsigned int *nom_v_pos);
 
-void dc_stream_set_static_screen_events(struct dc *dc,
-                                       struct dc_stream_state **stream,
-                                       int num_streams,
-                                       const struct dc_static_screen_events *events);
-
 #endif /* DC_STREAM_H_ */
index 76df2534c4a41bce876278d05ad1e8708ce9d363..8c6eb78b0c3bd8df91dce93bdb899cfad8d97ba5 100644 (file)
@@ -77,8 +77,6 @@ struct dc_context {
        struct dc *dc;
 
        void *driver_context; /* e.g. amdgpu_device */
-
-       struct dal_logger *logger;
        void *cgs_device;
 
        enum dce_environment dce_environment;
@@ -92,13 +90,12 @@ struct dc_context {
        bool created_bios;
        struct gpio_service *gpio_service;
        struct i2caux *i2caux;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+       uint32_t dc_sink_id_count;
        uint64_t fbc_gpu_addr;
-#endif
 };
 
 
-#define MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 512
 #define EDID_BLOCK_SIZE 128
 #define MAX_SURFACE_NUM 4
 #define NUM_PIXEL_FORMATS 10
@@ -137,13 +134,13 @@ enum plane_stereo_format {
  */
 
 enum dc_edid_connector_type {
-       EDID_CONNECTOR_UNKNOWN = 0,
-       EDID_CONNECTOR_ANALOG = 1,
-       EDID_CONNECTOR_DIGITAL = 10,
-       EDID_CONNECTOR_DVI = 11,
-       EDID_CONNECTOR_HDMIA = 12,
-       EDID_CONNECTOR_MDDI = 14,
-       EDID_CONNECTOR_DISPLAYPORT = 15
+       DC_EDID_CONNECTOR_UNKNOWN = 0,
+       DC_EDID_CONNECTOR_ANALOG = 1,
+       DC_EDID_CONNECTOR_DIGITAL = 10,
+       DC_EDID_CONNECTOR_DVI = 11,
+       DC_EDID_CONNECTOR_HDMIA = 12,
+       DC_EDID_CONNECTOR_MDDI = 14,
+       DC_EDID_CONNECTOR_DISPLAYPORT = 15
 };
 
 enum dc_edid_status {
@@ -169,7 +166,7 @@ struct dc_cea_audio_mode {
 
 struct dc_edid {
        uint32_t length;
-       uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+       uint8_t raw_edid[DC_MAX_EDID_BUFFER_SIZE];
 };
 
 /* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
@@ -195,6 +192,7 @@ union display_content_support {
 
 struct dc_panel_patch {
        unsigned int dppowerup_delay;
+       unsigned int extra_t12_ms;
 };
 
 struct dc_edid_caps {
index 11401fd8e5356c8e4866f6660df34ff2da1e155b..825537bd454553ce2ecfce2aea148f9ce173847f 100644 (file)
@@ -28,7 +28,7 @@
 
 DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
 dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
 
 
 AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
new file mode 100644 (file)
index 0000000..3f5b2e6
--- /dev/null
@@ -0,0 +1,937 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_aux.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+       aux110->base.ctx
+#define REG(reg_name)\
+       (aux110->regs->reg_name)
+
+#define DC_LOGGER \
+       engine->ctx->logger
+
+#include "reg_helper.h"
+
+#define FROM_AUX_ENGINE(ptr) \
+       container_of((ptr), struct aux_engine_dce110, base)
+
+#define FROM_ENGINE(ptr) \
+       FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+#define FROM_AUX_ENGINE_ENGINE(ptr) \
+       container_of((ptr), struct aux_engine, base)
+enum {
+       AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+       AUX_TIMED_OUT_RETRY_COUNTER = 2,
+       AUX_DEFER_RETRY_COUNTER = 6
+};
+static void release_engine(
+       struct aux_engine *engine)
+{
+       struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+       dal_ddc_close(engine->ddc);
+
+       engine->ddc = NULL;
+
+       REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+       struct aux_engine *engine)
+{
+       struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+       uint32_t value = REG_READ(AUX_ARB_CONTROL);
+       uint32_t field = get_reg_field_value(
+                       value,
+                       AUX_ARB_CONTROL,
+                       AUX_REG_RW_CNTL_STATUS);
+
+       return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+       struct aux_engine *engine)
+{
+       struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+       uint32_t value = REG_READ(AUX_ARB_CONTROL);
+       uint32_t field = get_reg_field_value(
+                       value,
+                       AUX_ARB_CONTROL,
+                       AUX_REG_RW_CNTL_STATUS);
+       if (field == DMCU_CAN_ACCESS_AUX)
+               return false;
+       /* enable AUX before requesting SW access to AUX */
+       value = REG_READ(AUX_CONTROL);
+       field = get_reg_field_value(value,
+                               AUX_CONTROL,
+                               AUX_EN);
+
+       if (field == 0) {
+               set_reg_field_value(
+                               value,
+                               1,
+                               AUX_CONTROL,
+                               AUX_EN);
+
+               if (REG(AUX_RESET_MASK)) {
+                       /* reset the DP_AUX block as part of the enable sequence */
+                       set_reg_field_value(
+                               value,
+                               1,
+                               AUX_CONTROL,
+                               AUX_RESET);
+               }
+
+               REG_WRITE(AUX_CONTROL, value);
+
+               if (REG(AUX_RESET_MASK)) {
+                       /* poll HW to make sure the reset is done */
+
+                       REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+                                       1, 11);
+
+                       set_reg_field_value(
+                               value,
+                               0,
+                               AUX_CONTROL,
+                               AUX_RESET);
+
+                       REG_WRITE(AUX_CONTROL, value);
+
+                       REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+                                       1, 11);
+               }
+       } /*if (field)*/
+
+       /* request SW to access AUX */
+       REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+       value = REG_READ(AUX_ARB_CONTROL);
+       field = get_reg_field_value(
+                       value,
+                       AUX_ARB_CONTROL,
+                       AUX_REG_RW_CNTL_STATUS);
+
+       return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+       ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+       ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+       (0xFF & (address))
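[editor's note] A quick worked example of the packing (values illustrative): the first byte carries the command together with address bits 19:16, followed by one byte each for bits 15:8 and 7:0.

	/* For address 0x2345A and command byte 0x90:
	 *   COMPOSE_AUX_SW_DATA_16_20(0x90, 0x2345A) == 0x92
	 *   COMPOSE_AUX_SW_DATA_8_15(0x2345A)        == 0x34
	 *   COMPOSE_AUX_SW_DATA_0_7(0x2345A)         == 0x5A
	 */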
+
+static void submit_channel_request(
+       struct aux_engine *engine,
+       struct aux_request_transaction_data *request)
+{
+       struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+       uint32_t value;
+       uint32_t length;
+
+       bool is_write =
+               ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+                (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+               ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+               ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+                (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+       if (REG(AUXN_IMPCAL)) {
+               /* clear_aux_error */
+               REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+                               1,
+                               0);
+
+               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+                               1,
+                               0);
+
+               /* force_default_calibrate */
+               REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+                               AUXN_IMPCAL_ENABLE, 1,
+                               AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+               /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP toggles OVERRIDE? */
+
+               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+                               1,
+                               0);
+       }
+       /* set the delay and the number of bytes to write */
+
+       /* The length includes
+        * the 4-bit header and the 20-bit address
+        * (that is, 3 bytes).
+        * If the requested length is non-zero, this means
+        * an additional byte specifying the length is required.
+        */
+
+       length = request->length ? 4 : 3;
+       if (is_write)
+               length += request->length;
+
+       REG_UPDATE_2(AUX_SW_CONTROL,
+                       AUX_SW_START_DELAY, request->delay,
+                       AUX_SW_WR_BYTES, length);
+
+       /* program the action, address, and payload data (if 'is_write') */
+       value = REG_UPDATE_4(AUX_SW_DATA,
+                       AUX_SW_INDEX, 0,
+                       AUX_SW_DATA_RW, 0,
+                       AUX_SW_AUTOINCREMENT_DISABLE, 1,
+                       AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+       value = REG_SET_2(AUX_SW_DATA, value,
+                       AUX_SW_AUTOINCREMENT_DISABLE, 0,
+                       AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+       value = REG_SET(AUX_SW_DATA, value,
+                       AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+       if (request->length) {
+               value = REG_SET(AUX_SW_DATA, value,
+                               AUX_SW_DATA, request->length - 1);
+       }
+
+       if (is_write) {
+               /* Load the HW buffer with the data to be sent.
+                * This is relevant for write operations.
+                * For reads, the received data will be
+                * processed in process_channel_reply().
+                */
+               uint32_t i = 0;
+
+               while (i < request->length) {
+                       value = REG_SET(AUX_SW_DATA, value,
+                                       AUX_SW_DATA, request->data[i]);
+
+                       ++i;
+               }
+       }
+
+       REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+                               10, aux110->timeout_period/10);
+       REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
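[editor's note] To make the header-length rule above concrete (illustrative numbers):

	/* Worked example:
	 *   write, request->length == 16:  length = 4 + 16 = 20
	 *   read,  request->length == 16:  length = 4 (payload arrives in reply)
	 *   address-only (length == 0):    length = 3
	 */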
+
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+                             uint8_t *buffer, uint8_t *reply_result,
+                             uint32_t *sw_status)
+{
+       struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+       uint32_t bytes_replied;
+       uint32_t reply_result_32;
+
+       *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+                            &bytes_replied);
+
+       /* In case HPD is LOW, exit AUX transaction */
+       if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+               return -1;
+
+       /* Need at least the status byte */
+       if (!bytes_replied)
+               return -1;
+
+       REG_UPDATE_1BY1_3(AUX_SW_DATA,
+                         AUX_SW_INDEX, 0,
+                         AUX_SW_AUTOINCREMENT_DISABLE, 1,
+                         AUX_SW_DATA_RW, 1);
+
+       REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+       reply_result_32 = reply_result_32 >> 4;
+       *reply_result = (uint8_t)reply_result_32;
+
+       if (reply_result_32 == 0) { /* ACK */
+               uint32_t i = 0;
+
+               /* First byte was already used to get the command status */
+               --bytes_replied;
+
+               /* Do not overflow buffer */
+               if (bytes_replied > size)
+                       return -1;
+
+               while (i < bytes_replied) {
+                       uint32_t aux_sw_data_val;
+
+                       REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+                       buffer[i] = aux_sw_data_val;
+                       ++i;
+               }
+
+               return i;
+       }
+
+       return 0;
+}
+
+static void process_channel_reply(
+       struct aux_engine *engine,
+       struct aux_reply_transaction_data *reply)
+{
+       int bytes_replied;
+       uint8_t reply_result;
+       uint32_t sw_status;
+
+       bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+                                          &reply_result, &sw_status);
+
+       /* in case HPD is LOW, exit AUX transaction */
+       if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+               reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+               return;
+       }
+
+       if (bytes_replied < 0) {
+               /* Need to handle an error case...
+                * Ideally, the upper-layer function won't call this function
+                * if the number of bytes in the reply was 0, because an error
+                * was surely asserted that should have been handled; for the
+                * hot-plug case, this can still happen.
+                */
+               if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+                       reply->status = AUX_TRANSACTION_REPLY_INVALID;
+                       ASSERT_CRITICAL(false);
+                       return;
+               }
+       } else {
+
+               switch (reply_result) {
+               case 0: /* ACK */
+                       reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+               break;
+               case 1: /* NACK */
+                       reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+               break;
+               case 2: /* DEFER */
+                       reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+               break;
+               case 4: /* AUX ACK / I2C NACK */
+                       reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+               break;
+               case 8: /* AUX ACK / I2C DEFER */
+                       reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+               break;
+               default:
+                       reply->status = AUX_TRANSACTION_REPLY_INVALID;
+               }
+       }
+}
+
+static enum aux_channel_operation_result get_channel_status(
+       struct aux_engine *engine,
+       uint8_t *returned_bytes)
+{
+       struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+       uint32_t value;
+
+       if (returned_bytes == NULL) {
+               /* caller passed a NULL pointer */
+               ASSERT_CRITICAL(false);
+               return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+       }
+       *returned_bytes = 0;
+
+       /* poll to make sure that SW_DONE is asserted */
+       value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+                               10, aux110->timeout_period/10);
+
+       /* in case HPD is LOW, exit AUX transaction */
+       if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+               return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
+       /* Note that the following bits are set in 'status.bits'
+        * during CTS 4.2.1.2 (FW 3.3.1):
+        * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+        * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+        *
+        * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+        * HW debugging bit and should be ignored.
+        */
+       if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+               if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+                       (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+                       return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+               else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+                       (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+                       (value &
+                               AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+                       (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+                       return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+               *returned_bytes = get_reg_field_value(value,
+                               AUX_SW_STATUS,
+                               AUX_SW_REPLY_BYTE_COUNT);
+
+               if (*returned_bytes == 0)
+                       return
+                       AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+               else {
+                       *returned_bytes -= 1;
+                       return AUX_CHANNEL_OPERATION_SUCCEEDED;
+               }
+       } else {
+               /* time_elapsed >= aux_engine->timeout_period;
+                * AUX_SW_STATUS__AUX_SW_HPD_DISCON may be set at this point
+                */
+               ASSERT_CRITICAL(false);
+               return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+       }
+}
+static void process_read_reply(
+       struct aux_engine *engine,
+       struct read_command_context *ctx)
+{
+       engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+       switch (ctx->reply.status) {
+       case AUX_TRANSACTION_REPLY_AUX_ACK:
+               ctx->defer_retry_aux = 0;
+               if (ctx->returned_byte > ctx->current_read_length) {
+                       ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+                       ctx->operation_succeeded = false;
+               } else if (ctx->returned_byte < ctx->current_read_length) {
+                       ctx->current_read_length -= ctx->returned_byte;
+
+                       ctx->offset += ctx->returned_byte;
+
+                       ++ctx->invalid_reply_retry_aux_on_ack;
+
+                       if (ctx->invalid_reply_retry_aux_on_ack >
+                               AUX_INVALID_REPLY_RETRY_COUNTER) {
+                               ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+                               ctx->operation_succeeded = false;
+                       }
+               } else {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+                       ctx->transaction_complete = true;
+                       ctx->operation_succeeded = true;
+               }
+       break;
+       case AUX_TRANSACTION_REPLY_AUX_NACK:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+               ctx->operation_succeeded = false;
+       break;
+       case AUX_TRANSACTION_REPLY_AUX_DEFER:
+               ++ctx->defer_retry_aux;
+
+               if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                       ctx->operation_succeeded = false;
+               }
+       break;
+       case AUX_TRANSACTION_REPLY_I2C_DEFER:
+               ctx->defer_retry_aux = 0;
+
+               ++ctx->defer_retry_i2c;
+
+               if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                       ctx->operation_succeeded = false;
+               }
+       break;
+       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
+       default:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+               ctx->operation_succeeded = false;
+       }
+}
+static void process_read_request(
+       struct aux_engine *engine,
+       struct read_command_context *ctx)
+{
+       enum aux_channel_operation_result operation_result;
+
+       engine->funcs->submit_channel_request(engine, &ctx->request);
+
+       operation_result = engine->funcs->get_channel_status(
+               engine, &ctx->returned_byte);
+
+       switch (operation_result) {
+       case AUX_CHANNEL_OPERATION_SUCCEEDED:
+               if (ctx->returned_byte > ctx->current_read_length) {
+                       ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+                       ctx->operation_succeeded = false;
+               } else {
+                       ctx->timed_out_retry_aux = 0;
+                       ctx->invalid_reply_retry_aux = 0;
+
+                       ctx->reply.length = ctx->returned_byte;
+                       ctx->reply.data = ctx->buffer;
+
+                       process_read_reply(engine, ctx);
+               }
+       break;
+       case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+               ++ctx->invalid_reply_retry_aux;
+
+               if (ctx->invalid_reply_retry_aux >
+                       AUX_INVALID_REPLY_RETRY_COUNTER) {
+                       ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+                       ctx->operation_succeeded = false;
+               } else
+                       udelay(400);
+       break;
+       case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+               ++ctx->timed_out_retry_aux;
+
+               if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                       ctx->operation_succeeded = false;
+               } else {
+                       /* DP 1.2a, table 2-58:
+                        * "S3: AUX Request CMD PENDING:
+                        * retry 3 times, with 400usec wait on each"
+                        * The HW timeout is set to 550usec,
+                        * so we should not wait here
+                        */
+               }
+       break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
+       default:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+               ctx->operation_succeeded = false;
+       }
+}
+static bool read_command(
+       struct aux_engine *engine,
+       struct i2caux_transaction_request *request,
+       bool middle_of_transaction)
+{
+       struct read_command_context ctx;
+
+       ctx.buffer = request->payload.data;
+       ctx.current_read_length = request->payload.length;
+       ctx.offset = 0;
+       ctx.timed_out_retry_aux = 0;
+       ctx.invalid_reply_retry_aux = 0;
+       ctx.defer_retry_aux = 0;
+       ctx.defer_retry_i2c = 0;
+       ctx.invalid_reply_retry_aux_on_ack = 0;
+       ctx.transaction_complete = false;
+       ctx.operation_succeeded = true;
+
+       if (request->payload.address_space ==
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+               ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+               ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+               ctx.request.address = request->payload.address;
+       } else if (request->payload.address_space ==
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+               ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+               ctx.request.action = middle_of_transaction ?
+                       I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+                       I2CAUX_TRANSACTION_ACTION_I2C_READ;
+               ctx.request.address = request->payload.address >> 1;
+       } else {
+               /* in DAL2, there was no return in such a case */
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+
+       ctx.request.delay = 0;
+
+       do {
+               memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+               ctx.request.data = ctx.buffer + ctx.offset;
+               ctx.request.length = ctx.current_read_length;
+
+               process_read_request(engine, &ctx);
+
+               request->status = ctx.status;
+
+               if (ctx.operation_succeeded && !ctx.transaction_complete)
+                       if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+                               msleep(engine->delay);
+       } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+       if (request->payload.address_space ==
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+               DC_LOG_I2C_AUX("READ: addr:0x%x  value:0x%x Result:%d",
+                               request->payload.address,
+                               request->payload.data[0],
+                               ctx.operation_succeeded);
+       }
+
+       return ctx.operation_succeeded;
+}
+
+static void process_write_reply(
+       struct aux_engine *engine,
+       struct write_command_context *ctx)
+{
+       engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+       switch (ctx->reply.status) {
+       case AUX_TRANSACTION_REPLY_AUX_ACK:
+               ctx->operation_succeeded = true;
+
+               if (ctx->returned_byte) {
+                       ctx->request.action = ctx->mot ?
+                       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+                       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+                       ctx->current_write_length = 0;
+
+                       ++ctx->ack_m_retry;
+
+                       if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+                               ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                               ctx->operation_succeeded = false;
+                       } else
+                               udelay(300);
+               } else {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+                       ctx->defer_retry_aux = 0;
+                       ctx->ack_m_retry = 0;
+                       ctx->transaction_complete = true;
+               }
+       break;
+       case AUX_TRANSACTION_REPLY_AUX_NACK:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+               ctx->operation_succeeded = false;
+       break;
+       case AUX_TRANSACTION_REPLY_AUX_DEFER:
+               ++ctx->defer_retry_aux;
+
+               if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                       ctx->operation_succeeded = false;
+               }
+       break;
+       case AUX_TRANSACTION_REPLY_I2C_DEFER:
+               ctx->defer_retry_aux = 0;
+               ctx->current_write_length = 0;
+
+               ctx->request.action = ctx->mot ?
+                       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+                       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+               ++ctx->defer_retry_i2c;
+
+               if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                       ctx->operation_succeeded = false;
+               }
+       break;
+       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
+       default:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+               ctx->operation_succeeded = false;
+       }
+}
+static void process_write_request(
+       struct aux_engine *engine,
+       struct write_command_context *ctx)
+{
+       enum aux_channel_operation_result operation_result;
+
+       engine->funcs->submit_channel_request(engine, &ctx->request);
+
+       operation_result = engine->funcs->get_channel_status(
+               engine, &ctx->returned_byte);
+
+       switch (operation_result) {
+       case AUX_CHANNEL_OPERATION_SUCCEEDED:
+               ctx->timed_out_retry_aux = 0;
+               ctx->invalid_reply_retry_aux = 0;
+
+               ctx->reply.length = ctx->returned_byte;
+               ctx->reply.data = ctx->reply_data;
+
+               process_write_reply(engine, ctx);
+       break;
+       case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+               ++ctx->invalid_reply_retry_aux;
+
+               if (ctx->invalid_reply_retry_aux >
+                       AUX_INVALID_REPLY_RETRY_COUNTER) {
+                       ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+                       ctx->operation_succeeded = false;
+               } else
+                       udelay(400);
+       break;
+       case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+               ++ctx->timed_out_retry_aux;
+
+               if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+                       ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+                       ctx->operation_succeeded = false;
+               } else {
+                       /* DP 1.2a, table 2-58:
+                        * "S3: AUX Request CMD PENDING:
+                        * retry 3 times, with 400usec wait on each"
+                        * The HW timeout is set to 550usec,
+                        * so we should not wait here
+                        */
+               }
+       break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
+       default:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+               ctx->operation_succeeded = false;
+       }
+}
+static bool write_command(
+       struct aux_engine *engine,
+       struct i2caux_transaction_request *request,
+       bool middle_of_transaction)
+{
+       struct write_command_context ctx;
+
+       ctx.mot = middle_of_transaction;
+       ctx.buffer = request->payload.data;
+       ctx.current_write_length = request->payload.length;
+       ctx.timed_out_retry_aux = 0;
+       ctx.invalid_reply_retry_aux = 0;
+       ctx.defer_retry_aux = 0;
+       ctx.defer_retry_i2c = 0;
+       ctx.ack_m_retry = 0;
+       ctx.transaction_complete = false;
+       ctx.operation_succeeded = true;
+
+       if (request->payload.address_space ==
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+               ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+               ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+               ctx.request.address = request->payload.address;
+       } else if (request->payload.address_space ==
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+               ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+               ctx.request.action = middle_of_transaction ?
+                       I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+                       I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+               ctx.request.address = request->payload.address >> 1;
+       } else {
+               /* in DAL2, there was no return in such a case */
+               BREAK_TO_DEBUGGER();
+               return false;
+       }
+
+       ctx.request.delay = 0;
+
+       ctx.max_defer_retry =
+               (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+                       engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+       do {
+               ctx.request.data = ctx.buffer;
+               ctx.request.length = ctx.current_write_length;
+
+               process_write_request(engine, &ctx);
+
+               request->status = ctx.status;
+
+               if (ctx.operation_succeeded && !ctx.transaction_complete)
+                       if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+                               msleep(engine->delay);
+       } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+       if (request->payload.address_space ==
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+               DC_LOG_I2C_AUX("WRITE: addr:0x%x  value:0x%x Result:%d",
+                               request->payload.address,
+                               request->payload.data[0],
+                               ctx.operation_succeeded);
+       }
+
+       return ctx.operation_succeeded;
+}
+static bool end_of_transaction_command(
+       struct aux_engine *engine,
+       struct i2caux_transaction_request *request)
+{
+       struct i2caux_transaction_request dummy_request;
+       uint8_t dummy_data;
+
+       /* [tcheng] We only need to send the stop (read with MOT = 0)
+        * for I2C-over-AUX, not native AUX
+        */
+
+       if (request->payload.address_space !=
+               I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+               return false;
+
+       dummy_request.operation = request->operation;
+       dummy_request.payload.address_space = request->payload.address_space;
+       dummy_request.payload.address = request->payload.address;
+
+       /*
+        * Add a dummy byte due to some receiver quirk
+        * where one byte is sent along with MOT = 0.
+        * Ideally this should be 0.
+        */
+
+       dummy_request.payload.length = 0;
+       dummy_request.payload.data = &dummy_data;
+
+       if (request->operation == I2CAUX_TRANSACTION_READ)
+               return read_command(engine, &dummy_request, false);
+       else
+               return write_command(engine, &dummy_request, false);
+
+       /* according to Syed, DoDummyMOT is not needed now */
+}
+static bool submit_request(
+       struct aux_engine *engine,
+       struct i2caux_transaction_request *request,
+       bool middle_of_transaction)
+{
+
+       bool result;
+       bool mot_used = true;
+
+       switch (request->operation) {
+       case I2CAUX_TRANSACTION_READ:
+               result = read_command(engine, request, mot_used);
+       break;
+       case I2CAUX_TRANSACTION_WRITE:
+               result = write_command(engine, request, mot_used);
+       break;
+       default:
+               result = false;
+       }
+
+       /* [tcheng]
+        * need to send a stop for the last transaction to free up the AUX;
+        * if the above command fails, this is treated as the last transaction
+        */
+
+       if (!middle_of_transaction || !result)
+               end_of_transaction_command(engine, request);
+
+       /* mask AUX interrupt */
+
+       return result;
+}
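[editor's note] A sketch of how a caller (here via the funcs table, or within this file) might chain two I2C-over-AUX segments; 'engine', 'req1', and 'req2' are assumed to be set up as in the examples above. The first call keeps MOT asserted; the second, with middle_of_transaction false, makes end_of_transaction_command() append the stop (read with MOT = 0):

	bool ok;

	ok = submit_request(engine, &req1, true);       /* more segments follow */
	if (ok)
		ok = submit_request(engine, &req2, false); /* last segment + stop */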
+enum i2caux_engine_type get_engine_type(
+               const struct aux_engine *engine)
+{
+       return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+static bool acquire(
+       struct aux_engine *engine,
+       struct ddc *ddc)
+{
+
+       enum gpio_result result;
+
+       if (engine->funcs->is_engine_available) {
+               /*check whether SW could use the engine*/
+               if (!engine->funcs->is_engine_available(engine))
+                       return false;
+       }
+
+       result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+               GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+       if (result != GPIO_RESULT_OK)
+               return false;
+
+       if (!engine->funcs->acquire_engine(engine)) {
+               dal_ddc_close(ddc);
+               return false;
+       }
+
+       engine->ddc = ddc;
+
+       return true;
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+       .acquire_engine = acquire_engine,
+       .submit_channel_request = submit_channel_request,
+       .process_channel_reply = process_channel_reply,
+       .read_channel_reply = read_channel_reply,
+       .get_channel_status = get_channel_status,
+       .is_engine_available = is_engine_available,
+       .release_engine = release_engine,
+       .destroy_engine = dce110_engine_destroy,
+       .submit_request = submit_request,
+       .get_engine_type = get_engine_type,
+       .acquire = acquire,
+};
+
+void dce110_engine_destroy(struct aux_engine **engine)
+{
+
+       struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
+
+       kfree(engine110);
+       *engine = NULL;
+
+}
+struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
+               struct dc_context *ctx,
+               uint32_t inst,
+               uint32_t timeout_period,
+               const struct dce110_aux_registers *regs)
+{
+       aux_engine110->base.ddc = NULL;
+       aux_engine110->base.ctx = ctx;
+       aux_engine110->base.delay = 0;
+       aux_engine110->base.max_defer_write_retry = 0;
+       aux_engine110->base.funcs = &aux_engine_funcs;
+       aux_engine110->base.inst = inst;
+       aux_engine110->timeout_period = timeout_period;
+       aux_engine110->regs = regs;
+
+       return &aux_engine110->base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
new file mode 100644 (file)
index 0000000..f7caab8
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+#include "aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+       SRI(AUX_CONTROL, DP_AUX, id), \
+       SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+       SRI(AUX_SW_DATA, DP_AUX, id), \
+       SRI(AUX_SW_CONTROL, DP_AUX, id), \
+       SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+       SRI(AUX_SW_STATUS, DP_AUX, id), \
+       SR(AUXN_IMPCAL), \
+       SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+       uint32_t AUX_CONTROL;
+       uint32_t AUX_ARB_CONTROL;
+       uint32_t AUX_SW_DATA;
+       uint32_t AUX_SW_CONTROL;
+       uint32_t AUX_INTERRUPT_CONTROL;
+       uint32_t AUX_SW_STATUS;
+       uint32_t AUXN_IMPCAL;
+       uint32_t AUXP_IMPCAL;
+
+       uint32_t AUX_RESET_MASK;
+};
+
+enum { /* This is the timeout as defined in DP 1.2a,
+        * 2.3.4 "Detailed uPacket TX AUX CH State Description".
+        */
+       AUX_TIMEOUT_PERIOD = 400,
+
+       /* Ideally, the SW timeout should be just above 550usec
+        * which is programmed in HW.
+        * But the SW timeout of 600usec is not reliable,
+        * because on some systems, delay_in_microseconds()
+        * returns faster than it should.
+        * EPR #379763: by trial-and-error on different systems,
+        * 700usec is the minimum reliable SW timeout for polling
+        * the AUX_SW_STATUS.AUX_SW_DONE bit.
+        * This timeout expires *only* when there are
+        * AUX Error or AUX Timeout conditions - not during normal operation.
+        * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+        * at most within ~240usec. That means,
+        * increasing this timeout will not affect normal operation,
+        * and we'll timeout after
+        * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+        * This timeout is especially important for
+        * resume from S3 and CTS.
+        */
+       SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+struct aux_engine_dce110 {
+       struct aux_engine base;
+       const struct dce110_aux_registers *regs;
+       struct {
+               uint32_t aux_control;
+               uint32_t aux_arb_control;
+               uint32_t aux_sw_data;
+               uint32_t aux_sw_control;
+               uint32_t aux_interrupt_control;
+               uint32_t aux_sw_status;
+       } addr;
+       uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+       uint32_t engine_id;
+       uint32_t timeout_period;
+       struct dc_context *ctx;
+       const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dce110_aux_engine_construct(
+               struct aux_engine_dce110 *aux_engine110,
+               struct dc_context *ctx,
+               uint32_t inst,
+               uint32_t timeout_period,
+               const struct dce110_aux_registers *regs);
+
+void dce110_engine_destroy(struct aux_engine **engine);
+
+bool dce110_aux_engine_acquire(
+       struct aux_engine *aux_engine,
+       struct ddc *ddc);
+#endif
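[editor's note] A construction sketch, mirroring how other DCE objects are created at resource-build time; the register array name aux_engine_regs is an assumption for illustration:

	struct aux_engine_dce110 *aux110 = kzalloc(sizeof(*aux110), GFP_KERNEL);

	if (aux110)
		dce110_aux_engine_construct(aux110, ctx, 0 /* inst */,
				SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
				&aux_engine_regs[0]);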
index 88b09dd758baad980f2c6fdd37d5d152c909227a..439dcf3b596ccdec9447656796499b7848673848 100644 (file)
@@ -133,7 +133,7 @@ static bool calculate_fb_and_fractional_fb_divider(
        uint64_t feedback_divider;
 
        feedback_divider =
-               (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+               (uint64_t)target_pix_clk_khz * ref_divider * post_divider;
        feedback_divider *= 10;
        /* additional factor, since we divide by 10 afterwards */
        feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
@@ -145,8 +145,8 @@ static bool calculate_fb_and_fractional_fb_divider(
  * of fractional feedback decimal point and the fractional FB Divider precision
  * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
 
-       feedback_divider += (uint64_t)
-                       (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+       feedback_divider += 5ULL *
+                           calc_pll_cs->fract_fb_divider_precision_factor;
        feedback_divider =
                div_u64(feedback_divider,
                        calc_pll_cs->fract_fb_divider_precision_factor * 10);
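[editor's note] The hunks here all apply the same overflow fix: casting the already-computed 32-bit product to uint64_t cannot recover lost high bits, whereas casting one operand promotes the whole multiplication to 64 bits. A standalone illustration with made-up values:

	uint32_t clk = 600000, ref = 128, post = 64;
	uint64_t wrong = (uint64_t)(clk * ref * post); /* 32-bit multiply wraps */
	uint64_t right = (uint64_t)clk * ref * post;   /* == 4915200000, exact */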
@@ -203,8 +203,8 @@ static bool calc_fb_divider_checking_tolerance(
                        &fract_feedback_divider);
 
        /*Actual calculated value*/
-       actual_calc_clk_khz = (uint64_t)(feedback_divider *
-                                       calc_pll_cs->fract_fb_divider_factor) +
+       actual_calc_clk_khz = (uint64_t)feedback_divider *
+                                       calc_pll_cs->fract_fb_divider_factor +
                                                        fract_feedback_divider;
        actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
        actual_calc_clk_khz =
@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
        case DCE_VERSION_11_2:
        case DCE_VERSION_11_22:
        case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
 #endif
 
@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
        struct bp_pixel_clock_parameters bp_pc_params = {0};
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
                unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
                unsigned dp_dto_ref_kHz = 700000;
@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
        case DCE_VERSION_11_2:
        case DCE_VERSION_11_22:
        case DCE_VERSION_12_0:
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
 #endif
 
index c45e2f76189e7be44dc5178f3b1d25aec9c23f3a..801bb65707b3204ed1055d583926b055658a7179 100644 (file)
@@ -55,7 +55,7 @@
        CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
        CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 
 #define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
                SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
index 8a581c67bf2d13b08ff0c28c6c39c16490a15d45..0db8d1da3d0ecc43bde2b55f12c10223f0f4fb9e 100644 (file)
@@ -30,7 +30,7 @@
 #include "bios_parser_interface.h"
 #include "dc.h"
 #include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn_calcs.h"
 #endif
 #include "core_types.h"
@@ -38,7 +38,7 @@
 #include "dal_asic_id.h"
 
 #define TO_DCE_CLOCKS(clocks)\
-       container_of(clocks, struct dce_disp_clk, base)
+       container_of(clocks, struct dce_dccg, base)
 
 #define REG(reg) \
        (clk_dce->regs->reg)
@@ -101,99 +101,78 @@ static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
 /*ClocksStatePerformance*/
 { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
 
-/* Starting point for each divider range.*/
-enum dce_divider_range_start {
-       DIVIDER_RANGE_01_START = 200, /* 2.00*/
-       DIVIDER_RANGE_02_START = 1600, /* 16.00*/
-       DIVIDER_RANGE_03_START = 3200, /* 32.00*/
-       DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+       DENTIST_BASE_DID_1 = 0x08,
+       DENTIST_BASE_DID_2 = 0x40,
+       DENTIST_BASE_DID_3 = 0x60,
+       DENTIST_MAX_DID    = 0x80
 };
 
-/* Ranges for divider identifiers (Divider ID or DID)
- mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER*/
-enum dce_divider_id_register_setting {
-       DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
-       DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
-       DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
-       DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+       DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
+       DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
+       DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
+       DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
+       DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+       DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
+       DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
 };
 
-/* Step size between each divider within a range.
- Incrementing the DENTIST_DISPCLK_WDIVIDER by one
- will increment the divider by this much.*/
-enum dce_divider_range_step_size {
-       DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
-       DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
-       DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
-};
-
-static bool dce_divider_range_construct(
-       struct dce_divider_range *div_range,
-       int range_start,
-       int range_step,
-       int did_min,
-       int did_max)
+static int dentist_get_divider_from_did(int did)
 {
-       div_range->div_range_start = range_start;
-       div_range->div_range_step = range_step;
-       div_range->did_min = did_min;
-       div_range->did_max = did_max;
-
-       if (div_range->div_range_step == 0) {
-               div_range->div_range_step = 1;
-               /*div_range_step cannot be zero*/
-               BREAK_TO_DEBUGGER();
+       if (did < DENTIST_BASE_DID_1)
+               did = DENTIST_BASE_DID_1;
+       if (did > DENTIST_MAX_DID)
+               did = DENTIST_MAX_DID;
+
+       if (did < DENTIST_BASE_DID_2) {
+               return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+                                                       * (did - DENTIST_BASE_DID_1);
+       } else if (did < DENTIST_BASE_DID_3) {
+               return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+                                                       * (did - DENTIST_BASE_DID_2);
+       } else {
+               return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+                                                       * (did - DENTIST_BASE_DID_3);
        }
-       /* Calculate this based on the other inputs.*/
-       /* See DividerRange.h for explanation of */
-       /* the relationship between divider id (DID) and a divider.*/
-       /* Number of Divider IDs = (Maximum Divider ID - Minimum Divider ID)*/
-       /* Maximum divider identified in this range =
-        * (Number of Divider IDs)*Step size between dividers
-        *  + The start of this range.*/
-       div_range->div_range_end = (did_max - did_min) * range_step
-               + range_start;
-       return true;
-}
-
-static int dce_divider_range_calc_divider(
-       struct dce_divider_range *div_range,
-       int did)
-{
-       /* Is this DID within our range?*/
-       if ((did < div_range->did_min) || (did >= div_range->did_max))
-               return INVALID_DIVIDER;
-
-       return ((did - div_range->did_min) * div_range->div_range_step)
-                       + div_range->div_range_start;
-
 }
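
For reference, the DID-to-divider mapping above can be checked with a minimal standalone sketch (not part of the patch; it assumes the 3600000 kHz fallback VCO that dcn1_dccg_create() uses further down):

#include <stdio.h>

/* Mirrors dentist_get_divider_from_did(): the return value is the divider
 * scaled by DENTIST_DIVIDER_RANGE_SCALE_FACTOR (4), i.e. quarter steps:
 * DID 0x08 -> 8 (2.00), 0x40 -> 64 (16.00), 0x60 -> 128 (32.00). */
static int demo_divider_from_did(int did)
{
        if (did < 0x08)
                did = 0x08;
        if (did > 0x80)
                did = 0x80;
        if (did < 0x40)
                return 8 + 1 * (did - 0x08);
        if (did < 0x60)
                return 64 + 2 * (did - 0x40);
        return 128 + 4 * (did - 0x60);
}

int main(void)
{
        int vco_khz = 3600000;                 /* assumed DENTIST VCO */
        int div = demo_divider_from_did(0x41); /* 66, i.e. divider 16.5 */

        /* same formula as dce_get_dp_ref_freq_khz() below */
        printf("dp_ref_clk = %d kHz\n", (4 * vco_khz) / div); /* 218181 */
        return 0;
}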
 
-static int dce_divider_range_get_divider(
-       struct dce_divider_range *div_range,
-       int ranges_num,
-       int did)
+/* SW will adjust the DP REF clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all cases:
+ * - SS enabled on the DP ref clock and HW de-spreading enabled, with SW
+ *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
+ * - SS enabled on the DP ref clock and HW de-spreading enabled, with HW
+ *   calculations (not planned to be used, but the average clock should
+ *   still be valid)
+ * - SS enabled on the DP ref clock and HW de-spreading disabled (should
+ *   not be the case with CIK); SW should then program all generated
+ *   rates according to the average value (as on previous ASICs)
+ */
+static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
 {
-       int div = INVALID_DIVIDER;
-       int i;
+       if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+               struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+                               dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
+                                                       clk_dce->dprefclk_ss_divider), 200);
+               struct fixed31_32 adj_dp_ref_clk_khz;
 
-       for (i = 0; i < ranges_num; i++) {
-               /* Calculate divider with given divider ID*/
-               div = dce_divider_range_calc_divider(&div_range[i], did);
-               /* Found a valid return divider*/
-               if (div != INVALID_DIVIDER)
-                       break;
+               ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+               adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+               dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
        }
-       return div;
+       return dp_ref_clk_khz;
 }
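
The fixed-point sequence above reduces to adjusted = floor(clk * (1 - (pct / divider) / 200)). A minimal plain-C sketch with assumed sample values (the driver itself uses fixed31_32, not double):

#include <stdio.h>

/* pct/divider is the spread in percent; the /200 halves it and converts
 * it to a fraction, giving the average (de-spread) clock value. */
static int demo_adjust_dp_ref_for_ss(int dp_ref_clk_khz, int pct, int divider)
{
        double half_spread = ((double)pct / divider) / 200.0;

        return (int)(dp_ref_clk_khz * (1.0 - half_spread)); /* floor */
}

int main(void)
{
        /* assumed: 3% spread stored as 3000 with divider 1000 */
        printf("%d kHz\n", demo_adjust_dp_ref_for_ss(600000, 3000, 1000)); /* 591000 */
        return 0;
}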
 
-static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+static int dce_get_dp_ref_freq_khz(struct dccg *clk)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
        int dprefclk_wdivider;
        int dprefclk_src_sel;
        int dp_ref_clk_khz = 600000;
-       int target_div = INVALID_DIVIDER;
+       int target_div;
 
        /* ASSERT DP Reference Clock source is from DFS*/
        REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
@@ -204,80 +183,27 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
        REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
 
        /* Convert DENTIST_DPREFCLK_WDIVIDER to an actual divider */
-       target_div = dce_divider_range_get_divider(
-                       clk_dce->divider_ranges,
-                       DIVIDER_RANGE_MAX,
-                       dprefclk_wdivider);
-
-       if (target_div != INVALID_DIVIDER) {
-               /* Calculate the current DFS clock, in kHz.*/
-               dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
-                       * clk_dce->dentist_vco_freq_khz) / target_div;
-       }
+       target_div = dentist_get_divider_from_did(dprefclk_wdivider);
 
-       /* SW will adjust DP REF Clock average value for all purposes
-        * (DP DTO / DP Audio DTO and DP GTC)
-        if clock is spread for all cases:
-        -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
-        calculations for DS_INCR/DS_MODULO (this is planned to be default case)
-        -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
-        calculations (not planned to be used, but average clock should still
-        be valid)
-        -if SS enabled on DP Ref clock and HW de-spreading disabled
-        (should not be case with CIK) then SW should program all rates
-        generated according to average value (case as with previous ASICs)
-         */
-       if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-               struct fixed31_32 ss_percentage = dc_fixpt_div_int(
-                               dc_fixpt_from_fraction(
-                                               clk_dce->dprefclk_ss_percentage,
-                                               clk_dce->dprefclk_ss_divider), 200);
-               struct fixed31_32 adj_dp_ref_clk_khz;
+       /* Calculate the current DFS clock, in kHz.*/
+       dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+               * clk_dce->dentist_vco_freq_khz) / target_div;
 
-               ss_percentage = dc_fixpt_sub(dc_fixpt_one,
-                                                               ss_percentage);
-               adj_dp_ref_clk_khz =
-                       dc_fixpt_mul_int(
-                               ss_percentage,
-                               dp_ref_clk_khz);
-               dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
-       }
-
-       return dp_ref_clk_khz;
+       return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
 }
 
-/* TODO: This is DCN DPREFCLK: it could be program by DENTIST by VBIOS
- * or CLK0_CLK11 by SMU. For DCE120, it is wlays 600Mhz. Will re-visit
- * clock implementation
- */
-static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
-       int dp_ref_clk_khz = 600000;
-
-       if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
-               struct fixed31_32 ss_percentage = dc_fixpt_div_int(
-                               dc_fixpt_from_fraction(
-                                               clk_dce->dprefclk_ss_percentage,
-                                               clk_dce->dprefclk_ss_divider), 200);
-               struct fixed31_32 adj_dp_ref_clk_khz;
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
 
-               ss_percentage = dc_fixpt_sub(dc_fixpt_one,
-                                                               ss_percentage);
-               adj_dp_ref_clk_khz =
-                       dc_fixpt_mul_int(
-                               ss_percentage,
-                               dp_ref_clk_khz);
-               dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
-       }
-
-       return dp_ref_clk_khz;
+       return dccg_adjust_dp_ref_freq_for_ss(clk_dce, 600000);
 }
+
 static enum dm_pp_clocks_state dce_get_required_clocks_state(
-       struct display_clock *clk,
-       struct state_dependent_clocks *req_clocks)
+       struct dccg *clk,
+       struct dc_clocks *req_clocks)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
        int i;
        enum dm_pp_clocks_state low_req_clk;
 
@@ -286,53 +212,30 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
         * all required clocks
         */
        for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
-               if (req_clocks->display_clk_khz >
+               if (req_clocks->dispclk_khz >
                                clk_dce->max_clks_by_state[i].display_clk_khz
-                       || req_clocks->pixel_clk_khz >
+                       || req_clocks->phyclk_khz >
                                clk_dce->max_clks_by_state[i].pixel_clk_khz)
                        break;
 
        low_req_clk = i + 1;
        if (low_req_clk > clk->max_clks_state) {
-               DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
-                               __func__,
-                               req_clocks->display_clk_khz,
-                               req_clocks->pixel_clk_khz);
-               low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+               /* a high phyclk merely clamps to the max clock state;
+                * only an excessive display clock is invalid */
+               if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
+                               < req_clocks->dispclk_khz)
+                       low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+               else
+                       low_req_clk = clk->max_clks_state;
        }
 
        return low_req_clk;
 }
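
To make the scan concrete, a small worked example with assumed state limits:

/* Assume max_clks_state = 4 and display_clk_khz limits per state of
 *   state 1: 300000   state 2: 400000   state 3: 600000   state 4: 1100000
 * For req_clocks->dispclk_khz = 450000 the loop walks down from state 4
 * and breaks at state 2 (400000 < 450000), so low_req_clk = 2 + 1 = 3,
 * the lowest state that still satisfies the request. */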
 
-static bool dce_clock_set_min_clocks_state(
-       struct display_clock *clk,
-       enum dm_pp_clocks_state clocks_state)
-{
-       struct dm_pp_power_level_change_request level_change_req = {
-                       clocks_state };
-
-       if (clocks_state > clk->max_clks_state) {
-               /*Requested state exceeds max supported state.*/
-               DC_LOG_WARNING("Requested state exceeds max supported state");
-               return false;
-       } else if (clocks_state == clk->cur_min_clks_state) {
-               /*if we're trying to set the same state, we can just return
-                * since nothing needs to be done*/
-               return true;
-       }
-
-       /* get max clock state from PPLIB */
-       if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
-               clk->cur_min_clks_state = clocks_state;
-
-       return true;
-}
-
 static int dce_set_clock(
-       struct display_clock *clk,
+       struct dccg *clk,
        int requested_clk_khz)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
        struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
        struct dc_bios *bp = clk->ctx->dc_bios;
        int actual_clock = requested_clk_khz;
@@ -364,10 +267,10 @@ static int dce_set_clock(
 }
 
 static int dce_psr_set_clock(
-       struct display_clock *clk,
+       struct dccg *clk,
        int requested_clk_khz)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
        struct dc_context *ctx = clk_dce->base.ctx;
        struct dc *core_dc = ctx->dc;
        struct dmcu *dmcu = core_dc->res_pool->dmcu;
@@ -380,10 +283,10 @@ static int dce_psr_set_clock(
 }
 
 static int dce112_set_clock(
-       struct display_clock *clk,
+       struct dccg *clk,
        int requested_clk_khz)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
        struct bp_set_dce_clock_parameters dce_clk_params;
        struct dc_bios *bp = clk->ctx->dc_bios;
        struct dc *core_dc = clk->ctx->dc;
@@ -432,9 +335,9 @@ static int dce112_set_clock(
        return actual_clock;
 }
 
-static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
 {
-       struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+       struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
        struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
        struct integrated_info info = { { { 0 } } };
        struct dc_firmware_info fw_info = { { 0 } };
@@ -488,11 +391,9 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
        if (!debug->disable_dfs_bypass && bp->integrated_info)
                if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
                        clk_dce->dfs_bypass_enabled = true;
-
-       clk_dce->use_max_disp_clk = debug->max_disp_clk;
 }
 
-static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
 {
        struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
        int ss_info_num = bp->funcs->get_ss_entry_number(
@@ -548,139 +449,263 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
        }
 }
 
-static bool dce_apply_clock_voltage_request(
-       struct display_clock *clk,
-       enum dm_pp_clock_type clocks_type,
-       int clocks_in_khz,
-       bool pre_mode_set,
-       bool update_dp_phyclk)
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+       return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
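+
+/* In words: a clock is raised as soon as the calculation asks for it, but
+ * only lowered once the caller marks it safe.  Sample evaluations:
+ *   should_set_clock(false, 300000, 400000) -> false  (defer the lowering)
+ *   should_set_clock(true,  300000, 400000) -> true   (now safe to lower)
+ *   should_set_clock(false, 500000, 400000) -> true   (raising always OK)
+ *   should_set_clock(true,  400000, 400000) -> false  (no change needed) */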
+
+static void dce12_update_clocks(struct dccg *dccg,
+                       struct dc_clocks *new_clocks,
+                       bool safe_to_lower)
 {
-       bool send_request = false;
        struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
 
-       switch (clocks_type) {
-       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-       case DM_PP_CLOCK_TYPE_PIXELCLK:
-       case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
-               break;
-       default:
-               BREAK_TO_DEBUGGER();
-               return false;
+       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+               clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+               dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+               dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
        }
 
-       clock_voltage_req.clk_type = clocks_type;
-       clock_voltage_req.clocks_in_khz = clocks_in_khz;
-
-       /* to pplib */
-       if (pre_mode_set) {
-               switch (clocks_type) {
-               case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-                       if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
-                               clk->cur_clocks_value.dispclk_notify_pplib_done = true;
-                               send_request = true;
-                       } else
-                               clk->cur_clocks_value.dispclk_notify_pplib_done = false;
-                       /* no matter incrase or decrase clock, update current clock value */
-                       clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
-                       break;
-               case DM_PP_CLOCK_TYPE_PIXELCLK:
-                       if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
-                               clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
-                               send_request = true;
-                       } else
-                               clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
-                       /* no matter incrase or decrase clock, update current clock value */
-                       clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
-                       break;
-               case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
-                       if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
-                               clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
-                               send_request = true;
-                       } else
-                               clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
-                       /* no matter incrase or decrase clock, update current clock value */
-                       clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
-                       break;
-               default:
-                       ASSERT(0);
-                       break;
-               }
+       if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+               clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+               dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+       }
+}
+
+#ifdef CONFIG_X86
+static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+       bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+       bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+       int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+       bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+
+       /* increasing dispclk: the two-step case is current div-by-2 off,
+        * requested div-by-2 on */
+       if (dispclk_increase) {
+               /* already divided by 2, no need to reach the target clock in two steps */
+               if (cur_dpp_div)
+                       return new_clocks->dispclk_khz;
+
+               /* requested dispclk is lower than the maximum supported dppclk,
+                * no need to reach the target clock in two steps.
+                */
+               if (new_clocks->dispclk_khz <= disp_clk_threshold)
+                       return new_clocks->dispclk_khz;
+
+               /* requested dppclk not divided by 2, still within threshold */
+               if (!request_dpp_div)
+                       return new_clocks->dispclk_khz;
 
        } else {
-               switch (clocks_type) {
-               case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-                       if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
-                               send_request = true;
-                       break;
-               case DM_PP_CLOCK_TYPE_PIXELCLK:
-                       if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
-                               send_request = true;
-                       break;
-               case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
-                       if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
-                               send_request = true;
-                       break;
-               default:
-                       ASSERT(0);
-                       break;
-               }
+               /* decreasing dispclk: the two-step case is current div-by-2 on,
+                * requested div-by-2 off.
+                */
+
+               /* current dppclk not divided by 2, no need to ramp */
+               if (!cur_dpp_div)
+                       return new_clocks->dispclk_khz;
+
+               /* current disp clk is lower than current maximum dpp clk,
+                * no need to ramp
+                */
+               if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+                       return new_clocks->dispclk_khz;
+
+               /* request dpp clk need to be divided by 2 */
+               if (request_dpp_div)
+                       return new_clocks->dispclk_khz;
        }
-       if (send_request) {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-               if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
-                       struct dc *core_dc = clk->ctx->dc;
-                       /*use dcfclk request voltage*/
-                       clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-                       clock_voltage_req.clocks_in_khz =
-                               dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
-               }
+
+       return disp_clk_threshold;
+}
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+{
+       struct dc *dc = dccg->ctx->dc;
+       int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+       bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+       int i;
+
+       /* set disp clk to dpp clk threshold */
+       dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
+
+       /* update request dpp clk division option */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (!pipe_ctx->plane_state)
+                       continue;
+
+               pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+                               pipe_ctx->plane_res.dpp,
+                               request_dpp_div,
+                               true);
+       }
+
+       /* If target clk not same as dppclk threshold, set to target clock */
+       if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+               dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+
+       dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+       dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+       dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
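+
+/* A worked pass through the two functions above, with assumed numbers.
+ * Current: dispclk = 400 MHz, DPPs undivided.  Requested: dispclk =
+ * 900 MHz, dppclk = 450 MHz (divide-by-2), max_supported_dppclk = 600 MHz.
+ * dcn1_determine_dppclk_threshold() returns 600 MHz, so the ramp is:
+ *   1. set dispclk to 600 MHz        (DPPs undivided: dppclk = 600 MHz)
+ *   2. switch the DPPs to divide-by-2 (dppclk drops to 300 MHz)
+ *   3. set dispclk to the 900 MHz target (dppclk = 450 MHz)
+ * dppclk thus never exceeds max_supported_dppclk during the transition. */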
+
+static void dcn1_update_clocks(struct dccg *dccg,
+                       struct dc_clocks *new_clocks,
+                       bool safe_to_lower)
+{
+       struct dc *dc = dccg->ctx->dc;
+       struct pp_smu_display_requirement_rv *smu_req_cur =
+                       &dc->res_pool->pp_smu_req;
+       struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+       struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+       struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+       bool send_request_to_increase = false;
+       bool send_request_to_lower = false;
+
+       if (new_clocks->phyclk_khz)
+               smu_req.display_count = 1;
+       else
+               smu_req.display_count = 0;
+
+       if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+                       || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+                       || new_clocks->fclk_khz > dccg->clks.fclk_khz
+                       || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+               send_request_to_increase = true;
+
+       if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+               dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+               send_request_to_lower = true;
+       }
+
+       if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+               dccg->clks.fclk_khz = new_clocks->fclk_khz;
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+               clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+               smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+
+               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+               send_request_to_lower = true;
+       }
+
+       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+               dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+               smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+               send_request_to_lower = true;
+       }
+
+       if (should_set_clock(safe_to_lower,
+                       new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+               dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+               smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+
+               send_request_to_lower = true;
+       }
+
+       /* raise dcfclk before dppclk so there is enough
+        * voltage to run the dppclk
+        */
+       if (send_request_to_increase) {
+               /*use dcfclk to request voltage*/
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+               clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+               if (pp_smu->set_display_requirement)
+                       pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+       }
+
+       /* dcn1 dppclk is tied to dispclk */
+       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+               dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+               dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+               send_request_to_lower = true;
+       }
+
+       if (!send_request_to_increase && send_request_to_lower) {
+               /*use dcfclk to request voltage*/
+               clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+               clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+               dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+               if (pp_smu->set_display_requirement)
+                       pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+       }
+
+       *smu_req_cur = smu_req;
+}
 #endif
-               dm_pp_apply_clock_for_voltage_request(
-                       clk->ctx, &clock_voltage_req);
+
+static void dce_update_clocks(struct dccg *dccg,
+                       struct dc_clocks *new_clocks,
+                       bool safe_to_lower)
+{
+       struct dm_pp_power_level_change_request level_change_req;
+
+       level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
+       /* get max clock state from PPLIB */
+       if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
+                       || level_change_req.power_level > dccg->cur_min_clks_state) {
+               if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+                       dccg->cur_min_clks_state = level_change_req.power_level;
        }
-       if (update_dp_phyclk && (clocks_in_khz >
-       clk->cur_clocks_value.max_dp_phyclk_in_khz))
-               clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
 
-       return true;
+       if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+               dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+               dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+       }
 }
 
+#ifdef CONFIG_X86
+static const struct display_clock_funcs dcn1_funcs = {
+       .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .set_dispclk = dce112_set_clock,
+       .update_clocks = dcn1_update_clocks
+};
+#endif
 
 static const struct display_clock_funcs dce120_funcs = {
-       .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
-       .apply_clock_voltage_request = dce_apply_clock_voltage_request,
-       .set_clock = dce112_set_clock
+       .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+       .set_dispclk = dce112_set_clock,
+       .update_clocks = dce12_update_clocks
 };
 
 static const struct display_clock_funcs dce112_funcs = {
-       .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
-       .get_required_clocks_state = dce_get_required_clocks_state,
-       .set_min_clocks_state = dce_clock_set_min_clocks_state,
-       .set_clock = dce112_set_clock
+       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+       .set_dispclk = dce112_set_clock,
+       .update_clocks = dce_update_clocks
 };
 
 static const struct display_clock_funcs dce110_funcs = {
-       .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
-       .get_required_clocks_state = dce_get_required_clocks_state,
-       .set_min_clocks_state = dce_clock_set_min_clocks_state,
-       .set_clock = dce_psr_set_clock
+       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+       .set_dispclk = dce_psr_set_clock,
+       .update_clocks = dce_update_clocks
 };
 
 static const struct display_clock_funcs dce_funcs = {
-       .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
-       .get_required_clocks_state = dce_get_required_clocks_state,
-       .set_min_clocks_state = dce_clock_set_min_clocks_state,
-       .set_clock = dce_set_clock
+       .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+       .set_dispclk = dce_set_clock,
+       .update_clocks = dce_update_clocks
 };
 
-static void dce_disp_clk_construct(
-       struct dce_disp_clk *clk_dce,
+static void dce_dccg_construct(
+       struct dce_dccg *clk_dce,
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask)
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask)
 {
-       struct display_clock *base = &clk_dce->base;
+       struct dccg *base = &clk_dce->base;
 
        base->ctx = ctx;
        base->funcs = &dce_funcs;
@@ -700,34 +725,15 @@ static void dce_disp_clk_construct(
 
        dce_clock_read_integrated_info(clk_dce);
        dce_clock_read_ss_info(clk_dce);
-
-       dce_divider_range_construct(
-               &clk_dce->divider_ranges[DIVIDER_RANGE_01],
-               DIVIDER_RANGE_01_START,
-               DIVIDER_RANGE_01_STEP_SIZE,
-               DIVIDER_RANGE_01_BASE_DIVIDER_ID,
-               DIVIDER_RANGE_02_BASE_DIVIDER_ID);
-       dce_divider_range_construct(
-               &clk_dce->divider_ranges[DIVIDER_RANGE_02],
-               DIVIDER_RANGE_02_START,
-               DIVIDER_RANGE_02_STEP_SIZE,
-               DIVIDER_RANGE_02_BASE_DIVIDER_ID,
-               DIVIDER_RANGE_03_BASE_DIVIDER_ID);
-       dce_divider_range_construct(
-               &clk_dce->divider_ranges[DIVIDER_RANGE_03],
-               DIVIDER_RANGE_03_START,
-               DIVIDER_RANGE_03_STEP_SIZE,
-               DIVIDER_RANGE_03_BASE_DIVIDER_ID,
-               DIVIDER_RANGE_MAX_DIVIDER_ID);
 }
 
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask)
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask)
 {
-       struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
        if (clk_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -738,19 +744,19 @@ struct display_clock *dce_disp_clk_create(
                dce80_max_clks_by_state,
                sizeof(dce80_max_clks_by_state));
 
-       dce_disp_clk_construct(
+       dce_dccg_construct(
                clk_dce, ctx, regs, clk_shift, clk_mask);
 
        return &clk_dce->base;
 }
 
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask)
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask)
 {
-       struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
        if (clk_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -761,7 +767,7 @@ struct display_clock *dce110_disp_clk_create(
                dce110_max_clks_by_state,
                sizeof(dce110_max_clks_by_state));
 
-       dce_disp_clk_construct(
+       dce_dccg_construct(
                clk_dce, ctx, regs, clk_shift, clk_mask);
 
        clk_dce->base.funcs = &dce110_funcs;
@@ -769,13 +775,13 @@ struct display_clock *dce110_disp_clk_create(
        return &clk_dce->base;
 }
 
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask)
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask)
 {
-       struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
        if (clk_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -786,7 +792,7 @@ struct display_clock *dce112_disp_clk_create(
                dce112_max_clks_by_state,
                sizeof(dce112_max_clks_by_state));
 
-       dce_disp_clk_construct(
+       dce_dccg_construct(
                clk_dce, ctx, regs, clk_shift, clk_mask);
 
        clk_dce->base.funcs = &dce112_funcs;
@@ -794,10 +800,9 @@ struct display_clock *dce112_disp_clk_create(
        return &clk_dce->base;
 }
 
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+struct dccg *dce120_dccg_create(struct dc_context *ctx)
 {
-       struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-       struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
 
        if (clk_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -808,28 +813,59 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
                dce120_max_clks_by_state,
                sizeof(dce120_max_clks_by_state));
 
-       dce_disp_clk_construct(
+       dce_dccg_construct(
                clk_dce, ctx, NULL, NULL, NULL);
 
        clk_dce->base.funcs = &dce120_funcs;
 
-       /* new in dce120 */
-       if (!ctx->dc->debug.disable_pplib_clock_request  &&
-                       dm_pp_get_clock_levels_by_type_with_voltage(
-                       ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
-                                               && clk_level_info.num_levels)
-               clk_dce->max_displ_clk_in_khz =
-                       clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
-       else
-               clk_dce->max_displ_clk_in_khz = 1133000;
+       return &clk_dce->base;
+}
+
+#ifdef CONFIG_X86
+struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+{
+       struct dc_debug_options *debug = &ctx->dc->debug;
+       struct dc_bios *bp = ctx->dc_bios;
+       struct dc_firmware_info fw_info = { { 0 } };
+       struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+       if (clk_dce == NULL) {
+               BREAK_TO_DEBUGGER();
+               return NULL;
+       }
+
+       clk_dce->base.ctx = ctx;
+       clk_dce->base.funcs = &dcn1_funcs;
+
+       clk_dce->dfs_bypass_disp_clk = 0;
+
+       clk_dce->dprefclk_ss_percentage = 0;
+       clk_dce->dprefclk_ss_divider = 1000;
+       clk_dce->ss_on_dprefclk = false;
+
+       if (bp->integrated_info)
+               clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+       if (clk_dce->dentist_vco_freq_khz == 0) {
+               bp->funcs->get_firmware_info(bp, &fw_info);
+               clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+               if (clk_dce->dentist_vco_freq_khz == 0)
+                       clk_dce->dentist_vco_freq_khz = 3600000;
+       }
+
+       if (!debug->disable_dfs_bypass && bp->integrated_info)
+               if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+                       clk_dce->dfs_bypass_enabled = true;
+
+       dce_clock_read_ss_info(clk_dce);
 
        return &clk_dce->base;
 }
+#endif
 
-void dce_disp_clk_destroy(struct display_clock **disp_clk)
+void dce_dccg_destroy(struct dccg **dccg)
 {
-       struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+       struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
 
        kfree(clk_dce);
-       *disp_clk = NULL;
+       *dccg = NULL;
 }
index 0e717e0dc8f021322a652cc9316c8681012a6103..e5e44adc6c27cda88930ab020c04ee6e0458e803 100644 (file)
@@ -33,6 +33,9 @@
        .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
        .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
 
+#define CLK_COMMON_REG_LIST_DCN_BASE() \
+       SR(DENTIST_DISPCLK_CNTL)
+
 #define CLK_SF(reg_name, field_name, post_fix)\
        .field_name = reg_name ## __ ## field_name ## post_fix
 
        CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
        CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
 
+#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+       CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+       CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
 #define CLK_REG_FIELD_LIST(type) \
        type DPREFCLK_SRC_SEL; \
-       type DENTIST_DPREFCLK_WDIVIDER;
+       type DENTIST_DPREFCLK_WDIVIDER; \
+       type DENTIST_DISPCLK_WDIVIDER; \
+       type DENTIST_DISPCLK_CHG_DONE;
 
-struct dce_disp_clk_shift {
+struct dccg_shift {
        CLK_REG_FIELD_LIST(uint8_t)
 };
 
-struct dce_disp_clk_mask {
+struct dccg_mask {
        CLK_REG_FIELD_LIST(uint32_t)
 };
 
-struct dce_disp_clk_registers {
+struct dccg_registers {
        uint32_t DPREFCLK_CNTL;
        uint32_t DENTIST_DISPCLK_CNTL;
 };
 
-/* Array identifiers and count for the divider ranges.*/
-enum dce_divider_range_count {
-       DIVIDER_RANGE_01 = 0,
-       DIVIDER_RANGE_02,
-       DIVIDER_RANGE_03,
-       DIVIDER_RANGE_MAX /* == 3*/
-};
-
-enum dce_divider_error_types {
-       INVALID_DID = 0,
-       INVALID_DIVIDER = 1
-};
-
-struct dce_divider_range {
-       int div_range_start;
-       /* The end of this range of dividers.*/
-       int div_range_end;
-       /* The distance between each divider in this range.*/
-       int div_range_step;
-       /* The divider id for the lowest divider.*/
-       int did_min;
-       /* The divider id for the highest divider.*/
-       int did_max;
-};
-
-struct dce_disp_clk {
-       struct display_clock base;
-       const struct dce_disp_clk_registers *regs;
-       const struct dce_disp_clk_shift *clk_shift;
-       const struct dce_disp_clk_mask *clk_mask;
+struct dce_dccg {
+       struct dccg base;
+       const struct dccg_registers *regs;
+       const struct dccg_shift *clk_shift;
+       const struct dccg_mask *clk_mask;
 
        struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
-       struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
 
-       bool use_max_disp_clk;
        int dentist_vco_freq_khz;
 
        /* Cache the status of DFS-bypass feature*/
@@ -106,32 +88,33 @@ struct dce_disp_clk {
        int dprefclk_ss_percentage;
        /* DPREFCLK SS percentage Divider (100 or 1000) */
        int dprefclk_ss_divider;
-
-       /* max disp_clk from PPLIB for max validation display clock*/
-       int max_displ_clk_in_khz;
 };
 
 
-struct display_clock *dce_disp_clk_create(
+struct dccg *dce_dccg_create(
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask);
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask);
 
-struct display_clock *dce110_disp_clk_create(
+struct dccg *dce110_dccg_create(
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask);
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask);
 
-struct display_clock *dce112_disp_clk_create(
+struct dccg *dce112_dccg_create(
        struct dc_context *ctx,
-       const struct dce_disp_clk_registers *regs,
-       const struct dce_disp_clk_shift *clk_shift,
-       const struct dce_disp_clk_mask *clk_mask);
+       const struct dccg_registers *regs,
+       const struct dccg_shift *clk_shift,
+       const struct dccg_mask *clk_mask);
+
+struct dccg *dce120_dccg_create(struct dc_context *ctx);
 
-struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+#ifdef CONFIG_X86
+struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+#endif
 
-void dce_disp_clk_destroy(struct display_clock **disp_clk);
+void dce_dccg_destroy(struct dccg **dccg);
 
 #endif /* _DCE_CLOCKS_H_ */
index a576b8bbb3cd4c1da1e0c3adec0bb53f47a3e1b5..ca7989e4932bdd93a848ee46421986a094579228 100644 (file)
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
        }
 }
 
-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
                struct dc_link *link,
                struct psr_context *psr_context)
 {
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
 
        /* notifyDMCUMsg */
        REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+       return true;
 }
 
 static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -314,7 +316,7 @@ static void dce_get_psr_wait_loop(
        return;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 static void dcn10_get_dmcu_state(struct dmcu *dmcu)
 {
        struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
         *  least a few frames. Should never hit the max retry assert below.
         */
        if (wait == true) {
-       for (retryCount = 0; retryCount <= 1000; retryCount++) {
-               dcn10_get_dmcu_psr_state(dmcu, &psr_state);
-               if (enable) {
-                       if (psr_state != 0)
-                               break;
-               } else {
-                       if (psr_state == 0)
-                               break;
+               for (retryCount = 0; retryCount <= 1000; retryCount++) {
+                       dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+                       if (enable) {
+                               if (psr_state != 0)
+                                       break;
+                       } else {
+                               if (psr_state == 0)
+                                       break;
+                       }
+                       udelay(500);
                }
-               udelay(500);
-       }
 
-       /* assert if max retry hit */
-       ASSERT(retryCount <= 1000);
+               /* assert if max retry hit */
+               if (retryCount >= 1000)
+                       ASSERT(0);
        }
 }
 
-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
                struct dc_link *link,
                struct psr_context *psr_context)
 {
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
 
        /* If microcontroller is not running, do nothing */
        if (dmcu->dmcu_state != DMCU_RUNNING)
-               return;
+               return false;
 
        link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
                        psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
 
        /* notifyDMCUMsg */
        REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+       /* waitDMCUReadyForCmd */
+       REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+       return true;
 }
 
 static void dcn10_psr_wait_loop(
@@ -735,7 +743,7 @@ static const struct dmcu_funcs dce_funcs = {
        .is_dmcu_initialized = dce_is_dmcu_initialized
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 static const struct dmcu_funcs dcn10_funcs = {
        .dmcu_init = dcn10_dmcu_init,
        .load_iram = dcn10_dmcu_load_iram,
@@ -787,7 +795,7 @@ struct dmcu *dce_dmcu_create(
        return &dmcu_dce->base;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 struct dmcu *dcn10_dmcu_create(
        struct dc_context *ctx,
        const struct dce_dmcu_registers *regs,
index 057b8afd74bcc5f9f149b7365fb6d5199d75fe8d..64dc75378541539028b7333f22d00329240a3166 100644 (file)
        SR(DCCG_GATE_DISABLE_CNTL2), \
        SR(DCFCLK_CNTL),\
        SR(DCFCLK_CNTL), \
+       SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
        /* todo:  get these from GVM instead of reading registers ourselves */\
        MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
        MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
@@ -249,7 +250,6 @@ struct dce_hwseq_registers {
        uint32_t DISPCLK_FREQ_CHANGE_CNTL;
        uint32_t RBBMIF_TIMEOUT_DIS;
        uint32_t RBBMIF_TIMEOUT_DIS_2;
-       uint32_t DENTIST_DISPCLK_CNTL;
        uint32_t DCHUBBUB_CRC_CTRL;
        uint32_t DPP_TOP0_DPP_CRC_CTRL;
        uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
@@ -276,6 +276,8 @@ struct dce_hwseq_registers {
        uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
        uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
        uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+       uint32_t AZALIA_AUDIO_DTO;
+       uint32_t AZALIA_CONTROLLER_CLOCK_GATING;
 };
  /* set field name */
 #define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -362,7 +364,8 @@ struct dce_hwseq_registers {
        HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
        HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
        HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
-       HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+       HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh), \
+       HWS_SF(, DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
 
 #define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -496,14 +499,13 @@ struct dce_hwseq_registers {
        type DOMAIN7_PGFSM_PWR_STATUS; \
        type DCFCLK_GATE_DIS; \
        type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
-       type DENTIST_DPPCLK_WDIVIDER; \
-       type DENTIST_DISPCLK_WDIVIDER; \
        type VGA_TEST_ENABLE; \
        type VGA_TEST_RENDER_START; \
        type D1VGA_MODE_ENABLE; \
        type D2VGA_MODE_ENABLE; \
        type D3VGA_MODE_ENABLE; \
-       type D4VGA_MODE_ENABLE;
+       type D4VGA_MODE_ENABLE; \
+       type AZALIA_AUDIO_DTO_MODULE;
 
 struct dce_hwseq_shift {
        HWSEQ_REG_FIELD_LIST(uint8_t)
index dbe3b26b6d9eb6134bf34f184827a19841d28cbd..60e3c6a73d370c7ecbad78ee664beb3d9d19833a 100644 (file)
@@ -646,6 +646,9 @@ static bool dce110_link_encoder_validate_hdmi_output(
        if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
                adjusted_pix_clk_khz >= 300000)
                return false;
+       if (enc110->base.ctx->dc->debug.hdmi20_disable &&
+               crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               return false;
        return true;
 }
 
@@ -773,6 +776,9 @@ void dce110_link_encoder_construct(
                                __func__,
                                result);
        }
+       if (enc110->base.ctx->dc->debug.hdmi20_disable) {
+               enc110->base.features.flags.bits.HDMI_6GB_EN = 0;
+       }
 }
 
 bool dce110_link_encoder_validate_output_with_stream(
index b235a75355b855e03cfd679126e72a9e30dd7d00..85686d9176364e0373248c4808eb631b5169173a 100644 (file)
@@ -729,7 +729,7 @@ static bool dce_mi_program_surface_flip_and_addr(
        return true;
 }
 
-static struct mem_input_funcs dce_mi_funcs = {
+static const struct mem_input_funcs dce_mi_funcs = {
        .mem_input_program_display_marks = dce_mi_program_display_marks,
        .allocate_mem_input = dce_mi_allocate_dmif,
        .free_mem_input = dce_mi_free_dmif,
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
        .mem_input_is_flip_pending = dce_mi_is_flip_pending
 };
 
+static const struct mem_input_funcs dce112_mi_funcs = {
+       .mem_input_program_display_marks = dce112_mi_program_display_marks,
+       .allocate_mem_input = dce_mi_allocate_dmif,
+       .free_mem_input = dce_mi_free_dmif,
+       .mem_input_program_surface_flip_and_addr =
+                       dce_mi_program_surface_flip_and_addr,
+       .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+       .mem_input_program_surface_config =
+                       dce_mi_program_surface_config,
+       .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static const struct mem_input_funcs dce120_mi_funcs = {
+       .mem_input_program_display_marks = dce120_mi_program_display_marks,
+       .allocate_mem_input = dce_mi_allocate_dmif,
+       .free_mem_input = dce_mi_free_dmif,
+       .mem_input_program_surface_flip_and_addr =
+                       dce_mi_program_surface_flip_and_addr,
+       .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+       .mem_input_program_surface_config =
+                       dce_mi_program_surface_config,
+       .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
 
 void dce_mem_input_construct(
        struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
        const struct dce_mem_input_mask *mi_mask)
 {
        dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-       dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+       dce_mi->base.funcs = &dce112_mi_funcs;
 }
 
 void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
        const struct dce_mem_input_mask *mi_mask)
 {
        dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-       dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+       dce_mi->base.funcs = &dce120_mi_funcs;
 }
index c0e813c7ddd41db3a372218e564f0f1d70882b2e..b139b40178200f41ce4c31e31beb899da0835df8 100644 (file)
@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
                        AFMT_GENERIC0_UPDATE, (packet_index == 0),
                        AFMT_GENERIC2_UPDATE, (packet_index == 2));
        }
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        if (REG(AFMT_VBI_PACKET_CONTROL1)) {
                switch (packet_index) {
                case 0:
@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
                                HDMI_GENERIC1_SEND, send,
                                HDMI_GENERIC1_LINE, line);
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case 4:
                if (REG(HDMI_GENERIC_PACKET_CONTROL2))
                        REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
        struct dc_crtc_timing *crtc_timing,
        enum dc_color_space output_color_space)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        uint32_t h_active_start;
        uint32_t v_active_start;
        uint32_t misc0 = 0;
@@ -289,11 +289,6 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
 
        struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-       if (REG(DP_DB_CNTL))
-               REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-#endif
-
        /* set pixel encoding */
        switch (crtc_timing->pixel_encoding) {
        case PIXEL_ENCODING_YCBCR422:
@@ -322,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
                if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
                        REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
                if (enc110->se_mask->DP_VID_N_MUL)
                        REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
 #endif
@@ -333,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
                break;
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        if (REG(DP_MSA_MISC))
                misc1 = REG_READ(DP_MSA_MISC);
 #endif
@@ -367,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
        /* set dynamic range and YCbCr range */
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        switch (crtc_timing->display_color_depth) {
        case COLOR_DEPTH_666:
                colorimetry_bpc = 0;
@@ -446,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
                                DP_DYN_RANGE, dynamic_range_rgb,
                                DP_YCBCR_RANGE, dynamic_range_ycbcr);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
                if (REG(DP_MSA_COLORIMETRY))
                        REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
 
@@ -481,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
                                crtc_timing->v_front_porch;
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
                /* start at beginning of left border */
                if (REG(DP_MSA_TIMING_PARAM2))
                        REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -756,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
                dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        if (enc110->se_mask->HDMI_DB_DISABLE) {
                /* for bring up, disable dp double  TODO */
                if (REG(HDMI_DB_CONTROL))
@@ -794,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
                HDMI_GENERIC1_LINE, 0,
                HDMI_GENERIC1_SEND, 0);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        /* stop generic packets 2 & 3 on HDMI */
        if (REG(HDMI_GENERIC_PACKET_CONTROL2))
                REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
index a02e719d779443be17d5cd85b0da1650a89dc4e4..ab63d0d0304cb1012164edf8017b1d89aad638e6 100644 (file)
@@ -155,7 +155,7 @@ static void program_overscan(
        int overscan_bottom = data->v_active
                        - data->recout.y - data->recout.height;
 
-       if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+       if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
                overscan_bottom += 2;
                overscan_right += 2;
        }
index 41f83ecd7469b0a03e922196273b1ea19c7bdf4b..74c05e8788073433895536f856ae451e77a37fe8 100644 (file)
@@ -125,17 +125,50 @@ static void dce100_pplib_apply_display_requirements(
        dc->prev_display_config = *pp_display_cfg;
 }
 
+/* Unit: kHz. Before mode set, get the pixel clock from the context; the
+ * ASIC registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+       struct dc *dc,
+       struct dc_state *context)
+{
+       uint32_t max_pix_clk = 0;
+       int i;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == NULL)
+                       continue;
+
+               /* do not check the underlay */
+               if (pipe_ctx->top_pipe)
+                       continue;
+
+               if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+                       max_pix_clk =
+                               pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+       }
+       return max_pix_clk;
+}
+
 void dce100_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
                bool decrease_allowed)
 {
-       if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
-               dc->res_pool->display_clock->funcs->set_clock(
-                               dc->res_pool->display_clock,
-                               context->bw.dce.dispclk_khz * 115 / 100);
-               dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
-       }
+       struct dc_clocks req_clks;
+
+       req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+       req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+       dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+       dc->res_pool->dccg->funcs->update_clocks(
+                       dc->res_pool->dccg,
+                       &req_clks,
+                       decrease_allowed);
+
        dce100_pplib_apply_display_requirements(dc, context);
 }
 
index 38ec0d609297f832362d45397a2080918e4937c0..fd2bdae4dcec71f30680967ecc7b2f241a9be221 100644 (file)
@@ -52,6 +52,7 @@
 #include "dce/dce_10_0_sh_mask.h"
 
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 #include "dce/dce_abm.h"
 
 #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -135,15 +136,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
        .reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
 static const struct dce_opp_mask opp_mask = {
        OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
 };
+#define aux_engine_regs(id)\
+[id] = {\
+       AUX_COMMON_REG_LIST(id), \
+       .AUX_RESET_MASK = 0 \
+}
 
+static const struct dce110_aux_registers aux_engine_regs[] = {
+               aux_engine_regs(0),
+               aux_engine_regs(1),
+               aux_engine_regs(2),
+               aux_engine_regs(3),
+               aux_engine_regs(4),
+               aux_engine_regs(5)
+};
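The aux_engine_regs(id) macro leans on C99 designated array initializers: each expansion fills exactly one slot of the array by index. A small stand-alone illustration of the idiom (the names here are invented for the example):

#include <stdio.h>

struct regs { int base; int mask; };

/* each expansion initializes element [id]; entries may appear in any
 * order, and any slot left out defaults to zero */
#define engine_regs(id) \
[id] = { .base = 0x100 * (id), .mask = 0xff }

static const struct regs engines[] = {
        engine_regs(0),
        engine_regs(1),
        engine_regs(2),
};

int main(void)
{
        printf("engine 2 base: 0x%x\n", engines[2].base); /* 0x200 */
        return 0;
}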
 
 #define audio_regs(id)\
 [id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
        return &opp->base;
 }
 
+struct aux_engine *dce100_aux_engine_create(
+       struct dc_context *ctx,
+       uint32_t inst)
+{
+       struct aux_engine_dce110 *aux_engine =
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+       if (!aux_engine)
+               return NULL;
+
+       dce110_aux_engine_construct(aux_engine, ctx, inst,
+                                   SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+                                   &aux_engine_regs[inst]);
+
+       return &aux_engine->base;
+}
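dce100_aux_engine_create follows the driver's usual embedding pattern: the DCE110-specific struct embeds a generic base, the constructor fills both, and the caller only ever holds the base pointer. A condensed sketch of that pattern with hypothetical names (the timeout constant below is a stand-in for SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, whose values are not shown in this diff):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct aux_engine { int inst; };        /* generic base type */

struct aux_engine_dce110 {              /* hw-specific wrapper */
        struct aux_engine base;
        int timeout_period;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct aux_engine *aux_engine_create(int inst)
{
        struct aux_engine_dce110 *engine = calloc(1, sizeof(*engine));

        if (!engine)
                return NULL;                    /* caller handles failure */
        engine->base.inst = inst;
        engine->timeout_period = 6 * 550;       /* stand-in timeout value */
        return &engine->base;                   /* hand out only the base */
}

int main(void)
{
        struct aux_engine *e = aux_engine_create(3);
        struct aux_engine_dce110 *impl;

        if (!e)
                return 1;
        impl = container_of(e, struct aux_engine_dce110, base);
        printf("inst %d, timeout %d\n", e->inst, impl->timeout_period);
        free(impl);
        return 0;
}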
+
 struct clock_source *dce100_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
                        kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
+
+               if (pool->base.engines[i] != NULL)
+                       dce110_engine_destroy(&pool->base.engines[i]);
+
        }
 
        for (i = 0; i < pool->base.stream_enc_count; i++) {
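dce110_engine_destroy takes the address of the pool's engine pointer, so it can free the object and clear the caller's slot in one step; that keeps repeated destruct passes harmless. Its internals are not shown in this diff, but the convention looks like this in miniature:

#include <stdlib.h>

struct engine { int inst; };

static void engine_destroy(struct engine **e)
{
        free(*e);
        *e = NULL;      /* clear the caller's slot */
}

int main(void)
{
        struct engine *slot = calloc(1, sizeof(*slot));

        engine_destroy(&slot);
        engine_destroy(&slot);  /* safe: free(NULL) is a defined no-op */
        return 0;
}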
@@ -644,8 +679,8 @@ static void destruct(struct dce110_resource_pool *pool)
                        dce_aud_destroy(&pool->base.audios[i]);
        }
 
-       if (pool->base.display_clock != NULL)
-               dce_disp_clk_destroy(&pool->base.display_clock);
+       if (pool->base.dccg != NULL)
+               dce_dccg_destroy(&pool->base.dccg);
 
        if (pool->base.abm != NULL)
                                dce_abm_destroy(&pool->base.abm);
@@ -678,9 +713,22 @@ bool dce100_validate_bandwidth(
        struct dc  *dc,
        struct dc_state *context)
 {
-       /* TODO implement when needed but for now hardcode max value*/
-       context->bw.dce.dispclk_khz = 681000;
-       context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       int i;
+       bool at_least_one_pipe = false;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].stream)
+                       at_least_one_pipe = true;
+       }
+
+       if (at_least_one_pipe) {
+               /* TODO: implement when needed; for now hardcode the max value */
+               context->bw.dce.dispclk_khz = 681000;
+               context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       } else {
+               context->bw.dce.dispclk_khz = 0;
+               context->bw.dce.yclk_khz = 0;
+       }
 
        return true;
 }
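The reworked dce100_validate_bandwidth lets the request drop to zero when no pipe carries a stream, instead of always pinning the hardcoded maximum. A tiny stand-alone version of that decision (MEMORY_TYPE_MULTIPLIER is given an assumed value purely for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MEMORY_TYPE_MULTIPLIER 2        /* assumed value for this example */

struct bw { int dispclk_khz; int yclk_khz; };

static void validate(struct bw *bw, const bool *pipe_has_stream, int n)
{
        bool at_least_one_pipe = false;
        int i;

        for (i = 0; i < n; i++)
                if (pipe_has_stream[i])
                        at_least_one_pipe = true;

        if (at_least_one_pipe) {
                bw->dispclk_khz = 681000;       /* hardcoded max, as above */
                bw->yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
        } else {
                bw->dispclk_khz = 0;            /* idle: let clocks drop */
                bw->yclk_khz = 0;
        }
}

int main(void)
{
        bool pipes[3] = { false, false, false };
        struct bw bw;

        validate(&bw, pipes, 3);
        printf("idle dispclk: %d kHz\n", bw.dispclk_khz);       /* 0 */
        return 0;
}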
@@ -817,11 +865,11 @@ static bool construct(
                }
        }
 
-       pool->base.display_clock = dce_disp_clk_create(ctx,
+       pool->base.dccg = dce_dccg_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.display_clock == NULL) {
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -851,7 +899,7 @@ static bool construct(
         * max_clock_state
         */
        if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.display_clock->max_clks_state =
+               pool->base.dccg->max_clks_state =
                                        static_clk_info.max_clocks_state;
        {
                struct irq_service_init_data init_data;
@@ -915,6 +963,13 @@ static bool construct(
                                "DC: failed to create output pixel processor!\n");
                        goto res_create_fail;
                }
+               pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
+               if (pool->base.engines[i] == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       dm_error(
+                               "DC: failed to create aux engine!\n");
+                       goto res_create_fail;
+               }
        }
 
        dc->caps.max_planes =  pool->base.pipe_count;
index e2994d3370448028d86de5d6e11b6cfa8d4acc01..1f7f25013217dfad9226ed55a98aee0461b500a0 100644 (file)
@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
        struct dce110_compressor *cp110,
        bool enabled)
 {
-       uint8_t counter = 0;
+       uint32_t counter = 0;
        uint32_t addr = mmFBC_STATUS;
        uint32_t value;
 
@@ -158,7 +158,7 @@ static void wait_for_fbc_state_changed(
                counter++;
        }
 
-       if (counter == 10) {
+       if (counter == 1000) {
                DC_LOG_WARNING("%s: wait counter exceeded, changes to HW not applied",
                        __func__);
        } else {
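Widening the counter here is not cosmetic: with the bound raised from 10 to 1000, a uint8_t counter wraps at 256 and could never compare equal to 1000, so the timeout warning would become unreachable. The wrap is easy to demonstrate in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t narrow = 0;
        uint32_t wide = 0;
        int i;

        for (i = 0; i < 1000; i++) {
                narrow++;       /* wraps modulo 256 */
                wide++;
        }
        /* prints "narrow: 232, wide: 1000" */
        printf("narrow: %u, wide: %u\n", narrow, wide);
        return 0;
}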
@@ -551,9 +551,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
        compressor->base.lpt_channels_num = 0;
        compressor->base.attached_inst = 0;
        compressor->base.is_enabled = false;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        compressor->base.funcs = &dce110_compressor_funcs;
 
-#endif
 }
 
index c29052b6da5a8603bbf8b771dad34e62d5678590..1149c413f6d2330145762cbabf07ee06d9888c7a 100644 (file)
@@ -34,9 +34,7 @@
 #include "dce/dce_hwseq.h"
 #include "gpio_service_interface.h"
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
 #include "dce110_compressor.h"
-#endif
 
 #include "bios/bios_parser_helper.h"
 #include "timing_generator.h"
@@ -667,16 +665,25 @@ static enum dc_status bios_parser_crtc_source_select(
 
 void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
 {
+       bool is_hdmi;
+       bool is_dp;
+
        ASSERT(pipe_ctx->stream);
 
        if (pipe_ctx->stream_res.stream_enc == NULL)
                return;  /* this is not root pipe */
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+       is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+       is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
+
+       if (!is_hdmi && !is_dp)
+               return;
+
+       if (is_hdmi)
                pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
                        pipe_ctx->stream_res.stream_enc,
                        &pipe_ctx->stream_res.encoder_info_frame);
-       else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+       else
                pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
                        pipe_ctx->stream_res.stream_enc,
                        &pipe_ctx->stream_res.encoder_info_frame);
@@ -857,17 +864,22 @@ void hwss_edp_power_control(
                if (power_up) {
                        unsigned long long current_ts = dm_get_timestamp(ctx);
                        unsigned long long duration_in_ms =
-                                       dm_get_elapse_time_in_ns(
+                                       div64_u64(dm_get_elapse_time_in_ns(
                                                        ctx,
                                                        current_ts,
-                                                       div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000));
+                                                       link->link_trace.time_stamp.edp_poweroff), 1000000);
                        unsigned long long wait_time_ms = 0;
 
                        /* max 500ms from LCDVDD off to on */
+                       unsigned long long edp_poweroff_time_ms = 500;
+
+                       if (link->local_sink != NULL)
+                               edp_poweroff_time_ms =
+                                               500 + link->local_sink->edid_caps.panel_patch.extra_t12_ms;
                        if (link->link_trace.time_stamp.edp_poweroff == 0)
-                               wait_time_ms = 500;
-                       else if (duration_in_ms < 500)
-                               wait_time_ms = 500 - duration_in_ms;
+                               wait_time_ms = edp_poweroff_time_ms;
+                       else if (duration_in_ms < edp_poweroff_time_ms)
+                               wait_time_ms = edp_poweroff_time_ms - duration_in_ms;
 
                        if (wait_time_ms) {
                                msleep(wait_time_ms);
@@ -972,19 +984,35 @@ void hwss_edp_backlight_control(
                edp_receiver_ready_T9(link);
 }
 
-void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 {
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       struct dc_link *link = stream->sink->link;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+       /* notify audio driver for audio modes of monitor */
+       struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+       unsigned int i, num_audio = 1;
 
-       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
+       if (pipe_ctx->stream_res.audio) {
+               for (i = 0; i < MAX_PIPES; i++) {
+                       /* current_state not updated yet */
+                       if (core_dc->current_state->res_ctx.pipe_ctx[i].stream_res.audio != NULL)
+                               num_audio++;
+               }
 
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
-                       pipe_ctx->stream_res.stream_enc);
+               pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+               if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+                       /* this is the first audio; apply the PME w/a to wake AZ from D3 */
+                       pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+               /* un-mute audio */
+               /* TODO: audio should be per stream rather than per link */
+               pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+                       pipe_ctx->stream_res.stream_enc, false);
+       }
+}
+
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
 
        pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                        pipe_ctx->stream_res.stream_enc, true);
@@ -1015,7 +1043,23 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
                 * stream->stream_engine_id);
                 */
        }
+}
 
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+       struct dc_stream_state *stream = pipe_ctx->stream;
+       struct dc_link *link = stream->sink->link;
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+       if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+                       pipe_ctx->stream_res.stream_enc);
+
+       dc->hwss.disable_audio_stream(pipe_ctx, option);
 
        link->link_enc->funcs->connect_dig_be_to_fe(
                        link->link_enc,
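Splitting the audio bring-up into dce110_enable_audio_stream and dce110_disable_audio_stream lets dce110_disable_stream (and other sequencers, via the hwss table) reuse them, and the enable path applies the AZ power-management workaround only for the first active audio stream. A simplified model of that first-stream detection:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

/* true where the currently committed state already has audio running */
static bool current_audio[MAX_PIPES] = { false, true, false };

static void enable_audio_stream(void)
{
        /* starts at 1 to count the stream being enabled right now */
        int i, num_audio = 1;

        for (i = 0; i < MAX_PIPES; i++)
                if (current_audio[i])
                        num_audio++;

        if (num_audio == 1)
                printf("first audio stream: apply PME workaround\n");
        else
                printf("%d audio streams: workaround already applied\n",
                       num_audio);
}

int main(void)
{
        enable_audio_stream();  /* one stream already up -> skip the w/a */
        return 0;
}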
@@ -1206,13 +1250,13 @@ static void program_scaler(const struct dc *dc,
 {
        struct tg_color color = {0};
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        /* TOFPGA */
        if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
                return;
 #endif
 
-       if (dc->debug.surface_visual_confirm)
+       if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
                get_surface_visual_confirm_color(pipe_ctx, &color);
        else
                color_space_to_black_color(dc,
@@ -1298,6 +1342,30 @@ static enum dc_status apply_single_controller_ctx_to_hw(
        struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
                        pipe_ctx[pipe_ctx->pipe_idx];
 
+       if (pipe_ctx->stream_res.audio != NULL) {
+               struct audio_output audio_output;
+
+               build_audio_output(context, pipe_ctx, &audio_output);
+
+               if (dc_is_dp_signal(pipe_ctx->stream->signal))
+                       pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+                                       pipe_ctx->stream_res.stream_enc,
+                                       pipe_ctx->stream_res.audio->inst,
+                                       &pipe_ctx->stream->audio_info);
+               else
+                       pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+                                       pipe_ctx->stream_res.stream_enc,
+                                       pipe_ctx->stream_res.audio->inst,
+                                       &pipe_ctx->stream->audio_info,
+                                       &audio_output.crtc_info);
+
+               pipe_ctx->stream_res.audio->funcs->az_configure(
+                               pipe_ctx->stream_res.audio,
+                               pipe_ctx->stream->signal,
+                               &audio_output.crtc_info,
+                               &pipe_ctx->stream->audio_info);
+       }
+
        /*  */
        dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
 
@@ -1412,7 +1480,7 @@ static void power_down_controllers(struct dc *dc)
 {
        int i;
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+       for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
                dc->res_pool->timing_generators[i]->funcs->disable_crtc(
                                dc->res_pool->timing_generators[i]);
        }
@@ -1441,10 +1509,8 @@ static void power_down_all_hw_blocks(struct dc *dc)
 
        power_down_clock_sources(dc);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        if (dc->fbc_compressor)
                dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
 }
 
 static void disable_vga_and_power_gate_all_controllers(
@@ -1454,12 +1520,13 @@ static void disable_vga_and_power_gate_all_controllers(
        struct timing_generator *tg;
        struct dc_context *ctx = dc->ctx;
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+       for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
                tg = dc->res_pool->timing_generators[i];
 
                if (tg->funcs->disable_vga)
                        tg->funcs->disable_vga(tg);
-
+       }
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
                /* Enable CLOCK gating for each pipe BEFORE controller
                 * powergating. */
                enable_display_pipe_clock_gating(ctx,
@@ -1602,7 +1669,7 @@ static void dce110_set_displaymarks(
        }
 }
 
-static void set_safe_displaymarks(
+void dce110_set_safe_displaymarks(
                struct resource_context *res_ctx,
                const struct resource_pool *pool)
 {
@@ -1686,9 +1753,7 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
        if (events->force_trigger)
                value |= 0x1;
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        value |= 0x84;
-#endif
 
        for (i = 0; i < num_pipes; i++)
                pipe_ctx[i]->stream_res.tg->funcs->
@@ -1696,23 +1761,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
 }
 
 /* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet.
- * TODO: after mode set, pre_mode_set = false,
- * may read PLL register to get pixel clock
+ * may not be programmed yet.
  */
 static uint32_t get_max_pixel_clock_for_all_paths(
        struct dc *dc,
-       struct dc_state *context,
-       bool pre_mode_set)
+       struct dc_state *context)
 {
        uint32_t max_pix_clk = 0;
        int i;
 
-       if (!pre_mode_set) {
-               /* TODO: read ASIC register to get pixel clock */
-               ASSERT(0);
-       }
-
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
@@ -1728,96 +1785,9 @@ static uint32_t get_max_pixel_clock_for_all_paths(
                                pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
        }
 
-       if (max_pix_clk == 0)
-               ASSERT(0);
-
        return max_pix_clk;
 }
 
-/*
- * Find clock state based on clock requested. if clock value is 0, simply
- * set clock state as requested without finding clock state by clock value
- */
-
-static void apply_min_clocks(
-       struct dc *dc,
-       struct dc_state *context,
-       enum dm_pp_clocks_state *clocks_state,
-       bool pre_mode_set)
-{
-       struct state_dependent_clocks req_clocks = {0};
-
-       if (!pre_mode_set) {
-               /* set clock_state without verification */
-               if (context->dis_clk->funcs->set_min_clocks_state) {
-                       context->dis_clk->funcs->set_min_clocks_state(
-                                               context->dis_clk, *clocks_state);
-                       return;
-               }
-
-               /* TODO: This is incorrect. Figure out how to fix. */
-               context->dis_clk->funcs->apply_clock_voltage_request(
-                               context->dis_clk,
-                               DM_PP_CLOCK_TYPE_DISPLAY_CLK,
-                               context->dis_clk->cur_clocks_value.dispclk_in_khz,
-                               pre_mode_set,
-                               false);
-
-               context->dis_clk->funcs->apply_clock_voltage_request(
-                               context->dis_clk,
-                               DM_PP_CLOCK_TYPE_PIXELCLK,
-                               context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
-                               pre_mode_set,
-                               false);
-
-               context->dis_clk->funcs->apply_clock_voltage_request(
-                               context->dis_clk,
-                               DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
-                               context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
-                               pre_mode_set,
-                               false);
-               return;
-       }
-
-       /* get the required state based on state dependent clocks:
-        * display clock and pixel clock
-        */
-       req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
-
-       req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
-                       dc, context, true);
-
-       if (context->dis_clk->funcs->get_required_clocks_state) {
-               *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
-                               context->dis_clk, &req_clocks);
-               context->dis_clk->funcs->set_min_clocks_state(
-                       context->dis_clk, *clocks_state);
-       } else {
-               context->dis_clk->funcs->apply_clock_voltage_request(
-                               context->dis_clk,
-                               DM_PP_CLOCK_TYPE_DISPLAY_CLK,
-                               req_clocks.display_clk_khz,
-                               pre_mode_set,
-                               false);
-
-               context->dis_clk->funcs->apply_clock_voltage_request(
-                               context->dis_clk,
-                               DM_PP_CLOCK_TYPE_PIXELCLK,
-                               req_clocks.pixel_clk_khz,
-                               pre_mode_set,
-                               false);
-
-               context->dis_clk->funcs->apply_clock_voltage_request(
-                               context->dis_clk,
-                               DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
-                               req_clocks.pixel_clk_khz,
-                               pre_mode_set,
-                               false);
-       }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
-
 /*
  *  Check if FBC can be enabled
  */
@@ -1896,7 +1866,6 @@ static void enable_fbc(struct dc *dc,
                compr->funcs->enable_fbc(compr, &params);
        }
 }
-#endif
 
 static void dce110_reset_hw_ctx_wrap(
                struct dc *dc,
@@ -1949,97 +1918,12 @@ static void dce110_reset_hw_ctx_wrap(
        }
 }
 
-
-enum dc_status dce110_apply_ctx_to_hw(
+static void dce110_setup_audio_dto(
                struct dc *dc,
                struct dc_state *context)
 {
-       struct dc_bios *dcb = dc->ctx->dc_bios;
-       enum dc_status status;
        int i;
-       enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
-
-       /* Reset old context */
-       /* look up the targets that have been removed since last commit */
-       dc->hwss.reset_hw_ctx_wrap(dc, context);
-
-       /* Skip applying if no targets */
-       if (context->stream_count <= 0)
-               return DC_OK;
-
-       /* Apply new context */
-       dcb->funcs->set_scratch_critical_state(dcb, true);
 
-       /* below is for real asic only */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe_ctx_old =
-                                       &dc->current_state->res_ctx.pipe_ctx[i];
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
-                       continue;
-
-               if (pipe_ctx->stream == pipe_ctx_old->stream) {
-                       if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
-                               dce_crtc_switch_to_clk_src(dc->hwseq,
-                                               pipe_ctx->clock_source, i);
-                       continue;
-               }
-
-               dc->hwss.enable_display_power_gating(
-                               dc, i, dc->ctx->dc_bios,
-                               PIPE_GATING_CONTROL_DISABLE);
-       }
-
-       set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
-#if defined(CONFIG_DRM_AMD_DC_FBC)
-       if (dc->fbc_compressor)
-               dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-#endif
-       /*TODO: when pplib works*/
-       apply_min_clocks(dc, context, &clocks_state, true);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-       if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
-               if (context->bw.dcn.calc_clk.fclk_khz
-                               > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
-                       struct dm_pp_clock_for_voltage_req clock;
-
-                       clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
-                       clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
-                       dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
-                       dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
-                       context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
-               }
-               if (context->bw.dcn.calc_clk.dcfclk_khz
-                               > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
-                       struct dm_pp_clock_for_voltage_req clock;
-
-                       clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
-                       clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
-                       dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
-                       dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
-                       context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
-               }
-               if (context->bw.dcn.calc_clk.dispclk_khz
-                               > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
-                       dc->res_pool->display_clock->funcs->set_clock(
-                                       dc->res_pool->display_clock,
-                                       context->bw.dcn.calc_clk.dispclk_khz);
-                       dc->current_state->bw.dcn.cur_clk.dispclk_khz =
-                                       context->bw.dcn.calc_clk.dispclk_khz;
-                       context->bw.dcn.cur_clk.dispclk_khz =
-                                       context->bw.dcn.calc_clk.dispclk_khz;
-               }
-       } else
-#endif
-       if (context->bw.dce.dispclk_khz
-                       > dc->current_state->bw.dce.dispclk_khz) {
-               dc->res_pool->display_clock->funcs->set_clock(
-                               dc->res_pool->display_clock,
-                               context->bw.dce.dispclk_khz * 115 / 100);
-       }
        /* program audio wall clock. use HDMI as clock source if HDMI
         * audio active. Otherwise, use DP as clock source
         * first, loop to find any HDMI audio, if not, loop find DP audio
@@ -2113,6 +1997,52 @@ enum dc_status dce110_apply_ctx_to_hw(
                        }
                }
        }
+}
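dce110_setup_audio_dto now owns the audio wall-clock programming that used to sit inline in dce110_apply_ctx_to_hw; per the surrounding comment it prefers an HDMI stream as the DTO source and falls back to DP. The scan itself is elided from this hunk, so the following is only a sketch of its likely shape, with stand-in types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PIPES 6

struct pipe { bool has_audio; bool is_hdmi; };

/* pick the DTO source: first HDMI audio pipe if any, else first DP one */
static const struct pipe *pick_dto_source(const struct pipe *p, int n)
{
        const struct pipe *fallback = NULL;
        int i;

        for (i = 0; i < n; i++) {
                if (!p[i].has_audio)
                        continue;
                if (p[i].is_hdmi)
                        return &p[i];           /* HDMI wins immediately */
                if (!fallback)
                        fallback = &p[i];       /* remember first DP pipe */
        }
        return fallback;
}

int main(void)
{
        struct pipe pipes[MAX_PIPES] = { { true, false }, { true, true } };
        const struct pipe *src = pick_dto_source(pipes, MAX_PIPES);

        printf("DTO source: %s\n", src && src->is_hdmi ? "HDMI" : "DP");
        return 0;
}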
+
+enum dc_status dce110_apply_ctx_to_hw(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       struct dc_bios *dcb = dc->ctx->dc_bios;
+       enum dc_status status;
+       int i;
+
+       /* Reset old context */
+       /* look up the targets that have been removed since last commit */
+       dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+       /* Skip applying if no targets */
+       if (context->stream_count <= 0)
+               return DC_OK;
+
+       /* Apply new context */
+       dcb->funcs->set_scratch_critical_state(dcb, true);
+
+       /* below is for real asic only */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe_ctx_old =
+                                       &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+                       continue;
+
+               if (pipe_ctx->stream == pipe_ctx_old->stream) {
+                       if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+                               dce_crtc_switch_to_clk_src(dc->hwseq,
+                                               pipe_ctx->clock_source, i);
+                       continue;
+               }
+
+               dc->hwss.enable_display_power_gating(
+                               dc, i, dc->ctx->dc_bios,
+                               PIPE_GATING_CONTROL_DISABLE);
+       }
+
+       if (dc->fbc_compressor)
+               dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+       dce110_setup_audio_dto(dc, context);
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx_old =
@@ -2131,31 +2061,6 @@ enum dc_status dce110_apply_ctx_to_hw(
                if (pipe_ctx->top_pipe)
                        continue;
 
-               if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
-
-                       struct audio_output audio_output;
-
-                       build_audio_output(context, pipe_ctx, &audio_output);
-
-                       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-                               pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
-                                               pipe_ctx->stream_res.stream_enc,
-                                               pipe_ctx->stream_res.audio->inst,
-                                               &pipe_ctx->stream->audio_info);
-                       else
-                               pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
-                                               pipe_ctx->stream_res.stream_enc,
-                                               pipe_ctx->stream_res.audio->inst,
-                                               &pipe_ctx->stream->audio_info,
-                                               &audio_output.crtc_info);
-
-                       pipe_ctx->stream_res.audio->funcs->az_configure(
-                                       pipe_ctx->stream_res.audio,
-                                       pipe_ctx->stream->signal,
-                                       &audio_output.crtc_info,
-                                       &pipe_ctx->stream->audio_info);
-               }
-
                status = apply_single_controller_ctx_to_hw(
                                pipe_ctx,
                                context,
@@ -2165,17 +2070,11 @@ enum dc_status dce110_apply_ctx_to_hw(
                        return status;
        }
 
-       /* to save power */
-       apply_min_clocks(dc, context, &clocks_state, false);
-
        dcb->funcs->set_scratch_critical_state(dcb, false);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        if (dc->fbc_compressor)
                enable_fbc(dc, context);
 
-#endif
-
        return DC_OK;
 }
 
@@ -2490,10 +2389,9 @@ static void init_hw(struct dc *dc)
                abm->funcs->init_backlight(abm);
                abm->funcs->abm_init(abm);
        }
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
        if (dc->fbc_compressor)
                dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
-#endif
 
 }
 
@@ -2654,20 +2552,25 @@ static void pplib_apply_display_requirements(
        dc->prev_display_config = *pp_display_cfg;
 }
 
-static void dce110_set_bandwidth(
+void dce110_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
                bool decrease_allowed)
 {
-       dce110_set_displaymarks(dc, context);
+       struct dc_clocks req_clks;
 
-       if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
-               dc->res_pool->display_clock->funcs->set_clock(
-                               dc->res_pool->display_clock,
-                               context->bw.dce.dispclk_khz * 115 / 100);
-               dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
-       }
+       req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+       req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+
+       if (decrease_allowed)
+               dce110_set_displaymarks(dc, context);
+       else
+               dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
 
+       dc->res_pool->dccg->funcs->update_clocks(
+                       dc->res_pool->dccg,
+                       &req_clks,
+                       decrease_allowed);
        pplib_apply_display_requirements(dc, context);
 }
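The dce110 version makes the same DCCG handoff as DCE100 but picks between the fully computed watermarks and the conservative safe marks based on decrease_allowed. Read as a two-state policy, with stand-in functions (the hardware ordering rationale is not spelled out in the diff itself):

#include <stdbool.h>
#include <stdio.h>

static void set_displaymarks(void)      { printf("computed watermarks\n"); }
static void set_safe_displaymarks(void) { printf("safe watermarks\n"); }

static void set_bandwidth(bool decrease_allowed)
{
        /* conservative marks while clocks may still rise; real marks
         * once a decrease is permitted */
        if (decrease_allowed)
                set_displaymarks();
        else
                set_safe_displaymarks();

        /* then hand the dispclk/phyclk request to the DCCG, which may
         * defer an actual decrease until decrease_allowed is true */
        printf("dccg update_clocks(decrease_allowed=%d)\n", decrease_allowed);
}

int main(void)
{
        set_bandwidth(false);
        set_bandwidth(true);
        return 0;
}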
 
@@ -2679,9 +2582,7 @@ static void dce110_program_front_end_for_pipe(
        struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct xfm_grph_csc_adjustment adjust;
        struct out_csc_color_matrix tbl_entry;
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
-#endif
        unsigned int i;
        DC_LOGGER_INIT();
        memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2722,7 +2623,6 @@ static void dce110_program_front_end_for_pipe(
 
        program_scaler(dc, pipe_ctx);
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        /* fbc not applicable on Underlay pipe */
        if (dc->fbc_compressor && old_pipe->stream &&
            pipe_ctx->pipe_idx != underlay_idx) {
@@ -2731,7 +2631,6 @@ static void dce110_program_front_end_for_pipe(
                else
                        enable_fbc(dc, dc->current_state);
        }
-#endif
 
        mi->funcs->mem_input_program_surface_config(
                        mi,
@@ -2907,9 +2806,11 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
        struct dc_cursor_mi_param param = {
                .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
                .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
-               .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
-               .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
-               .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+               .viewport = pipe_ctx->plane_res.scl_data.viewport,
+               .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+               .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+               .rotation = pipe_ctx->plane_state->rotation,
+               .mirror = pipe_ctx->plane_state->horizontal_mirror
        };
 
        if (pipe_ctx->plane_state->address.type
@@ -2968,6 +2869,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .disable_stream = dce110_disable_stream,
        .unblank_stream = dce110_unblank_stream,
        .blank_stream = dce110_blank_stream,
+       .enable_audio_stream = dce110_enable_audio_stream,
+       .disable_audio_stream = dce110_disable_audio_stream,
        .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
        .enable_display_power_gating = dce110_enable_display_power_gating,
        .disable_plane = dce110_power_down_fe,
index 5d7e9f5168277e0d7eb376328dc86f5f648faeaf..e4c5db75c4c656b010e16ab490286801bb94bfb4 100644 (file)
@@ -49,6 +49,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
                struct dc_link_settings *link_settings);
 
 void dce110_blank_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx);
+void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option);
+
 void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
 
 void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
@@ -56,10 +60,19 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
 
 void dce110_power_down(struct dc *dc);
 
+void dce110_set_safe_displaymarks(
+               struct resource_context *res_ctx,
+               const struct resource_pool *pool);
+
 void dce110_fill_display_configs(
        const struct dc_state *context,
        struct dm_pp_display_configuration *pp_display_cfg);
 
+void dce110_set_bandwidth(
+               struct dc *dc,
+               struct dc_state *context,
+               bool decrease_allowed);
+
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
index 0564c8e312529a6d4133ca1d0010172ca8c187da..9b9fc3d96c0712c83e843531c14cb59db67b9727 100644 (file)
@@ -1011,7 +1011,7 @@ void dce110_free_mem_input_v(
 {
 }
 
-static struct mem_input_funcs dce110_mem_input_v_funcs = {
+static const struct mem_input_funcs dce110_mem_input_v_funcs = {
        .mem_input_program_display_marks =
                        dce_mem_input_v_program_display_marks,
        .mem_input_program_chroma_display_marks =
index ee33786bdef635279278455c2f26a5b5f19db79d..e5e9e92521e91fab5afd921d245a575802a8b330 100644 (file)
 #include "dce/dce_clock_source.h"
 #include "dce/dce_hwseq.h"
 #include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_aux.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
 
 #define DC_LOGGER \
                dc->ctx->logger
-#if defined(CONFIG_DRM_AMD_DC_FBC)
+
 #include "dce110/dce110_compressor.h"
-#endif
 
 #include "reg_helper.h"
 
@@ -147,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
 #define SRI(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -307,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
        OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+       AUX_COMMON_REG_LIST(id), \
+       .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+               aux_engine_regs(0),
+               aux_engine_regs(1),
+               aux_engine_regs(2),
+               aux_engine_regs(3),
+               aux_engine_regs(4),
+               aux_engine_regs(5)
+};
+
 #define audio_regs(id)\
 [id] = {\
        AUD_COMMON_REG_LIST(id)\
@@ -589,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
        return &opp->base;
 }
 
+struct aux_engine *dce110_aux_engine_create(
+       struct dc_context *ctx,
+       uint32_t inst)
+{
+       struct aux_engine_dce110 *aux_engine =
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+       if (!aux_engine)
+               return NULL;
+
+       dce110_aux_engine_construct(aux_engine, ctx, inst,
+                                   SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+                                   &aux_engine_regs[inst]);
+
+       return &aux_engine->base;
+}
+
 struct clock_source *dce110_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -652,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
                        kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
+
+               if (pool->base.engines[i] != NULL)
+                       dce110_engine_destroy(&pool->base.engines[i]);
+
        }
 
        for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -680,8 +716,8 @@ static void destruct(struct dce110_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.display_clock != NULL)
-               dce_disp_clk_destroy(&pool->base.display_clock);
+       if (pool->base.dccg != NULL)
+               dce_dccg_destroy(&pool->base.dccg);
 
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
@@ -795,43 +831,38 @@ static bool dce110_validate_bandwidth(
 
        if (memcmp(&dc->current_state->bw.dce,
                        &context->bw.dce, sizeof(context->bw.dce))) {
-               struct log_entry log_entry;
-               dm_logger_open(
-                       dc->ctx->logger,
-                       &log_entry,
-                       LOG_BANDWIDTH_CALCS);
-               dm_logger_append(&log_entry, "%s: finish,\n"
+
+               DC_LOG_BANDWIDTH_CALCS(
+                       "%s: finish,\n"
+                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+                       "stutMark_b: %d stutMark_a: %d\n"
+                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+                       "stutMark_b: %d stutMark_a: %d\n"
                        "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
-                       "stutMark_b: %d stutMark_a: %d\n",
+                       "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+                       "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+                       "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
                        __func__,
                        context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
                        context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
                        context->bw.dce.urgent_wm_ns[0].b_mark,
                        context->bw.dce.urgent_wm_ns[0].a_mark,
                        context->bw.dce.stutter_exit_wm_ns[0].b_mark,
-                       context->bw.dce.stutter_exit_wm_ns[0].a_mark);
-               dm_logger_append(&log_entry,
-                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
-                       "stutMark_b: %d stutMark_a: %d\n",
+                       context->bw.dce.stutter_exit_wm_ns[0].a_mark,
                        context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
                        context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
                        context->bw.dce.urgent_wm_ns[1].b_mark,
                        context->bw.dce.urgent_wm_ns[1].a_mark,
                        context->bw.dce.stutter_exit_wm_ns[1].b_mark,
-                       context->bw.dce.stutter_exit_wm_ns[1].a_mark);
-               dm_logger_append(&log_entry,
-                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
-                       "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+                       context->bw.dce.stutter_exit_wm_ns[1].a_mark,
                        context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
                        context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
                        context->bw.dce.urgent_wm_ns[2].b_mark,
                        context->bw.dce.urgent_wm_ns[2].a_mark,
                        context->bw.dce.stutter_exit_wm_ns[2].b_mark,
                        context->bw.dce.stutter_exit_wm_ns[2].a_mark,
-                       context->bw.dce.stutter_mode_enable);
-               dm_logger_append(&log_entry,
-                       "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
-                       "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+                       context->bw.dce.stutter_mode_enable,
                        context->bw.dce.cpuc_state_change_enable,
                        context->bw.dce.cpup_state_change_enable,
                        context->bw.dce.nbp_state_change_enable,
@@ -841,7 +872,6 @@ static bool dce110_validate_bandwidth(
                        context->bw.dce.sclk_deep_sleep_khz,
                        context->bw.dce.yclk_khz,
                        context->bw.dce.blackout_recovery_time_us);
-               dm_logger_close(&log_entry);
        }
        return result;
 }
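The logging rework collapses the open/append/close sequence into a single DC_LOG_BANDWIDTH_CALCS call, still guarded so it only fires when the computed bandwidth block actually changed. The memcmp-guarded pattern in miniature:

#include <stdio.h>
#include <string.h>

struct bw_calcs { int dispclk_khz; int yclk_khz; };

static void log_if_changed(const struct bw_calcs *cur,
                           const struct bw_calcs *next)
{
        /* compare the whole struct; emit one message with one format */
        if (memcmp(cur, next, sizeof(*next)))
                printf("bw: dispclk %d kHz, yclk %d kHz\n",
                       next->dispclk_khz, next->yclk_khz);
}

int main(void)
{
        struct bw_calcs cur = { 300000, 800000 };
        struct bw_calcs next = { 681000, 1000000 };

        log_if_changed(&cur, &next);    /* prints */
        log_if_changed(&cur, &cur);     /* silent: nothing changed */
        return 0;
}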
@@ -1180,11 +1210,11 @@ static bool construct(
                }
        }
 
-       pool->base.display_clock = dce110_disp_clk_create(ctx,
+       pool->base.dccg = dce110_dccg_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.display_clock == NULL) {
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1214,7 +1244,7 @@ static bool construct(
         * max_clock_state
         */
        if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.display_clock->max_clks_state =
+               pool->base.dccg->max_clks_state =
                                static_clk_info.max_clocks_state;
 
        {
@@ -1265,14 +1295,18 @@ static bool construct(
                                "DC: failed to create output pixel processor!\n");
                        goto res_create_fail;
                }
+
+               pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+               if (pool->base.engines[i] == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       dm_error(
+                               "DC: failed to create aux engine!\n");
+                       goto res_create_fail;
+               }
        }
 
-#if defined(CONFIG_DRM_AMD_DC_FBC)
        dc->fbc_compressor = dce110_compressor_create(ctx);
 
-
-
-#endif
        if (!underlay_create(ctx, &pool->base))
                goto res_create_fail;
 
index a7dce060204fcccac68edd94c6010b00889c882c..aa8d6b10d2c3fe3cca25617b545336efb37e6655 100644 (file)
@@ -235,7 +235,7 @@ static void program_overscan(
        int overscan_right = data->h_active - data->recout.x - data->recout.width;
        int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
 
-       if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+       if (xfm_dce->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
                overscan_bottom += 2;
                overscan_right += 2;
        }
index 00c0a1ef15ebd5d602d97c1f6b724814ccbc260b..84a05ff2d67489fe9f2877b6f60817f7414867f1 100644 (file)
@@ -49,6 +49,7 @@
 #include "dce112/dce112_hw_sequencer.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 
 #include "reg_helper.h"
 
@@ -146,15 +147,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
        .reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
        OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+       AUX_COMMON_REG_LIST(id), \
+       .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+               aux_engine_regs(0),
+               aux_engine_regs(1),
+               aux_engine_regs(2),
+               aux_engine_regs(3),
+               aux_engine_regs(4),
+               aux_engine_regs(5)
+};
+
 #define audio_regs(id)\
 [id] = {\
        AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
        return &opp->base;
 }
 
+struct aux_engine *dce112_aux_engine_create(
+       struct dc_context *ctx,
+       uint32_t inst)
+{
+       struct aux_engine_dce110 *aux_engine =
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+       if (!aux_engine)
+               return NULL;
+
+       dce110_aux_engine_construct(aux_engine, ctx, inst,
+                                   SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+                                   &aux_engine_regs[inst]);
+
+       return &aux_engine->base;
+}
+
 struct clock_source *dce112_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
                if (pool->base.opps[i] != NULL)
                        dce110_opp_destroy(&pool->base.opps[i]);
 
+               if (pool->base.engines[i] != NULL)
+                       dce110_engine_destroy(&pool->base.engines[i]);
+
                if (pool->base.transforms[i] != NULL)
                        dce112_transform_destroy(&pool->base.transforms[i]);
 
@@ -640,6 +676,10 @@ static void destruct(struct dce110_resource_pool *pool)
                        kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
+
+               if (pool->base.engines[i] != NULL)
+                       dce110_engine_destroy(&pool->base.engines[i]);
+
        }
 
        for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -668,8 +708,8 @@ static void destruct(struct dce110_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.display_clock != NULL)
-               dce_disp_clk_destroy(&pool->base.display_clock);
+       if (pool->base.dccg != NULL)
+               dce_dccg_destroy(&pool->base.dccg);
 
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
@@ -744,43 +784,38 @@ bool dce112_validate_bandwidth(
 
        if (memcmp(&dc->current_state->bw.dce,
                        &context->bw.dce, sizeof(context->bw.dce))) {
-               struct log_entry log_entry;
-               dm_logger_open(
-                       dc->ctx->logger,
-                       &log_entry,
-                       LOG_BANDWIDTH_CALCS);
-               dm_logger_append(&log_entry, "%s: finish,\n"
+
+               DC_LOG_BANDWIDTH_CALCS(
+                       "%s: finish,\n"
                        "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
-                       "stutMark_b: %d stutMark_a: %d\n",
+                       "stutMark_b: %d stutMark_a: %d\n"
+                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+                       "stutMark_b: %d stutMark_a: %d\n"
+                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+                       "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n"
+                       "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+                       "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
                        __func__,
                        context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
                        context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
                        context->bw.dce.urgent_wm_ns[0].b_mark,
                        context->bw.dce.urgent_wm_ns[0].a_mark,
                        context->bw.dce.stutter_exit_wm_ns[0].b_mark,
-                       context->bw.dce.stutter_exit_wm_ns[0].a_mark);
-               dm_logger_append(&log_entry,
-                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
-                       "stutMark_b: %d stutMark_a: %d\n",
+                       context->bw.dce.stutter_exit_wm_ns[0].a_mark,
                        context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
                        context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
                        context->bw.dce.urgent_wm_ns[1].b_mark,
                        context->bw.dce.urgent_wm_ns[1].a_mark,
                        context->bw.dce.stutter_exit_wm_ns[1].b_mark,
-                       context->bw.dce.stutter_exit_wm_ns[1].a_mark);
-               dm_logger_append(&log_entry,
-                       "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
-                       "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+                       context->bw.dce.stutter_exit_wm_ns[1].a_mark,
                        context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
                        context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
                        context->bw.dce.urgent_wm_ns[2].b_mark,
                        context->bw.dce.urgent_wm_ns[2].a_mark,
                        context->bw.dce.stutter_exit_wm_ns[2].b_mark,
                        context->bw.dce.stutter_exit_wm_ns[2].a_mark,
-                       context->bw.dce.stutter_mode_enable);
-               dm_logger_append(&log_entry,
-                       "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
-                       "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+                       context->bw.dce.stutter_mode_enable,
                        context->bw.dce.cpuc_state_change_enable,
                        context->bw.dce.cpup_state_change_enable,
                        context->bw.dce.nbp_state_change_enable,
@@ -790,7 +825,6 @@ bool dce112_validate_bandwidth(
                        context->bw.dce.sclk_deep_sleep_khz,
                        context->bw.dce.yclk_khz,
                        context->bw.dce.blackout_recovery_time_us);
-               dm_logger_close(&log_entry);
        }
        return result;
 }
@@ -1000,7 +1034,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-       clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
                        mem_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1010,7 +1044,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
        /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
-       clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
                        mem_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -1020,7 +1054,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-       clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
        /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -1030,7 +1064,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
        /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
-       clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
        /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
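These hunks also correct the misspelled field name wm_min_memg_clk_in_khz to wm_min_mem_clk_in_khz. The four watermark sets being filled cover the 2x2 combinations of low/high engine clock and low/high memory clock, with 5 GHz standing in for "unbounded" to cover Overdrive; the pattern generalizes to a loop (the DPM level values below are illustrative only):

#include <stdio.h>

struct wm_range {
        unsigned min_eng_khz, max_eng_khz;
        unsigned min_mem_khz, max_mem_khz;
};

int main(void)
{
        unsigned eng_lo = 300000, eng_split = 600000;
        unsigned mem_lo = 800000, mem_split = 1000000;
        unsigned unbounded = 5000000;   /* "5 GHz ... to cover Overdrive" */
        struct wm_range r[4];
        int set;

        for (set = 0; set < 4; set++) {
                int eng_hi = set & 1, mem_hi = set & 2;

                r[set].min_eng_khz = eng_hi ? eng_split : eng_lo;
                r[set].max_eng_khz = eng_hi ? unbounded : eng_split - 1;
                r[set].min_mem_khz = mem_hi ? mem_split : mem_lo;
                r[set].max_mem_khz = mem_hi ? unbounded : mem_split - 1;
                printf("set %d: eng [%u..%u] mem [%u..%u]\n", set,
                       r[set].min_eng_khz, r[set].max_eng_khz,
                       r[set].min_mem_khz, r[set].max_mem_khz);
        }
        return 0;
}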
@@ -1124,11 +1158,11 @@ static bool construct(
                }
        }
 
-       pool->base.display_clock = dce112_disp_clk_create(ctx,
+       pool->base.dccg = dce112_dccg_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.display_clock == NULL) {
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1158,7 +1192,7 @@ static bool construct(
         * max_clock_state
         */
        if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.display_clock->max_clks_state =
+               pool->base.dccg->max_clks_state =
                                static_clk_info.max_clocks_state;
 
        {
@@ -1214,6 +1248,13 @@ static bool construct(
                                "DC:failed to create output pixel processor!\n");
                        goto res_create_fail;
                }
+               pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+               if (pool->base.engines[i] == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       dm_error(
+                               "DC: failed to create aux engine!\n");
+                       goto res_create_fail;
+               }
        }
 
        if (!resource_construct(num_virtual_links, dc, &pool->base,
index e96ff86d2fc3b608751a2cdbe70d968d2032f401..5853522a618298a6bdc20df629f1c494d13468ae 100644 (file)
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
        dh_data->dchub_info_valid = false;
 }
 
+static void dce120_set_bandwidth(
+               struct dc *dc,
+               struct dc_state *context,
+               bool decrease_allowed)
+{
+       if (context->stream_count <= 0)
+               return;
 
+       dce110_set_bandwidth(dc, context, decrease_allowed);
+}
 
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
        dce110_hw_sequencer_construct(dc);
        dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
        dc->hwss.update_dchub = dce120_update_dchub;
+       dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
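dce120_set_bandwidth is a thin override: it reuses the shared dce110_set_bandwidth but returns early when the context maps no streams, and the constructor patches it into the hw-sequencer table after the base construct runs. Overriding one slot of a function table this way is the general pattern:

#include <stdio.h>

struct hwss { void (*set_bandwidth)(int stream_count); };

static void base_set_bandwidth(int stream_count)
{
        printf("programming bandwidth for %d stream(s)\n", stream_count);
}

static void dce120_style_set_bandwidth(int stream_count)
{
        if (stream_count <= 0)
                return;         /* nothing mapped: skip the sequence */
        base_set_bandwidth(stream_count);
}

int main(void)
{
        struct hwss hwss;

        hwss.set_bandwidth = base_set_bandwidth;         /* base construct */
        hwss.set_bandwidth = dce120_style_set_bandwidth; /* dce120 override */

        hwss.set_bandwidth(0);  /* silently skipped */
        hwss.set_bandwidth(2);
        return 0;
}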
 
index 2d58daccc0056cb4b95cd73fc09326ea31dac05c..61d8e22d23c9542af476bbcc5e0c0519ec398226 100644 (file)
@@ -53,6 +53,7 @@
 #include "dce/dce_hwseq.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 
 #include "dce/dce_12_0_offset.h"
 #include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
 static const struct dce_opp_mask opp_mask = {
        OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
 };
+#define aux_engine_regs(id)\
+[id] = {\
+       AUX_COMMON_REG_LIST(id), \
+       .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+               aux_engine_regs(0),
+               aux_engine_regs(1),
+               aux_engine_regs(2),
+               aux_engine_regs(3),
+               aux_engine_regs(4),
+               aux_engine_regs(5)
+};
 
 #define audio_regs(id)\
 [id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
                             ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
        return &opp->base;
 }
+struct aux_engine *dce120_aux_engine_create(
+       struct dc_context *ctx,
+       uint32_t inst)
+{
+       struct aux_engine_dce110 *aux_engine =
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+       if (!aux_engine)
+               return NULL;
+
+       dce110_aux_engine_construct(aux_engine, ctx, inst,
+                                   SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+                                   &aux_engine_regs[inst]);
+
+       return &aux_engine->base;
+}
 
 static const struct bios_registers bios_regs = {
        .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
                .num_pll = 6,
 };
 
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
                .disable_clock_gate = true,
 };
 
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
                        kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
+
+               if (pool->base.engines[i] != NULL)
+                       dce110_engine_destroy(&pool->base.engines[i]);
+
        }
 
        for (i = 0; i < pool->base.audio_count; i++) {
@@ -494,8 +529,8 @@ static void destruct(struct dce110_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.display_clock != NULL)
-               dce_disp_clk_destroy(&pool->base.display_clock);
+       if (pool->base.dccg != NULL)
+               dce_dccg_destroy(&pool->base.dccg);
 }
 
 static void read_dce_straps(
@@ -775,7 +810,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-       clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[0].wm_min_mem_clk_in_khz =
                        mem_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -785,7 +820,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
        /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
-       clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[1].wm_min_mem_clk_in_khz =
                        mem_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
@@ -795,7 +830,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[0].clocks_in_khz;
        clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
-       clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[2].wm_min_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
        /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
@@ -805,7 +840,7 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
                        eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
        /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
-       clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+       clk_ranges.wm_clk_ranges[3].wm_min_mem_clk_in_khz =
                        mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
        /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
        clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
@@ -894,11 +929,11 @@ static bool construct(
                }
        }
 
-       pool->base.display_clock = dce120_disp_clk_create(ctx);
-       if (pool->base.display_clock == NULL) {
+       pool->base.dccg = dce120_dccg_create(ctx);
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
-               goto disp_clk_create_fail;
+               goto dccg_create_fail;
        }
 
        pool->base.dmcu = dce_dmcu_create(ctx,
@@ -984,6 +1019,13 @@ static bool construct(
                        dm_error(
                                "DC: failed to create output pixel processor!\n");
                }
+               pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+               if (pool->base.engines[i] == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       dm_error(
+                               "DC: failed to create aux engine!\n");
+                       goto res_create_fail;
+               }
 
                /* check next valid pipe */
                j++;
@@ -1011,7 +1053,7 @@ static bool construct(
 
 irqs_create_fail:
 controller_create_fail:
-disp_clk_create_fail:
+dccg_create_fail:
 clk_src_create_fail:
 res_create_fail:
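
The renamed dccg_create_fail label sits in a stack of error labels that all fall through to shared cleanup; the teardown tolerates members that were never created, as the NULL checks in destruct() above show. A self-contained sketch of that idiom, assuming malloc/free in place of the DC constructors:

#include <stdlib.h>

struct pool { void *clk_src, *dccg, *irqs; };

/* NULL-tolerant teardown in the spirit of destruct() above. */
static void pool_destruct(struct pool *p)
{
        free(p->irqs);
        free(p->dccg);
        free(p->clk_src);
}

static int pool_construct(struct pool *p)
{
        p->clk_src = malloc(16);
        if (!p->clk_src)
                goto clk_src_create_fail;
        p->dccg = malloc(16);
        if (!p->dccg)
                goto dccg_create_fail;
        p->irqs = malloc(16);
        if (!p->irqs)
                goto irqs_create_fail;
        return 1;

        /* Stacked labels fall through to one teardown that checks what
         * actually exists, so each failure point needs no bespoke path. */
irqs_create_fail:
dccg_create_fail:
clk_src_create_fail:
        pool_destruct(p);
        return 0;
}

int main(void)
{
        struct pool p = { 0 };
        int ok = pool_construct(&p);

        if (ok)
                pool_destruct(&p);
        return ok ? 0 : 1;
}
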
 
index 48a0689647225720a9d3f136201a33bd9a5cb2a1..dc9f3e9afc338bcb08d16a3b16deb496800e05a1 100644 (file)
@@ -54,6 +54,7 @@
 #include "reg_helper.h"
 
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
 #include "dce/dce_abm.h"
 /* TODO remove this include */
 
@@ -153,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
        .reg_name = mm ## block ## id ## _ ## reg_name
 
 
-static const struct dce_disp_clk_registers disp_clk_regs = {
+static const struct dccg_registers disp_clk_regs = {
                CLK_COMMON_REG_LIST_DCE_BASE()
 };
 
-static const struct dce_disp_clk_shift disp_clk_shift = {
+static const struct dccg_shift disp_clk_shift = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 };
 
-static const struct dce_disp_clk_mask disp_clk_mask = {
+static const struct dccg_mask disp_clk_mask = {
                CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
        OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+       AUX_COMMON_REG_LIST(id), \
+       .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+               aux_engine_regs(0),
+               aux_engine_regs(1),
+               aux_engine_regs(2),
+               aux_engine_regs(3),
+               aux_engine_regs(4),
+               aux_engine_regs(5)
+};
+
 #define audio_regs(id)\
 [id] = {\
        AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
        return &opp->base;
 }
 
+struct aux_engine *dce80_aux_engine_create(
+       struct dc_context *ctx,
+       uint32_t inst)
+{
+       struct aux_engine_dce110 *aux_engine =
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+       if (!aux_engine)
+               return NULL;
+
+       dce110_aux_engine_construct(aux_engine, ctx, inst,
+                                   SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+                                   &aux_engine_regs[inst]);
+
+       return &aux_engine->base;
+}
+
 static struct stream_encoder *dce80_stream_encoder_create(
        enum engine_id eng_id,
        struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
                        kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
+
+               if (pool->base.engines[i] != NULL)
+                       dce110_engine_destroy(&pool->base.engines[i]);
        }
 
        for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -683,8 +719,8 @@ static void destruct(struct dce110_resource_pool *pool)
                }
        }
 
-       if (pool->base.display_clock != NULL)
-               dce_disp_clk_destroy(&pool->base.display_clock);
+       if (pool->base.dccg != NULL)
+               dce_dccg_destroy(&pool->base.dccg);
 
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
@@ -822,11 +858,11 @@ static bool dce80_construct(
                }
        }
 
-       pool->base.display_clock = dce_disp_clk_create(ctx,
+       pool->base.dccg = dce_dccg_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.display_clock == NULL) {
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -852,7 +888,7 @@ static bool dce80_construct(
                goto res_create_fail;
        }
        if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.display_clock->max_clks_state =
+               pool->base.dccg->max_clks_state =
                                        static_clk_info.max_clocks_state;
 
        {
@@ -899,6 +935,14 @@ static bool dce80_construct(
                        dm_error("DC: failed to create output pixel processor!\n");
                        goto res_create_fail;
                }
+
+               pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+               if (pool->base.engines[i] == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       dm_error(
+                               "DC: failed to create aux engine!\n");
+                       goto res_create_fail;
+               }
        }
 
        dc->caps.max_planes =  pool->base.pipe_count;
@@ -1006,11 +1050,11 @@ static bool dce81_construct(
                }
        }
 
-       pool->base.display_clock = dce_disp_clk_create(ctx,
+       pool->base.dccg = dce_dccg_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.display_clock == NULL) {
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1037,7 +1081,7 @@ static bool dce81_construct(
        }
 
        if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.display_clock->max_clks_state =
+               pool->base.dccg->max_clks_state =
                                        static_clk_info.max_clocks_state;
 
        {
@@ -1187,11 +1231,11 @@ static bool dce83_construct(
                }
        }
 
-       pool->base.display_clock = dce_disp_clk_create(ctx,
+       pool->base.dccg = dce_dccg_create(ctx,
                        &disp_clk_regs,
                        &disp_clk_shift,
                        &disp_clk_mask);
-       if (pool->base.display_clock == NULL) {
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto res_create_fail;
@@ -1218,7 +1262,7 @@ static bool dce83_construct(
        }
 
        if (dm_pp_get_static_clocks(ctx, &static_clk_info))
-               pool->base.display_clock->max_clks_state =
+               pool->base.dccg->max_clks_state =
                                        static_clk_info.max_clocks_state;
 
        {
index c69fa4bfab0af125974e18fcc6f9adc798edfd2f..bf8b68f8db4f7ffab3329b7170906abbbdc42d2d 100644 (file)
@@ -145,10 +145,10 @@ static bool dpp_get_optimal_number_of_taps(
                pixel_width = scl_data->viewport.width;
 
        /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
-       if (scl_data->viewport.width  != scl_data->h_active &&
-               scl_data->viewport.height != scl_data->v_active &&
+       if (scl_data->format == PIXEL_FORMAT_FP16 &&
                dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
-               scl_data->format == PIXEL_FORMAT_FP16)
+               scl_data->ratios.horz.value != dc_fixpt_one.value &&
+               scl_data->ratios.vert.value != dc_fixpt_one.value)
                return false;
 
        if (scl_data->viewport.width > scl_data->h_active &&
@@ -445,10 +445,10 @@ void dpp1_set_cursor_position(
                uint32_t width)
 {
        struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
-       int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+       int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
        uint32_t cur_en = pos->enable ? 1 : 0;
 
-       if (src_x_offset >= (int)param->viewport_width)
+       if (src_x_offset >= (int)param->viewport.width)
                cur_en = 0;  /* not visible beyond right edge*/
 
        if (src_x_offset + (int)width <= 0)
@@ -459,6 +459,18 @@ void dpp1_set_cursor_position(
 
 }
 
+void dpp1_cnv_set_optional_cursor_attributes(
+               struct dpp *dpp_base,
+               struct dpp_cursor_attributes *attr)
+{
+       struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+       if (attr) {
+               REG_UPDATE(CURSOR0_FP_SCALE_BIAS,  CUR0_FP_BIAS,  attr->bias);
+               REG_UPDATE(CURSOR0_FP_SCALE_BIAS,  CUR0_FP_SCALE, attr->scale);
+       }
+}
+
 void dpp1_dppclk_control(
                struct dpp *dpp_base,
                bool dppclk_div,
@@ -499,6 +511,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
                .dpp_full_bypass                = dpp1_full_bypass,
                .set_cursor_attributes = dpp1_set_cursor_attributes,
                .set_cursor_position = dpp1_set_cursor_position,
+               .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
                .dpp_dppclk_control = dpp1_dppclk_control,
                .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
 };
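
The reworked FP16 rejection above compares each scaling ratio against dc_fixpt_one by raw value, i.e. it tests for exact 1:1 scaling. A sketch of that raw-bits comparison, assuming a Q32.32 fixed-point layout (the actual dc_fixpt representation may differ):

#include <stdbool.h>
#include <stdio.h>

/* Assumed Q32.32 layout: integer part in the upper 32 bits. Only the
 * raw-value comparison matters for the check above. */
struct fixpt { long long value; };

static const struct fixpt fixpt_one = { 1LL << 32 };

static bool is_identity_scale(struct fixpt horz, struct fixpt vert)
{
        /* Same shape as scl_data->ratios.horz.value != dc_fixpt_one.value */
        return horz.value == fixpt_one.value && vert.value == fixpt_one.value;
}

int main(void)
{
        struct fixpt one = { 1LL << 32 };
        struct fixpt half = { 1LL << 31 };

        printf("1:1? %d\n", is_identity_scale(one, one));  /* prints 1 */
        printf("1:1? %d\n", is_identity_scale(half, one)); /* prints 0 */
        return 0;
}
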
index e862cafa6501b78e7601fe9e95ebf7f81b728130..e2889e61b18c682dd796de5b756fb2a1815631a6 100644 (file)
        SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
        SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
        SRI(CURSOR0_COLOR1, CNVC_CUR, id), \
+       SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \
        SRI(DPP_CONTROL, DPP_TOP, id), \
        SRI(CM_HDR_MULT_COEF, CM, id)
 
        TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
        TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
        TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+       TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \
+       TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \
        TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
        TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh)
 
        type CUR0_COLOR1; \
        type DPPCLK_RATE_CONTROL; \
        type DPP_CLOCK_ENABLE; \
-       type CM_HDR_MULT_COEF;
+       type CM_HDR_MULT_COEF; \
+       type CUR0_FP_BIAS; \
+       type CUR0_FP_SCALE;
 
 struct dcn_dpp_shift {
        TF_REG_FIELD_LIST(uint8_t)
@@ -1329,7 +1334,8 @@ struct dcn_dpp_mask {
        uint32_t CURSOR0_COLOR0; \
        uint32_t CURSOR0_COLOR1; \
        uint32_t DPP_CONTROL; \
-       uint32_t CM_HDR_MULT_COEF;
+       uint32_t CM_HDR_MULT_COEF; \
+       uint32_t CURSOR0_FP_SCALE_BIAS;
 
 struct dcn_dpp_registers {
        DPP_COMMON_REG_VARIABLE_LIST
@@ -1370,6 +1376,10 @@ void dpp1_set_cursor_position(
                const struct dc_cursor_mi_param *param,
                uint32_t width);
 
+void dpp1_cnv_set_optional_cursor_attributes(
+                       struct dpp *dpp_base,
+                       struct dpp_cursor_attributes *attr);
+
 bool dpp1_dscl_is_lb_conf_valid(
                int ceil_vratio,
                int num_partitions,
index f862fd148ccaff5190597a9fc34118299c57122e..4a863a5dab4178103698f574e1fa64b9dd85a625 100644 (file)
@@ -621,6 +621,10 @@ static void dpp1_dscl_set_manual_ratio_init(
 static void dpp1_dscl_set_recout(
                        struct dcn10_dpp *dpp, const struct rect *recout)
 {
+       int visual_confirm_on = 0;
+       if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
+               visual_confirm_on = 1;
+
        REG_SET_2(RECOUT_START, 0,
                /* First pixel of RECOUT */
                         RECOUT_START_X, recout->x,
@@ -632,8 +636,7 @@ static void dpp1_dscl_set_recout(
                         RECOUT_WIDTH, recout->width,
                /* Number of RECOUT vertical lines */
                         RECOUT_HEIGHT, recout->height
-                        - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
-                        (dpp->base.inst + 1));
+                        - visual_confirm_on * 4 * (dpp->base.inst + 1));
 }
 
 /* Main function to program scaler and line buffer in manual scaling mode */
@@ -655,6 +658,12 @@ void dpp1_dscl_set_scaler_manual_scale(
 
        dpp->scl_data = *scl_data;
 
+       /* Autocal off */
+       REG_SET_3(DSCL_AUTOCAL, 0,
+               AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+               AUTOCAL_NUM_PIPE, 0,
+               AUTOCAL_PIPE_ID, 0);
+
        /* Recout */
        dpp1_dscl_set_recout(dpp, &scl_data->recout);
 
@@ -678,12 +687,6 @@ void dpp1_dscl_set_scaler_manual_scale(
        if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
                return;
 
-       /* Autocal off */
-       REG_SET_3(DSCL_AUTOCAL, 0,
-               AUTOCAL_MODE, AUTOCAL_MODE_OFF,
-               AUTOCAL_NUM_PIPE, 0,
-               AUTOCAL_PIPE_ID, 0);
-
        /* Black offsets */
        if (ycbcr)
                REG_SET_2(SCL_BLACK_OFFSET, 0,
index 943143efbb823ae11e5096f21bfa7670d6914a5b..1ea91e153d3a6b05caebdce7291cda3e4b889495 100644 (file)
@@ -190,10 +190,17 @@ static uint32_t convert_and_clamp(
 }
 
 
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
+{
+       REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+                       DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+}
+
 void hubbub1_program_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
-               unsigned int refclk_mhz)
+               unsigned int refclk_mhz,
+               bool safe_to_lower)
 {
        uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0;
        /*
@@ -202,191 +209,259 @@ void hubbub1_program_watermarks(
         */
        uint32_t prog_wm_value;
 
-       REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
-                       DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
 
        /* Repeat for water mark set A, B, C and D. */
        /* clock state A */
-       prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
-
-       DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->a.urgent_ns, prog_wm_value);
+       if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) {
+               hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+                               refclk_mhz, 0x1fffff);
+               REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
 
-       prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
+                       "HW register value = 0x%x\n",
+                       watermarks->a.urgent_ns, prog_wm_value);
+       }
 
-       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
-               prog_wm_value = convert_and_clamp(
-                               watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+       if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) {
+               hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+               REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
-                       watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+                       watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+       }
+
+       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+               if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+                               > hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
+                       hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+                                       watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+               }
 
+               if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
+                               > hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) {
+                       hubbub->watermarks.a.cstate_pstate.cstate_exit_ns =
+                                       watermarks->a.cstate_pstate.cstate_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->a.cstate_pstate.cstate_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               }
+       }
 
+       if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
+                       > hubbub->watermarks.a.cstate_pstate.pstate_change_ns) {
+               hubbub->watermarks.a.cstate_pstate.pstate_change_ns =
+                               watermarks->a.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
-                               watermarks->a.cstate_pstate.cstate_exit_ns,
+                               watermarks->a.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
-                       "HW register value = 0x%x\n",
-                       watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+                       "HW register value = 0x%x\n\n",
+                       watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
        }
 
-       prog_wm_value = convert_and_clamp(
-                       watermarks->a.cstate_pstate.pstate_change_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
-               "HW register value = 0x%x\n\n",
-               watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
-
-
        /* clock state B */
-       prog_wm_value = convert_and_clamp(
-                       watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->b.urgent_ns, prog_wm_value);
-
-
-       prog_wm_value = convert_and_clamp(
-                       watermarks->b.pte_meta_urgent_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+       if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) {
+               hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
+                               refclk_mhz, 0x1fffff);
+               REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
 
+               DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
+                       "HW register value = 0x%x\n",
+                       watermarks->b.urgent_ns, prog_wm_value);
+       }
 
-       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
-               prog_wm_value = convert_and_clamp(
-                               watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+       if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) {
+               hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_B calculated =%d\n"
+               REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
-                       watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+                       watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+       }
+
+       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+               if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+                               > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
+                       hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+                                       watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+               }
 
+               if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
+                               > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) {
+                       hubbub->watermarks.b.cstate_pstate.cstate_exit_ns =
+                                       watermarks->b.cstate_pstate.cstate_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->b.cstate_pstate.cstate_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               }
+       }
 
+       if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
+                       > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) {
+               hubbub->watermarks.b.cstate_pstate.pstate_change_ns =
+                               watermarks->b.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
-                               watermarks->b.cstate_pstate.cstate_exit_ns,
+                               watermarks->b.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
-                       "HW register value = 0x%x\n",
-                       watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
+                       "HW register value = 0x%x\n\n",
+                       watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
        }
 
-       prog_wm_value = convert_and_clamp(
-                       watermarks->b.cstate_pstate.pstate_change_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
-               "HW register value = 0x%x\n",
-               watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
-
        /* clock state C */
-       prog_wm_value = convert_and_clamp(
-                       watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->c.urgent_ns, prog_wm_value);
-
-
-       prog_wm_value = convert_and_clamp(
-                       watermarks->c.pte_meta_urgent_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+       if (safe_to_lower || watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) {
+               hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
+                               refclk_mhz, 0x1fffff);
+               REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
 
+               DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
+                       "HW register value = 0x%x\n",
+                       watermarks->c.urgent_ns, prog_wm_value);
+       }
 
-       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
-               prog_wm_value = convert_and_clamp(
-                               watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+       if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) {
+               hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_C calculated =%d\n"
+               REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
-                       watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+                       watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+       }
 
+       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+               if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+                               > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
+                       hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+                                       watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+               }
+
+               if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
+                               > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) {
+                       hubbub->watermarks.c.cstate_pstate.cstate_exit_ns =
+                                       watermarks->c.cstate_pstate.cstate_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->c.cstate_pstate.cstate_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               }
+       }
 
+       if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
+                       > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) {
+               hubbub->watermarks.c.cstate_pstate.pstate_change_ns =
+                               watermarks->c.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
-                               watermarks->c.cstate_pstate.cstate_exit_ns,
+                               watermarks->c.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
-                       "HW register value = 0x%x\n",
-                       watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
+                       "HW register value = 0x%x\n\n",
+                       watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
        }
 
-       prog_wm_value = convert_and_clamp(
-                       watermarks->c.cstate_pstate.pstate_change_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
-               "HW register value = 0x%x\n",
-               watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
-
        /* clock state D */
-       prog_wm_value = convert_and_clamp(
-                       watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->d.urgent_ns, prog_wm_value);
-
-       prog_wm_value = convert_and_clamp(
-                       watermarks->d.pte_meta_urgent_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
-               "HW register value = 0x%x\n",
-               watermarks->d.pte_meta_urgent_ns, prog_wm_value);
-
-
-       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
-               prog_wm_value = convert_and_clamp(
-                               watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+       if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) {
+               hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_D calculated =%d\n"
-                       "HW register value = 0x%x\n",
-                       watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+               REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
 
+               DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
+                       "HW register value = 0x%x\n",
+                       watermarks->d.urgent_ns, prog_wm_value);
+       }
 
-               prog_wm_value = convert_and_clamp(
-                               watermarks->d.cstate_pstate.cstate_exit_ns,
+       if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) {
+               hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
+               prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
-               REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
-               DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+               REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
-                       watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+                       watermarks->d.pte_meta_urgent_ns, prog_wm_value);
        }
 
+       if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+               if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+                               > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
+                       hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+                                       watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+               }
 
-       prog_wm_value = convert_and_clamp(
-                       watermarks->d.cstate_pstate.pstate_change_ns,
-                       refclk_mhz, 0x1fffff);
-       REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
-       DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
-               "HW register value = 0x%x\n\n",
-               watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+               if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
+                               > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) {
+                       hubbub->watermarks.d.cstate_pstate.cstate_exit_ns =
+                                       watermarks->d.cstate_pstate.cstate_exit_ns;
+                       prog_wm_value = convert_and_clamp(
+                                       watermarks->d.cstate_pstate.cstate_exit_ns,
+                                       refclk_mhz, 0x1fffff);
+                       REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+                       DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
+                               "HW register value = 0x%x\n",
+                               watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+               }
+       }
 
-       REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
-                       DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+       if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
+                       > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) {
+               hubbub->watermarks.d.cstate_pstate.pstate_change_ns =
+                               watermarks->d.cstate_pstate.pstate_change_ns;
+               prog_wm_value = convert_and_clamp(
+                               watermarks->d.cstate_pstate.pstate_change_ns,
+                               refclk_mhz, 0x1fffff);
+               REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+               DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+                       "HW register value = 0x%x\n\n",
+                       watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+       }
 
        REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
                        DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
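
Every watermark branch added above repeats one hysteresis pattern: keep a cached copy of the last value programmed, and touch the register only when the new watermark is higher or when lowering is explicitly safe (safe_to_lower). A minimal sketch, with printf standing in for REG_WRITE and all names illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Cached copy of the last value programmed, as hubbub->watermarks above. */
struct wm_cache { unsigned int urgent_ns; };

static void program_urgent_wm(struct wm_cache *cache, unsigned int wm_ns,
                              bool safe_to_lower)
{
        if (safe_to_lower || wm_ns > cache->urgent_ns) {
                cache->urgent_ns = wm_ns;
                printf("REG_WRITE urgent watermark: %u ns\n", wm_ns);
        }
}

int main(void)
{
        struct wm_cache cache = { 0 };

        program_urgent_wm(&cache, 200, false); /* raise: programmed */
        program_urgent_wm(&cache, 100, false); /* lower, unsafe: skipped */
        program_urgent_wm(&cache, 100, true);  /* lower, safe: programmed */
        return 0;
}
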
@@ -408,6 +483,11 @@ void hubbub1_update_dchub(
        struct hubbub *hubbub,
        struct dchub_init_data *dh_data)
 {
+       if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) {
+               ASSERT(false);
+               /* should not come here */
+               return;
+       }
        /* TODO: port code from dal2 */
        switch (dh_data->fb_mode) {
        case FRAME_BUFFER_MODE_ZFB_ONLY:
index 6315a0e6b0d69aca4833fa5ea5ea286268b5964d..d6e596eef4c5523b03f0b98e36583108c5819986 100644 (file)
@@ -185,6 +185,7 @@ struct hubbub {
        const struct dcn_hubbub_shift *shifts;
        const struct dcn_hubbub_mask *masks;
        unsigned int debug_test_index_pstate;
+       struct dcn_watermark_set watermarks;
 };
 
 void hubbub1_update_dchub(
@@ -194,10 +195,13 @@ void hubbub1_update_dchub(
 bool hubbub1_verify_allow_pstate_change_high(
        struct hubbub *hubbub);
 
+void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
+
 void hubbub1_program_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
-               unsigned int refclk_mhz);
+               unsigned int refclk_mhz,
+               bool safe_to_lower);
 
 void hubbub1_toggle_watermark_change_req(
                struct hubbub *hubbub);
index c28085be39ff9530730481f98e3c2c800ea167cc..2138cd3c5d1dc12c73c0f0abef6e13fbbf4bf9a8 100644 (file)
@@ -152,21 +152,19 @@ void hubp1_program_tiling(
                        PIPE_ALIGNED, info->gfx9.pipe_aligned);
 }
 
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
        struct hubp *hubp,
-       enum dc_rotation_angle rotation,
        enum surface_pixel_format format,
        const union plane_size *plane_size,
-       struct dc_plane_dcc_param *dcc,
-       bool horizontal_mirror)
+       struct dc_plane_dcc_param *dcc)
 {
        struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-       uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+       uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c;
 
        /* Program data and meta surface pitch (calculation from addrlib)
         * 444 or 420 luma
         */
-       if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+       if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) {
                ASSERT(plane_size->video.chroma_pitch != 0);
                /* Chroma pitch zero can cause system hang! */
 
@@ -192,13 +190,22 @@ void hubp1_program_size_and_rotation(
        if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
                REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
                        PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+}
+
+void hubp1_program_rotation(
+       struct hubp *hubp,
+       enum dc_rotation_angle rotation,
+       bool horizontal_mirror)
+{
+       struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+       uint32_t mirror;
+
 
        if (horizontal_mirror)
                mirror = 1;
        else
                mirror = 0;
 
-
        /* Program rotation angle and horz mirror - no mirror */
        if (rotation == ROTATION_ANGLE_0)
                REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
@@ -287,6 +294,10 @@ void hubp1_program_pixel_format(
                REG_UPDATE(DCSURF_SURFACE_CONFIG,
                                SURFACE_PIXEL_FORMAT, 66);
                break;
+       case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 12);
+               break;
        default:
                BREAK_TO_DEBUGGER();
                break;
@@ -450,9 +461,6 @@ bool hubp1_program_surface_flip_and_addr(
 
        hubp->request_address = *address;
 
-       if (flip_immediate)
-               hubp->current_address = *address;
-
        return true;
 }
 
@@ -481,8 +489,8 @@ void hubp1_program_surface_config(
 {
        hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
        hubp1_program_tiling(hubp, tiling_info, format);
-       hubp1_program_size_and_rotation(
-                       hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+       hubp1_program_size(hubp, format, plane_size, dcc);
+       hubp1_program_rotation(hubp, rotation, horizontal_mirror);
        hubp1_program_pixel_format(hubp, format);
 }
 
@@ -688,7 +696,6 @@ bool hubp1_is_flip_pending(struct hubp *hubp)
        if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
                return true;
 
-       hubp->current_address = hubp->request_address;
        return false;
 }
 
@@ -1061,9 +1068,11 @@ void hubp1_cursor_set_position(
                const struct dc_cursor_mi_param *param)
 {
        struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-       int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+       int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x;
+       int x_hotspot = pos->x_hotspot;
+       int y_hotspot = pos->y_hotspot;
+       uint32_t dst_x_offset;
        uint32_t cur_en = pos->enable ? 1 : 0;
-       uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
 
        /*
         * Guard against cursor_set_position() being called with invalid
@@ -1075,6 +1084,18 @@ void hubp1_cursor_set_position(
        if (hubp->curs_attr.address.quad_part == 0)
                return;
 
+       if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
+               src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
+               y_hotspot = pos->x_hotspot;
+               x_hotspot = pos->y_hotspot;
+       }
+
+       if (param->mirror) {
+               x_hotspot = param->viewport.width - x_hotspot;
+               src_x_offset = param->viewport.x + param->viewport.width - src_x_offset;
+       }
+
+       dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
        dst_x_offset *= param->ref_clk_khz;
        dst_x_offset /= param->pixel_clk_khz;
 
@@ -1085,7 +1106,7 @@ void hubp1_cursor_set_position(
                                dc_fixpt_from_int(dst_x_offset),
                                param->h_scale_ratio));
 
-       if (src_x_offset >= (int)param->viewport_width)
+       if (src_x_offset >= (int)param->viewport.width)
                cur_en = 0;  /* not visible beyond right edge*/
 
        if (src_x_offset + (int)hubp->curs_attr.width <= 0)
@@ -1102,8 +1123,8 @@ void hubp1_cursor_set_position(
                        CURSOR_Y_POSITION, pos->y);
 
        REG_SET_2(CURSOR_HOT_SPOT, 0,
-                       CURSOR_HOT_SPOT_X, pos->x_hotspot,
-                       CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+                       CURSOR_HOT_SPOT_X, x_hotspot,
+                       CURSOR_HOT_SPOT_Y, y_hotspot);
 
        REG_SET(CURSOR_DST_OFFSET, 0,
                        CURSOR_DST_X_OFFSET, dst_x_offset);
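
The rotation and mirror handling added to hubp1_cursor_set_position() remaps the hotspot before programming: a 90/270-degree rotation derives the x offset from the y coordinate and swaps the hotspots, and mirroring reflects both across the viewport width. A hedged sketch of that remapping with invented types:

#include <stdbool.h>
#include <stdio.h>

struct rect { int x, width; };

static void remap_cursor(int *x_hot, int *y_hot, int *src_x_offset,
                         int pos_x, int pos_y, bool rot_90_or_270,
                         bool mirror, struct rect vp)
{
        if (rot_90_or_270) {
                /* As above: take the x offset from the y coordinate,
                 * using the pre-swap y hotspot, then swap hotspots. */
                int tmp = *x_hot;

                *src_x_offset = pos_y - *y_hot - vp.x;
                *x_hot = *y_hot;
                *y_hot = tmp;
        } else {
                *src_x_offset = pos_x - *x_hot - vp.x;
        }
        if (mirror) {
                *x_hot = vp.width - *x_hot;
                *src_x_offset = vp.x + vp.width - *src_x_offset;
        }
}

int main(void)
{
        struct rect vp = { .x = 0, .width = 1920 };
        int x_hot = 4, y_hot = 8, off = 0;

        remap_cursor(&x_hot, &y_hot, &off, 100, 50, true, false, vp);
        printf("hotspot=(%d,%d) src_x_offset=%d\n", x_hot, y_hot, off);
        return 0;
}
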
@@ -1125,7 +1146,7 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
        REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
 }
 
-static struct hubp_funcs dcn10_hubp_funcs = {
+static const struct hubp_funcs dcn10_hubp_funcs = {
        .hubp_program_surface_flip_and_addr =
                        hubp1_program_surface_flip_and_addr,
        .hubp_program_surface_config =
index d901d5092969d6897022243cea08b6351caaa0f1..f689feace82d189ef1e99a16a955b364c1b61e34 100644 (file)
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
-       HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
-       HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
        HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
 #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\
        HUBP_MASK_SH_LIST_DCN(mask_sh),\
        HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\
+       HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+       HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
        HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
        HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
        HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
@@ -679,12 +679,15 @@ void hubp1_program_pixel_format(
        struct hubp *hubp,
        enum surface_pixel_format format);
 
-void hubp1_program_size_and_rotation(
+void hubp1_program_size(
        struct hubp *hubp,
-       enum dc_rotation_angle rotation,
        enum surface_pixel_format format,
        const union plane_size *plane_size,
-       struct dc_plane_dcc_param *dcc,
+       struct dc_plane_dcc_param *dcc);
+
+void hubp1_program_rotation(
+       struct hubp *hubp,
+       enum dc_rotation_angle rotation,
        bool horizontal_mirror);
 
 void hubp1_program_tiling(
index f8e0576af6e0b327b9f685370b4c752f7ecd3d1c..c87f6e603055a3dae9f75c5e6f8786ebfba9ef8a 100644 (file)
@@ -337,13 +337,13 @@ void dcn10_log_hw_state(struct dc *dc)
 
        DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
                "dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
-                       dc->current_state->bw.dcn.calc_clk.dcfclk_khz,
-                       dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
-                       dc->current_state->bw.dcn.calc_clk.dispclk_khz,
-                       dc->current_state->bw.dcn.calc_clk.dppclk_khz,
-                       dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz,
-                       dc->current_state->bw.dcn.calc_clk.fclk_khz,
-                       dc->current_state->bw.dcn.calc_clk.socclk_khz);
+                       dc->current_state->bw.dcn.clk.dcfclk_khz,
+                       dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
+                       dc->current_state->bw.dcn.clk.dispclk_khz,
+                       dc->current_state->bw.dcn.clk.dppclk_khz,
+                       dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
+                       dc->current_state->bw.dcn.clk.fclk_khz,
+                       dc->current_state->bw.dcn.clk.socclk_khz);
 
        log_mpc_crc(dc);
 
@@ -415,6 +415,8 @@ static void dpp_pg_control(
 
        if (hws->ctx->dc->debug.disable_dpp_power_gate)
                return;
+       if (REG(DOMAIN1_PG_CONFIG) == 0)
+               return;
 
        switch (dpp_inst) {
        case 0: /* DPP0 */
@@ -465,6 +467,8 @@ static void hubp_pg_control(
 
        if (hws->ctx->dc->debug.disable_hubp_power_gate)
                return;
+       if (REG(DOMAIN0_PG_CONFIG) == 0)
+               return;
 
        switch (hubp_inst) {
        case 0: /* DCHUBP0 */
@@ -719,19 +723,7 @@ static void reset_back_end_for_pipe(
                if (!pipe_ctx->stream->dpms_off)
                        core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
                else if (pipe_ctx->stream_res.audio) {
-                       /*
-                        * if stream is already disabled outside of commit streams path,
-                        * audio disable was skipped. Need to do it here
-                        */
-                       pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
-
-                       if (dc->caps.dynamic_audio == true) {
-                               /*we have to dynamic arbitrate the audio endpoints*/
-                               pipe_ctx->stream_res.audio = NULL;
-                               /*we free the resource, need reset is_audio_acquired*/
-                               update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
-                       }
-
+                       dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
                }
 
        }
@@ -842,7 +834,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
 }
 
 
-static void dcn10_verify_allow_pstate_change_high(struct dc *dc)
+void dcn10_verify_allow_pstate_change_high(struct dc *dc)
 {
        static bool should_log_hw_state; /* prevent hw state log by default */
 
@@ -877,7 +869,8 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
                return;
 
        mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
-       opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+       if (opp != NULL)
+               opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
 
        dc->optimized_required = true;
 
@@ -1022,7 +1015,7 @@ static void dcn10_init_hw(struct dc *dc)
        /* Reset all MPCC muxes */
        dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+       for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
                struct timing_generator *tg = dc->res_pool->timing_generators[i];
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                struct hubp *hubp = dc->res_pool->hubps[i];
@@ -1164,12 +1157,19 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c
 
        if (plane_state == NULL)
                return;
+
        addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
        pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
                        pipe_ctx->plane_res.hubp,
                        &plane_state->address,
                        plane_state->flip_immediate);
+
        plane_state->status.requested_address = plane_state->address;
+
+       if (plane_state->flip_immediate)
+               plane_state->status.current_address = plane_state->address;
+
        if (addr_patched)
                pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
 }
@@ -1355,10 +1355,11 @@ static void dcn10_enable_per_frame_crtc_position_reset(
 
        DC_SYNC_INFO("Setting up\n");
        for (i = 0; i < group_size; i++)
-               grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
-                               grouped_pipes[i]->stream_res.tg,
-                               grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
-                               &grouped_pipes[i]->stream->triggered_crtc_reset);
+               if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
+                       grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
+                                       grouped_pipes[i]->stream_res.tg,
+                                       grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+                                       &grouped_pipes[i]->stream->triggered_crtc_reset);
 
        DC_SYNC_INFO("Waiting for trigger\n");
 
@@ -1774,6 +1775,43 @@ static void dcn10_get_surface_visual_confirm_color(
        }
 }
 
+static void dcn10_get_hdr_visual_confirm_color(
+               struct pipe_ctx *pipe_ctx,
+               struct tg_color *color)
+{
+       uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+       /* Determine the overscan color based on the top-most (desktop) plane's context */
+       struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
+
+       while (top_pipe_ctx->top_pipe != NULL)
+               top_pipe_ctx = top_pipe_ctx->top_pipe;
+
+       switch (top_pipe_ctx->plane_res.scl_data.format) {
+       case PIXEL_FORMAT_ARGB2101010:
+               if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+                       /* HDR10, ARGB2101010 - set border color to red */
+                       color->color_r_cr = color_value;
+               }
+               break;
+       case PIXEL_FORMAT_FP16:
+               if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
+                       /* HDR10, FP16 - set border color to blue */
+                       color->color_b_cb = color_value;
+               } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+                       /* FreeSync 2 HDR - set border color to green */
+                       color->color_g_y = color_value;
+               }
+               break;
+       default:
+               /* SDR - set border color to gray */
+               color->color_r_cr = color_value/2;
+               color->color_b_cb = color_value/2;
+               color->color_g_y = color_value/2;
+               break;
+       }
+}
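
For quick reference, the visual-confirm border colors chosen by the switch above map as follows (a summary of the cases, not additional driver behavior):

	/* Visual-confirm border legend:
	 *   red   - HDR10 on ARGB2101010 (unity output transfer function)
	 *   blue  - HDR10 on FP16 (PQ output transfer function)
	 *   green - FreeSync 2 HDR on FP16 (gamma 2.2)
	 *   gray  - SDR (all three components at MAX_TG_COLOR_VALUE / 2)
	 */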
+
 static uint16_t fixed_point_to_int_frac(
        struct fixed31_32 arg,
        uint8_t integer_bits,
@@ -1854,11 +1892,10 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
                dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
 }
 
-
-static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
-       struct mpcc_blnd_cfg blnd_cfg;
+       struct mpcc_blnd_cfg blnd_cfg = {0};
        bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
        int mpcc_id;
        struct mpcc *new_mpcc;
@@ -1869,13 +1906,17 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 
        /* TODO: proper fix once fpga works */
 
-       if (dc->debug.surface_visual_confirm)
+       if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+               dcn10_get_hdr_visual_confirm_color(
+                               pipe_ctx, &blnd_cfg.black_color);
+       } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
                dcn10_get_surface_visual_confirm_color(
                                pipe_ctx, &blnd_cfg.black_color);
-       else
+       } else {
                color_space_to_black_color(
-                       dc, pipe_ctx->stream->output_color_space,
-                       &blnd_cfg.black_color);
+                               dc, pipe_ctx->stream->output_color_space,
+                               &blnd_cfg.black_color);
+       }
 
        if (per_pixel_alpha)
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
@@ -1964,18 +2005,17 @@ static void update_dchubp_dpp(
         * divided by 2
         */
        if (plane_state->update_flags.bits.full_update) {
-               bool should_divided_by_2 = context->bw.dcn.calc_clk.dppclk_khz <=
-                               context->bw.dcn.cur_clk.dispclk_khz / 2;
+               bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+                               dc->res_pool->dccg->clks.dispclk_khz / 2;
 
                dpp->funcs->dpp_dppclk_control(
                                dpp,
                                should_divided_by_2,
                                true);
 
-               dc->current_state->bw.dcn.cur_clk.dppclk_khz =
-                               should_divided_by_2 ?
-                               context->bw.dcn.cur_clk.dispclk_khz / 2 :
-                               context->bw.dcn.cur_clk.dispclk_khz;
+               dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+                                               dc->res_pool->dccg->clks.dispclk_khz / 2 :
+                                                       dc->res_pool->dccg->clks.dispclk_khz;
        }
 
        /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2001,7 +2041,7 @@ static void update_dchubp_dpp(
 
        if (plane_state->update_flags.bits.full_update ||
                plane_state->update_flags.bits.per_pixel_alpha_change)
-               update_mpcc(dc, pipe_ctx);
+               dc->hwss.update_mpcc(dc, pipe_ctx);
 
        if (plane_state->update_flags.bits.full_update ||
                plane_state->update_flags.bits.per_pixel_alpha_change ||
@@ -2063,12 +2103,13 @@ static void update_dchubp_dpp(
 
 static void dcn10_blank_pixel_data(
                struct dc *dc,
-               struct stream_resource *stream_res,
-               struct dc_stream_state *stream,
+               struct pipe_ctx *pipe_ctx,
                bool blank)
 {
        enum dc_color_space color_space;
        struct tg_color black_color = {0};
+       struct stream_resource *stream_res = &pipe_ctx->stream_res;
+       struct dc_stream_state *stream = pipe_ctx->stream;
 
        /* program otg blank color */
        color_space = stream->output_color_space;
@@ -2110,6 +2151,33 @@ static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_res.dpp, hw_mult);
 }
 
+void dcn10_program_pipe(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
+               struct dc_state *context)
+{
+       if (pipe_ctx->plane_state->update_flags.bits.full_update)
+               dcn10_enable_plane(dc, pipe_ctx, context);
+
+       update_dchubp_dpp(dc, pipe_ctx, context);
+
+       set_hdr_multiplier(pipe_ctx);
+
+       if (pipe_ctx->plane_state->update_flags.bits.full_update ||
+                       pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+                       pipe_ctx->plane_state->update_flags.bits.gamma_change)
+               dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
+
+       /* dcn10_translate_regamma_to_hw_format takes 750us to finish,
+        * so only do gamma programming for a full update.
+        * TODO: This can be further optimized/cleaned up.
+        * Always call this for now since it does a memcmp inside before
+        * doing the heavy calculation and programming.
+        */
+       if (pipe_ctx->plane_state->update_flags.bits.full_update)
+               dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+}
+
 static void program_all_pipe_in_tree(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
@@ -2127,31 +2195,12 @@ static void program_all_pipe_in_tree(
                pipe_ctx->stream_res.tg->funcs->program_global_sync(
                                pipe_ctx->stream_res.tg);
 
-               dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res,
-                               pipe_ctx->stream, blank);
+               dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
+
        }
 
        if (pipe_ctx->plane_state != NULL) {
-               if (pipe_ctx->plane_state->update_flags.bits.full_update)
-                       dcn10_enable_plane(dc, pipe_ctx, context);
-
-               update_dchubp_dpp(dc, pipe_ctx, context);
-
-               set_hdr_multiplier(pipe_ctx);
-
-               if (pipe_ctx->plane_state->update_flags.bits.full_update ||
-                               pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
-                               pipe_ctx->plane_state->update_flags.bits.gamma_change)
-                       dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
-
-               /* dcn10_translate_regamma_to_hw_format takes 750us to finish
-                * only do gamma programming for full update.
-                * TODO: This can be further optimized/cleaned up
-                * Always call this for now since it does memcmp inside before
-                * doing heavy calculation and programming
-                */
-               if (pipe_ctx->plane_state->update_flags.bits.full_update)
-                       dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
+               dcn10_program_pipe(dc, pipe_ctx, context);
        }
 
        if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) {
@@ -2165,12 +2214,12 @@ static void dcn10_pplib_apply_display_requirements(
 {
        struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
 
-       pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
-       pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
-       pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
-       pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
-       pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
-       pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+       pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+       pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+       pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+       pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+       pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+       pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
        dce110_fill_display_configs(context, pp_display_cfg);
 
        if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
@@ -2232,8 +2281,6 @@ static void dcn10_apply_ctx_for_surface(
        int i;
        struct timing_generator *tg;
        bool removed_pipe[4] = { false };
-       unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
-       bool program_water_mark = false;
        struct pipe_ctx *top_pipe_to_program =
                        find_top_pipe_for_stream(dc, context, stream);
        DC_LOGGER_INIT(dc->ctx->logger);
@@ -2247,7 +2294,7 @@ static void dcn10_apply_ctx_for_surface(
 
        if (num_planes == 0) {
                /* OTG blank before remove all front end */
-               dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true);
+               dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
        }
 
        /* Disconnect unused mpcc */
@@ -2278,11 +2325,10 @@ static void dcn10_apply_ctx_for_surface(
                        old_pipe_ctx->plane_state &&
                        old_pipe_ctx->stream_res.tg == tg) {
 
-                       hwss1_plane_atomic_disconnect(dc, old_pipe_ctx);
+                       dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
                        removed_pipe[i] = true;
 
-                       DC_LOG_DC(
-                                       "Reset mpcc for pipe %d\n",
+                       DC_LOG_DC("Reset mpcc for pipe %d\n",
                                        old_pipe_ctx->pipe_idx);
                }
        }
@@ -2295,248 +2341,41 @@ static void dcn10_apply_ctx_for_surface(
        if (num_planes == 0)
                false_optc_underflow_wa(dc, stream, tg);
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *old_pipe_ctx =
-                               &dc->current_state->res_ctx.pipe_ctx[i];
-               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe_ctx->stream == stream &&
-                               pipe_ctx->plane_state &&
-                       pipe_ctx->plane_state->update_flags.bits.full_update)
-                       program_water_mark = true;
-
+       for (i = 0; i < dc->res_pool->pipe_count; i++)
                if (removed_pipe[i])
-                       dcn10_disable_plane(dc, old_pipe_ctx);
-       }
-
-       if (program_water_mark) {
-               if (dc->debug.sanity_checks) {
-                       /* pstate stuck check after watermark update */
-                       dcn10_verify_allow_pstate_change_high(dc);
-               }
-
-               /* watermark is for all pipes */
-               hubbub1_program_watermarks(dc->res_pool->hubbub,
-                               &context->bw.dcn.watermarks, ref_clk_mhz);
-
-               if (dc->debug.sanity_checks) {
-                       /* pstate stuck check after watermark update */
-                       dcn10_verify_allow_pstate_change_high(dc);
-               }
-       }
-/*     DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
-                       "\n============== Watermark parameters ==============\n"
-                       "a.urgent_ns: %d \n"
-                       "a.cstate_enter_plus_exit: %d \n"
-                       "a.cstate_exit: %d \n"
-                       "a.pstate_change: %d \n"
-                       "a.pte_meta_urgent: %d \n"
-                       "b.urgent_ns: %d \n"
-                       "b.cstate_enter_plus_exit: %d \n"
-                       "b.cstate_exit: %d \n"
-                       "b.pstate_change: %d \n"
-                       "b.pte_meta_urgent: %d \n",
-                       context->bw.dcn.watermarks.a.urgent_ns,
-                       context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
-                       context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
-                       context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
-                       context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
-                       context->bw.dcn.watermarks.b.urgent_ns,
-                       context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
-                       context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
-                       context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
-                       context->bw.dcn.watermarks.b.pte_meta_urgent_ns
-                       );
-       DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
-                       "\nc.urgent_ns: %d \n"
-                       "c.cstate_enter_plus_exit: %d \n"
-                       "c.cstate_exit: %d \n"
-                       "c.pstate_change: %d \n"
-                       "c.pte_meta_urgent: %d \n"
-                       "d.urgent_ns: %d \n"
-                       "d.cstate_enter_plus_exit: %d \n"
-                       "d.cstate_exit: %d \n"
-                       "d.pstate_change: %d \n"
-                       "d.pte_meta_urgent: %d \n"
-                       "========================================================\n",
-                       context->bw.dcn.watermarks.c.urgent_ns,
-                       context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
-                       context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
-                       context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
-                       context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
-                       context->bw.dcn.watermarks.d.urgent_ns,
-                       context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
-                       context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
-                       context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
-                       context->bw.dcn.watermarks.d.pte_meta_urgent_ns
-                       );
-*/
-}
+                       dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
 
-static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
-{
-       return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
-{
-       bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
-                       context->bw.dcn.calc_clk.dppclk_khz;
-       bool dispclk_increase = context->bw.dcn.calc_clk.dispclk_khz >
-                       context->bw.dcn.cur_clk.dispclk_khz;
-       int disp_clk_threshold = context->bw.dcn.calc_clk.max_supported_dppclk_khz;
-       bool cur_dpp_div = context->bw.dcn.cur_clk.dispclk_khz >
-                       context->bw.dcn.cur_clk.dppclk_khz;
-
-       /* increase clock, looking for div is 0 for current, request div is 1*/
-       if (dispclk_increase) {
-               /* already divided by 2, no need to reach target clk with 2 steps*/
-               if (cur_dpp_div)
-                       return context->bw.dcn.calc_clk.dispclk_khz;
-
-               /* request disp clk is lower than maximum supported dpp clk,
-                * no need to reach target clk with two steps.
-                */
-               if (context->bw.dcn.calc_clk.dispclk_khz <= disp_clk_threshold)
-                       return context->bw.dcn.calc_clk.dispclk_khz;
-
-               /* target dpp clk not request divided by 2, still within threshold */
-               if (!request_dpp_div)
-                       return context->bw.dcn.calc_clk.dispclk_khz;
-
-       } else {
-               /* decrease clock, looking for current dppclk divided by 2,
-                * request dppclk not divided by 2.
-                */
-
-               /* current dpp clk not divided by 2, no need to ramp*/
-               if (!cur_dpp_div)
-                       return context->bw.dcn.calc_clk.dispclk_khz;
-
-               /* current disp clk is lower than current maximum dpp clk,
-                * no need to ramp
-                */
-               if (context->bw.dcn.cur_clk.dispclk_khz <= disp_clk_threshold)
-                       return context->bw.dcn.calc_clk.dispclk_khz;
-
-               /* request dpp clk need to be divided by 2 */
-               if (request_dpp_div)
-                       return context->bw.dcn.calc_clk.dispclk_khz;
-       }
-
-       return disp_clk_threshold;
-}
-
-static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
-{
-       int i;
-       bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
-                               context->bw.dcn.calc_clk.dppclk_khz;
-
-       int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
-
-       /* set disp clk to dpp clk threshold */
-       dc->res_pool->display_clock->funcs->set_clock(
-                       dc->res_pool->display_clock,
-                       dispclk_to_dpp_threshold);
-
-       /* update request dpp clk division option */
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
-               if (!pipe_ctx->plane_state)
-                       continue;
-
-               pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
-                               pipe_ctx->plane_res.dpp,
-                               request_dpp_div,
-                               true);
-       }
-
-       /* If target clk not same as dppclk threshold, set to target clock */
-       if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
-               dc->res_pool->display_clock->funcs->set_clock(
-                               dc->res_pool->display_clock,
-                               context->bw.dcn.calc_clk.dispclk_khz);
-       }
-
-       context->bw.dcn.cur_clk.dispclk_khz =
-                       context->bw.dcn.calc_clk.dispclk_khz;
-       context->bw.dcn.cur_clk.dppclk_khz =
-                       context->bw.dcn.calc_clk.dppclk_khz;
-       context->bw.dcn.cur_clk.max_supported_dppclk_khz =
-                       context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+       if (dc->hwseq->wa.DEGVIDCN10_254)
+               hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
 }
 
 static void dcn10_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
-               bool decrease_allowed)
+               bool safe_to_lower)
 {
-       struct pp_smu_display_requirement_rv *smu_req_cur =
-                       &dc->res_pool->pp_smu_req;
-       struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
-       struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
-       if (dc->debug.sanity_checks) {
+       if (dc->debug.sanity_checks)
                dcn10_verify_allow_pstate_change_high(dc);
-       }
-
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-               return;
-
-       if (should_set_clock(
-                       decrease_allowed,
-                       context->bw.dcn.calc_clk.dcfclk_khz,
-                       dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
-               context->bw.dcn.cur_clk.dcfclk_khz =
-                               context->bw.dcn.calc_clk.dcfclk_khz;
-               smu_req.hard_min_dcefclk_khz =
-                               context->bw.dcn.calc_clk.dcfclk_khz;
-       }
-
-       if (should_set_clock(
-                       decrease_allowed,
-                       context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
-                       dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
-               context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
-                               context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
-       }
-
-       if (should_set_clock(
-                       decrease_allowed,
-                       context->bw.dcn.calc_clk.fclk_khz,
-                       dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
-               context->bw.dcn.cur_clk.fclk_khz =
-                               context->bw.dcn.calc_clk.fclk_khz;
-               smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
-       }
-
-       smu_req.display_count = context->stream_count;
-
-       if (pp_smu->set_display_requirement)
-               pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
 
-       *smu_req_cur = smu_req;
+       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+               if (context->stream_count == 0)
+                       context->bw.dcn.clk.phyclk_khz = 0;
 
-       /* make sure dcf clk is before dpp clk to
-        * make sure we have enough voltage to run dpp clk
-        */
-       if (should_set_clock(
-                       decrease_allowed,
-                       context->bw.dcn.calc_clk.dispclk_khz,
-                       dc->current_state->bw.dcn.cur_clk.dispclk_khz)) {
+               dc->res_pool->dccg->funcs->update_clocks(
+                               dc->res_pool->dccg,
+                               &context->bw.dcn.clk,
+                               safe_to_lower);
 
-               ramp_up_dispclk_with_dpp(dc, context);
+               dcn10_pplib_apply_display_requirements(dc, context);
        }
 
-       dcn10_pplib_apply_display_requirements(dc, context);
+       hubbub1_program_watermarks(dc->res_pool->hubbub,
+                       &context->bw.dcn.watermarks,
+                       dc->res_pool->ref_clock_inKhz / 1000,
+                       true);
 
-       if (dc->debug.sanity_checks) {
+       if (dc->debug.sanity_checks)
                dcn10_verify_allow_pstate_change_high(dc);
-       }
-
-       /* need to fix this function.  not doing the right thing here */
 }
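
The decrease_allowed flag is renamed safe_to_lower and the per-clock comparisons move behind dccg->funcs->update_clocks(). A minimal sketch of the contract the new flag carries, modeled on the should_set_clock() helper deleted above (hypothetical helper, not part of this patch):

	/* Raising a clock is always permitted; lowering is only permitted
	 * once the caller knows the lower state is safe to commit. */
	static bool should_update_clock(bool safe_to_lower, int new_khz, int cur_khz)
	{
		return (safe_to_lower && new_khz < cur_khz) || new_khz > cur_khz;
	}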
 
 static void set_drr(struct pipe_ctx **pipe_ctx,
@@ -2701,16 +2540,20 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 {
        struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
+       bool flip_pending;
 
        if (plane_state == NULL)
                return;
 
-       plane_state->status.is_flip_pending =
-                       pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+       flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
                                        pipe_ctx->plane_res.hubp);
 
-       plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
-       if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+       plane_state->status.is_flip_pending = flip_pending;
+
+       if (!flip_pending)
+               plane_state->status.current_address = plane_state->status.requested_address;
+
+       if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
                        tg->funcs->is_stereo_left_eye) {
                plane_state->status.is_right_eye =
                                !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
@@ -2719,8 +2562,14 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
 
 static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
 {
-       if (hws->ctx->dc->res_pool->hubbub != NULL)
-               hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+       if (hws->ctx->dc->res_pool->hubbub != NULL) {
+               struct hubp *hubp = hws->ctx->dc->res_pool->hubps[0];
+
+               if (hubp->funcs->hubp_update_dchub)
+                       hubp->funcs->hubp_update_dchub(hubp, dh_data);
+               else
+                       hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
+       }
 }
 
 static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
@@ -2731,9 +2580,11 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
        struct dc_cursor_mi_param param = {
                .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
                .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
-               .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
-               .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
-               .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+               .viewport = pipe_ctx->plane_res.scl_data.viewport,
+               .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
+               .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
+               .rotation = pipe_ctx->plane_state->rotation,
+               .mirror = pipe_ctx->plane_state->horizontal_mirror
        };
 
        if (pipe_ctx->plane_state->address.type
@@ -2757,6 +2608,33 @@ static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
                pipe_ctx->plane_res.dpp, attributes->color_format);
 }
 
+static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
+{
+       uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
+       struct fixed31_32 multiplier;
+       struct dpp_cursor_attributes opt_attr = { 0 };
+       uint32_t hw_scale = 0x3c00; /* 1.0 default multiplier */
+       struct custom_float_format fmt;
+
+       if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
+               return;
+
+       fmt.exponenta_bits = 5;
+       fmt.mantissa_bits = 10;
+       fmt.sign = true;
+
+       if (sdr_white_level > 80) {
+               multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
+               convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
+       }
+
+       opt_attr.scale = hw_scale;
+       opt_attr.bias = 0;
+
+       pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
+                       pipe_ctx->plane_res.dpp, &opt_attr);
+}
+
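The default hw_scale of 0x3c00 is 1.0 in the sign/5-exponent/10-mantissa half-float layout configured above, and for sdr_white_level above the 80-nit SDR reference the multiplier is sdr_white_level / 80. A worked example under those assumptions (illustrative values only):

	/* sdr_white_level = 160 nits:
	 *   multiplier = 160 / 80 = 2.0 = 1.0 x 2^1
	 *   half-float: sign 0, exponent 1 + bias 15 = 16, mantissa 0
	 *   hw_scale   = (16 << 10) | 0 = 0x4000
	 * sdr_white_level <= 80 keeps the default 0x3c00 (1.0). */
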
 static const struct hw_sequencer_funcs dcn10_funcs = {
        .program_gamut_remap = program_gamut_remap,
        .program_csc_matrix = program_csc_matrix,
@@ -2764,7 +2642,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
        .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
        .update_plane_addr = dcn10_update_plane_addr,
+       .plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
        .update_dchub = dcn10_update_dchub,
+       .update_mpcc = dcn10_update_mpcc,
        .update_pending_status = dcn10_update_pending_status,
        .set_input_transfer_func = dcn10_set_input_transfer_func,
        .set_output_transfer_func = dcn10_set_output_transfer_func,
@@ -2778,6 +2658,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .disable_stream = dce110_disable_stream,
        .unblank_stream = dce110_unblank_stream,
        .blank_stream = dce110_blank_stream,
+       .enable_audio_stream = dce110_enable_audio_stream,
+       .disable_audio_stream = dce110_disable_audio_stream,
        .enable_display_power_gating = dcn10_dummy_display_power_gating,
        .disable_plane = dcn10_disable_plane,
        .blank_pixel_data = dcn10_blank_pixel_data,
@@ -2800,7 +2682,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .edp_power_control = hwss_edp_power_control,
        .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
-       .set_cursor_attribute = dcn10_set_cursor_attribute
+       .set_cursor_attribute = dcn10_set_cursor_attribute,
+       .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
 };
 
 
index 44f734b73f9ea41d75e65c49131d6f4b4a27dfdd..7139fb73e966eeaf79823c6204b9fc34ccd5d6da 100644 (file)
@@ -39,4 +39,11 @@ bool is_rgb_cspace(enum dc_color_space output_color_space);
 
 void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
 
+void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+
+void dcn10_program_pipe(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
+               struct dc_state *context);
+
 #endif /* __DC_HWSS_DCN10_H__ */
index 21fa40ac0786b61c4435f61015e11a763321c9b7..6f675206a136a7406859f5486ef5400893f75b11 100644 (file)
@@ -65,11 +65,6 @@ enum {
        DP_MST_UPDATE_MAX_RETRY = 50
 };
 
-
-
-static void aux_initialize(struct dcn10_link_encoder *enc10);
-
-
 static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
        .validate_output_with_stream =
                dcn10_link_encoder_validate_output_with_stream,
@@ -445,12 +440,11 @@ static uint8_t get_frontend_source(
        }
 }
 
-static void configure_encoder(
+void configure_encoder(
        struct dcn10_link_encoder *enc10,
        const struct dc_link_settings *link_settings)
 {
        /* set number of lanes */
-
        REG_SET(DP_CONFIG, 0,
                        DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
 
@@ -602,6 +596,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(
        if (!enc10->base.features.flags.bits.HDMI_6GB_EN &&
                adjusted_pix_clk_khz >= 300000)
                return false;
+       if (enc10->base.ctx->dc->debug.hdmi20_disable &&
+               crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+               return false;
        return true;
 }
 
@@ -734,6 +731,9 @@ void dcn10_link_encoder_construct(
                                __func__,
                                result);
        }
+       if (enc10->base.ctx->dc->debug.hdmi20_disable) {
+               enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+       }
 }
 
 bool dcn10_link_encoder_validate_output_with_stream(
@@ -812,7 +812,7 @@ void dcn10_link_encoder_hw_init(
                ASSERT(result == BP_RESULT_OK);
 
        }
-       aux_initialize(enc10);
+       dcn10_aux_initialize(enc10);
 
        /* reinitialize HPD.
         * hpd_initialize() will pass DIG_FE id to HW context.
@@ -995,6 +995,8 @@ void dcn10_link_encoder_disable_output(
 
        if (!dcn10_is_dig_enabled(enc)) {
                /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+               /* In the DP_Alt_No_Connect case the DIG is already off; after
+                * executing the PHY workaround sequence, do not touch the PHY again. */
                return;
        }
        /* Power-down RX and disable GPU PHY should be paired.
@@ -1347,8 +1349,7 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
                                FN(reg, f1), v1,\
                                FN(reg, f2), v2)
 
-static void aux_initialize(
-       struct dcn10_link_encoder *enc10)
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10)
 {
        enum hpd_source_id hpd_source = enc10->base.hpd_source;
 
index 2a97cdb2cfbb361655fbfdc16fba83e66d627797..49ead12b25329f497e6acf2c43411e1a8548ff11 100644 (file)
@@ -42,6 +42,7 @@
 #define LE_DCN_COMMON_REG_LIST(id) \
        SRI(DIG_BE_CNTL, DIG, id), \
        SRI(DIG_BE_EN_CNTL, DIG, id), \
+       SRI(TMDS_CTL_BITS, DIG, id), \
        SRI(DP_CONFIG, DP, id), \
        SRI(DP_DPHY_CNTL, DP, id), \
        SRI(DP_DPHY_PRBS_CNTL, DP, id), \
@@ -64,6 +65,7 @@
        SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
        SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
 
+
 #define LE_DCN10_REG_LIST(id)\
        LE_DCN_COMMON_REG_LIST(id)
 
@@ -100,6 +102,7 @@ struct dcn10_link_enc_registers {
        uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
        uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
        uint32_t DP_SEC_CNTL1;
+       uint32_t TMDS_CTL_BITS;
 };
 
 #define LE_SF(reg_name, field_name, post_fix)\
@@ -110,6 +113,7 @@ struct dcn10_link_enc_registers {
        LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\
        LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\
        LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\
+       LE_SF(DIG0_TMDS_CTL_BITS, TMDS_CTL0, mask_sh), \
        LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\
        LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\
        LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\
@@ -198,10 +202,11 @@ struct dcn10_link_enc_registers {
        type DP_MSE_SAT_SLOT_COUNT3;\
        type DP_MSE_SAT_UPDATE;\
        type DP_MSE_16_MTP_KEEPOUT;\
+       type DC_HPD_EN;\
+       type TMDS_CTL0;\
        type AUX_HPD_SEL;\
        type AUX_LS_READ_EN;\
-       type AUX_RX_RECEIVE_WINDOW;\
-       type DC_HPD_EN
+       type AUX_RX_RECEIVE_WINDOW
 
 struct dcn10_link_enc_shift {
        DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t);
@@ -266,6 +271,10 @@ void dcn10_link_encoder_setup(
        struct link_encoder *enc,
        enum signal_type signal);
 
+void configure_encoder(
+       struct dcn10_link_encoder *enc10,
+       const struct dc_link_settings *link_settings);
+
 /* enables TMDS PHY output */
 /* TODO: still need depth or just pass in adjusted pixel clock? */
 void dcn10_link_encoder_enable_tmds_output(
@@ -327,4 +336,6 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc,
 
 bool dcn10_is_dig_enabled(struct link_encoder *enc);
 
+void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
+
 #endif /* __DC_LINK_ENCODER__DCN10_H__ */
index 9ca51ae46de743c9a9b455177a33b6eb25716b39..958994edf2c49de84428848bd25abe15e466a036 100644 (file)
@@ -428,7 +428,7 @@ void mpc1_read_mpcc_state(
                        MPCC_BUSY, &s->busy);
 }
 
-const struct mpc_funcs dcn10_mpc_funcs = {
+static const struct mpc_funcs dcn10_mpc_funcs = {
        .read_mpcc_state = mpc1_read_mpcc_state,
        .insert_plane = mpc1_insert_plane,
        .remove_mpcc = mpc1_remove_mpcc,
index 77a1a9d541a410119f92e77cc144477137337983..ab958cff3b7601a0c2dbe187ddb443af6800643c 100644 (file)
@@ -385,7 +385,7 @@ void opp1_destroy(struct output_pixel_processor **opp)
        *opp = NULL;
 }
 
-static struct opp_funcs dcn10_opp_funcs = {
+static const struct opp_funcs dcn10_opp_funcs = {
                .opp_set_dyn_expansion = opp1_set_dyn_expansion,
                .opp_program_fmt = opp1_program_fmt,
                .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
index f2fbce0e3fc56fbd3dd6ff9d36b2e878692dffc1..411f89218e0194fcd80980b239c9e514acc0d9f8 100644 (file)
@@ -1257,6 +1257,37 @@ void optc1_read_otg_state(struct optc *optc1,
                        OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
 }
 
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+               uint32_t *otg_active_width,
+               uint32_t *otg_active_height)
+{
+       uint32_t otg_enabled;
+       uint32_t v_blank_start;
+       uint32_t v_blank_end;
+       uint32_t h_blank_start;
+       uint32_t h_blank_end;
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+
+       REG_GET(OTG_CONTROL,
+                       OTG_MASTER_EN, &otg_enabled);
+
+       if (otg_enabled == 0)
+               return false;
+
+       REG_GET_2(OTG_V_BLANK_START_END,
+                       OTG_V_BLANK_START, &v_blank_start,
+                       OTG_V_BLANK_END, &v_blank_end);
+
+       REG_GET_2(OTG_H_BLANK_START_END,
+                       OTG_H_BLANK_START, &h_blank_start,
+                       OTG_H_BLANK_END, &h_blank_end);
+
+       *otg_active_width = h_blank_start - h_blank_end;
+       *otg_active_height = v_blank_start - v_blank_end;
+       return true;
+}
+
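Assuming the usual OTG convention that blanking starts at the end of the active region and ends at its start, the active size is blank_start minus blank_end. A worked example with illustrative CEA 1080p register values:

	/* h_total 2200: h_blank_end = 192, h_blank_start = 2112 -> width  = 1920
	 * v_total 1125: v_blank_end = 41,  v_blank_start = 1121 -> height = 1080 */
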
 void optc1_clear_optc_underflow(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -1293,6 +1324,72 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
        return (underflow_occurred == 1);
 }
 
+bool optc1_configure_crc(struct timing_generator *optc,
+                         const struct crc_params *params)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       /* Cannot configure crc on a CRTC that is disabled */
+       if (!optc1_is_tg_enabled(optc))
+               return false;
+
+       REG_WRITE(OTG_CRC_CNTL, 0);
+
+       if (!params->enable)
+               return true;
+
+       /* Program frame boundaries */
+       /* Window A x axis start and end. */
+       REG_UPDATE_2(OTG_CRC0_WINDOWA_X_CONTROL,
+                       OTG_CRC0_WINDOWA_X_START, params->windowa_x_start,
+                       OTG_CRC0_WINDOWA_X_END, params->windowa_x_end);
+
+       /* Window A y axis start and end. */
+       REG_UPDATE_2(OTG_CRC0_WINDOWA_Y_CONTROL,
+                       OTG_CRC0_WINDOWA_Y_START, params->windowa_y_start,
+                       OTG_CRC0_WINDOWA_Y_END, params->windowa_y_end);
+
+       /* Window B x axis start and end. */
+       REG_UPDATE_2(OTG_CRC0_WINDOWB_X_CONTROL,
+                       OTG_CRC0_WINDOWB_X_START, params->windowb_x_start,
+                       OTG_CRC0_WINDOWB_X_END, params->windowb_x_end);
+
+       /* Window B y axis start and end. */
+       REG_UPDATE_2(OTG_CRC0_WINDOWB_Y_CONTROL,
+                       OTG_CRC0_WINDOWB_Y_START, params->windowb_y_start,
+                       OTG_CRC0_WINDOWB_Y_END, params->windowb_y_end);
+
+       /* Set crc mode and selection, and enable. Only using CRC0 */
+       REG_UPDATE_3(OTG_CRC_CNTL,
+                       OTG_CRC_CONT_EN, params->continuous_mode ? 1 : 0,
+                       OTG_CRC0_SELECT, params->selection,
+                       OTG_CRC_EN, 1);
+
+       return true;
+}
+
+bool optc1_get_crc(struct timing_generator *optc,
+                   uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+       uint32_t field = 0;
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_GET(OTG_CRC_CNTL, OTG_CRC_EN, &field);
+
+       /* Early return if CRC is not enabled for this CRTC */
+       if (!field)
+               return false;
+
+       REG_GET_2(OTG_CRC0_DATA_RG,
+                       CRC0_R_CR, r_cr,
+                       CRC0_G_Y, g_y);
+
+       REG_GET(OTG_CRC0_DATA_B,
+                       CRC0_B_CB, b_cb);
+
+       return true;
+}
+
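A possible caller-side use of the new CRC hooks, assuming the crc_params fields consumed above (the selection value and the golden-CRC step are illustrative, not from this patch): span window A across the active region for a whole-frame CRC.

	struct crc_params params = {
		.enable = true,
		.continuous_mode = true,
		.selection = 0,			/* CRC0 source select, illustrative */
		.windowa_x_start = 0,
		.windowa_x_end = active_width,	/* e.g. from get_otg_active_size() */
		.windowa_y_start = 0,
		.windowa_y_end = active_height,
	};
	uint32_t r_cr, g_y, b_cb;

	if (tg->funcs->configure_crc(tg, &params) &&
	    tg->funcs->get_crc(tg, &r_cr, &g_y, &b_cb)) {
		/* read after at least one full frame; compare to a golden CRC */
	}
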
 static const struct timing_generator_funcs dcn10_tg_funcs = {
                .validate_timing = optc1_validate_timing,
                .program_timing = optc1_program_timing,
@@ -1305,6 +1402,7 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
                .get_position = optc1_get_position,
                .get_frame_count = optc1_get_vblank_counter,
                .get_scanoutpos = optc1_get_crtc_scanoutpos,
+               .get_otg_active_size = optc1_get_otg_active_size,
                .set_early_control = optc1_set_early_control,
                /* used by enable_timing_synchronization. Not need for FPGA */
                .wait_for_state = optc1_wait_for_state,
@@ -1328,6 +1426,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = {
                .is_tg_enabled = optc1_is_tg_enabled,
                .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
                .clear_optc_underflow = optc1_clear_optc_underflow,
+               .get_crc = optc1_get_crc,
+               .configure_crc = optc1_configure_crc,
 };
 
 void dcn10_timing_generator_init(struct optc *optc1)
index c62052f46460ee8514086f3cdf834ab008b7216e..c1b114209fe8da2f1e5499660548f4db855f889c 100644 (file)
        SRI(CONTROL, VTG, inst),\
        SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
        SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
-       SRI(OTG_GSL_CONTROL, OTG, inst)
+       SRI(OTG_GSL_CONTROL, OTG, inst),\
+       SRI(OTG_CRC_CNTL, OTG, inst),\
+       SRI(OTG_CRC0_DATA_RG, OTG, inst),\
+       SRI(OTG_CRC0_DATA_B, OTG, inst),\
+       SRI(OTG_CRC0_WINDOWA_X_CONTROL, OTG, inst),\
+       SRI(OTG_CRC0_WINDOWA_Y_CONTROL, OTG, inst),\
+       SRI(OTG_CRC0_WINDOWB_X_CONTROL, OTG, inst),\
+       SRI(OTG_CRC0_WINDOWB_Y_CONTROL, OTG, inst)
 
 #define TG_COMMON_REG_LIST_DCN1_0(inst) \
        TG_COMMON_REG_LIST_DCN(inst),\
@@ -138,6 +145,13 @@ struct dcn_optc_registers {
        uint32_t OTG_GSL_WINDOW_X;
        uint32_t OTG_GSL_WINDOW_Y;
        uint32_t OTG_VUPDATE_KEEPOUT;
+       uint32_t OTG_CRC_CNTL;
+       uint32_t OTG_CRC0_DATA_RG;
+       uint32_t OTG_CRC0_DATA_B;
+       uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
+       uint32_t OTG_CRC0_WINDOWA_Y_CONTROL;
+       uint32_t OTG_CRC0_WINDOWB_X_CONTROL;
+       uint32_t OTG_CRC0_WINDOWB_Y_CONTROL;
 };
 
 #define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -232,7 +246,21 @@ struct dcn_optc_registers {
        SF(OTG0_OTG_GSL_CONTROL, OTG_GSL2_EN, mask_sh),\
        SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_EN, mask_sh),\
        SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_FORCE_DELAY, mask_sh),\
-       SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh)
+       SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_CHECK_ALL_FIELDS, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL, OTG_CRC_CONT_EN, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL, OTG_CRC0_SELECT, mask_sh),\
+       SF(OTG0_OTG_CRC_CNTL, OTG_CRC_EN, mask_sh),\
+       SF(OTG0_OTG_CRC0_DATA_RG, CRC0_R_CR, mask_sh),\
+       SF(OTG0_OTG_CRC0_DATA_RG, CRC0_G_Y, mask_sh),\
+       SF(OTG0_OTG_CRC0_DATA_B, CRC0_B_CB, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_START, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWA_X_CONTROL, OTG_CRC0_WINDOWA_X_END, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_START, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWA_Y_CONTROL, OTG_CRC0_WINDOWA_Y_END, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_START, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWB_X_CONTROL, OTG_CRC0_WINDOWB_X_END, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_START, mask_sh),\
+       SF(OTG0_OTG_CRC0_WINDOWB_Y_CONTROL, OTG_CRC0_WINDOWB_Y_END, mask_sh)
 
 
 #define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -363,7 +391,22 @@ struct dcn_optc_registers {
        type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
        type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
        type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
-       type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
+       type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;\
+       type OTG_CRC_CONT_EN;\
+       type OTG_CRC0_SELECT;\
+       type OTG_CRC_EN;\
+       type CRC0_R_CR;\
+       type CRC0_G_Y;\
+       type CRC0_B_CB;\
+       type OTG_CRC0_WINDOWA_X_START;\
+       type OTG_CRC0_WINDOWA_X_END;\
+       type OTG_CRC0_WINDOWA_Y_START;\
+       type OTG_CRC0_WINDOWA_Y_END;\
+       type OTG_CRC0_WINDOWB_X_START;\
+       type OTG_CRC0_WINDOWB_X_END;\
+       type OTG_CRC0_WINDOWB_Y_START;\
+       type OTG_CRC0_WINDOWB_Y_END;
+
 
 #define TG_REG_FIELD_LIST(type) \
        TG_REG_FIELD_LIST_DCN1_0(type)
@@ -507,4 +550,19 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
 
 void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
 
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+               uint32_t *otg_active_width,
+               uint32_t *otg_active_height);
+
+void optc1_enable_crtc_reset(
+               struct timing_generator *optc,
+               int source_tg_inst,
+               struct crtc_trigger_info *crtc_tp);
+
+bool optc1_configure_crc(struct timing_generator *optc,
+                         const struct crc_params *params);
+
+bool optc1_get_crc(struct timing_generator *optc,
+                   uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
index df5cb2d1d1645ebab3f6f4e79469d037a395798d..6b44ed3697a4f4d5fafdc2bd9febede20e4cbae7 100644 (file)
 #include "reg_helper.h"
 #include "dce/dce_abm.h"
 #include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
+
+const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
+       .rob_buffer_size_kbytes = 64,
+       .det_buffer_size_kbytes = 164,
+       .dpte_buffer_size_in_pte_reqs = 42,
+       .dpp_output_buffer_pixels = 2560,
+       .opp_output_buffer_lines = 1,
+       .pixel_chunk_size_kbytes = 8,
+       .pte_enable = 1,
+       .pte_chunk_size_kbytes = 2,
+       .meta_chunk_size_kbytes = 2,
+       .writeback_chunk_size_kbytes = 2,
+       .line_buffer_size_bits = 589824,
+       .max_line_buffer_lines = 12,
+       .IsLineBufferBppFixed = 0,
+       .LineBufferFixedBpp = -1,
+       .writeback_luma_buffer_size_kbytes = 12,
+       .writeback_chroma_buffer_size_kbytes = 8,
+       .max_num_dpp = 4,
+       .max_num_wb = 2,
+       .max_dchub_pscl_bw_pix_per_clk = 4,
+       .max_pscl_lb_bw_pix_per_clk = 2,
+       .max_lb_vscl_bw_pix_per_clk = 4,
+       .max_vscl_hscl_bw_pix_per_clk = 4,
+       .max_hscl_ratio = 4,
+       .max_vscl_ratio = 4,
+       .hscl_mults = 4,
+       .vscl_mults = 4,
+       .max_hscl_taps = 8,
+       .max_vscl_taps = 8,
+       .dispclk_ramp_margin_percent = 1,
+       .underscan_factor = 1.10,
+       .min_vblank_lines = 14,
+       .dppclk_delay_subtotal = 90,
+       .dispclk_delay_subtotal = 42,
+       .dcfclk_cstate_latency = 10,
+       .max_inter_dcn_tile_repeaters = 8,
+       .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
+       .bug_forcing_LC_req_same_size_fixed = 0,
+};
+
+const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
+       .sr_exit_time_us = 9.0,
+       .sr_enter_plus_exit_time_us = 11.0,
+       .urgent_latency_us = 4.0,
+       .writeback_latency_us = 12.0,
+       .ideal_dram_bw_after_urgent_percent = 80.0,
+       .max_request_size_bytes = 256,
+       .downspread_percent = 0.5,
+       .dram_page_open_time_ns = 50.0,
+       .dram_rw_turnaround_time_ns = 17.5,
+       .dram_return_buffer_per_channel_bytes = 8192,
+       .round_trip_ping_latency_dcfclk_cycles = 128,
+       .urgent_out_of_order_return_per_channel_bytes = 256,
+       .channel_interleave_bytes = 256,
+       .num_banks = 8,
+       .num_chans = 2,
+       .vmm_page_size_bytes = 4096,
+       .dram_clock_change_latency_us = 17.0,
+       .writeback_dram_clock_change_latency_us = 23.0,
+       .return_bus_width_bytes = 64,
+};
 
 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
        #define mmDP0_DP_DPHY_INTERNAL_CTRL             0x210f
@@ -294,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
                OPP_MASK_SH_LIST_DCN10(_MASK),
 };
 
+#define aux_engine_regs(id)\
+[id] = {\
+       AUX_COMMON_REG_LIST(id), \
+       .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+               aux_engine_regs(0),
+               aux_engine_regs(1),
+               aux_engine_regs(2),
+               aux_engine_regs(3),
+               aux_engine_regs(4),
+               aux_engine_regs(5)
+};
+
 #define tf_regs(id)\
 [id] = {\
        TF_REG_LIST_DCN10(id),\
@@ -417,13 +495,14 @@ static const struct dce110_clk_src_mask cs_mask = {
 
 static const struct resource_caps res_cap = {
                .num_timing_generator = 4,
+               .num_opp = 4,
                .num_video_plane = 4,
                .num_audio = 4,
                .num_stream_encoder = 4,
                .num_pll = 4,
 };
 
-static const struct dc_debug debug_defaults_drv = {
+static const struct dc_debug_options debug_defaults_drv = {
                .sanity_checks = true,
                .disable_dmcu = true,
                .force_abm_enable = false,
@@ -436,7 +515,7 @@ static const struct dc_debug debug_defaults_drv = {
                 */
                .min_disp_clk_khz = 100000,
 
-               .disable_pplib_clock_request = true,
+               .disable_pplib_clock_request = false,
                .disable_pplib_wm_range = false,
                .pplib_wm_report_mode = WM_REPORT_DEFAULT,
                .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
@@ -451,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
                .max_downscale_src_width = 3840,
 };
 
-static const struct dc_debug debug_defaults_diags = {
+static const struct dc_debug_options debug_defaults_diags = {
                .disable_dmcu = true,
                .force_abm_enable = false,
                .timing_trace = true,
@@ -515,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
        return &opp->base;
 }
 
+struct aux_engine *dcn10_aux_engine_create(
+       struct dc_context *ctx,
+       uint32_t inst)
+{
+       struct aux_engine_dce110 *aux_engine =
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+       if (!aux_engine)
+               return NULL;
+
+       dce110_aux_engine_construct(aux_engine, ctx, inst,
+                                   SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+                                   &aux_engine_regs[inst]);
+
+       return &aux_engine->base;
+}
+
 static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
 {
        struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -680,6 +776,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
                hws->masks = &hwseq_mask;
                hws->wa.DEGVIDCN10_253 = true;
                hws->wa.false_optc_underflow = true;
+               hws->wa.DEGVIDCN10_254 = true;
        }
        return hws;
 }
@@ -762,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
                        kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
+
+               if (pool->base.engines[i] != NULL)
+                       pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
        }
 
        for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -790,8 +890,8 @@ static void destruct(struct dcn10_resource_pool *pool)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 
-       if (pool->base.display_clock != NULL)
-               dce_disp_clk_destroy(&pool->base.display_clock);
+       if (pool->base.dccg != NULL)
+               dce_dccg_destroy(&pool->base.dccg);
 
        kfree(pool->base.pp_smu);
 }
@@ -971,11 +1071,11 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st
        return DC_OK;
 }
 
-static struct dc_cap_funcs cap_funcs = {
+static const struct dc_cap_funcs cap_funcs = {
        .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
 };
 
-static struct resource_funcs dcn10_res_pool_funcs = {
+static const struct resource_funcs dcn10_res_pool_funcs = {
        .destroy = dcn10_destroy_resource_pool,
        .link_enc_create = dcn10_link_encoder_create,
        .validate_bandwidth = dcn_validate_bandwidth,
@@ -1027,6 +1127,8 @@ static bool construct(
        dc->caps.max_slave_planes = 1;
        dc->caps.is_apu = true;
        dc->caps.post_blend_color_processing = false;
+       /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+       dc->caps.force_dp_tps4_for_cp2520 = true;
 
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
@@ -1070,8 +1172,8 @@ static bool construct(
                }
        }
 
-       pool->base.display_clock = dce120_disp_clk_create(ctx);
-       if (pool->base.display_clock == NULL) {
+       pool->base.dccg = dcn1_dccg_create(ctx);
+       if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto fail;
@@ -1191,6 +1293,14 @@ static bool construct(
                        goto fail;
                }
 
+               pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+               if (pool->base.engines[i] == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       dm_error(
+                               "DC:failed to create aux engine!!\n");
+                       goto fail;
+               }
+
                /* check next valid pipe */
                j++;
        }
index c928ee4cd3826dbd813132cc2b074e4f6b5815c2..6f9078f3c4d39a353686ec9b5380cc27c63ba922 100644 (file)
@@ -257,20 +257,18 @@ void enc1_stream_encoder_dp_set_stream_attribute(
        uint8_t colorimetry_bpc;
        uint8_t dynamic_range_rgb = 0; /*full range*/
        uint8_t dynamic_range_ycbcr = 1; /*bt709*/
+       uint8_t dp_pixel_encoding = 0;
+       uint8_t dp_component_depth = 0;
 
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
-       REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
-
        /* set pixel encoding */
        switch (crtc_timing->pixel_encoding) {
        case PIXEL_ENCODING_YCBCR422:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_YCBCR422);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422;
                break;
        case PIXEL_ENCODING_YCBCR444:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_YCBCR444);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444;
 
                if (crtc_timing->flags.Y_ONLY)
                        if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
@@ -278,8 +276,8 @@ void enc1_stream_encoder_dp_set_stream_attribute(
                                 * Color depth of Y-only could be
                                 * 8, 10, 12, 16 bits
                                 */
-                               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                                               DP_PIXEL_ENCODING_TYPE_Y_ONLY);
+                               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_Y_ONLY;
+
                /* Note: DP_MSA_MISC1 bit 7 is the indicator
                 * of Y-only mode.
                 * This bit is set in HW if register
@@ -287,48 +285,55 @@ void enc1_stream_encoder_dp_set_stream_attribute(
                 */
                break;
        case PIXEL_ENCODING_YCBCR420:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_YCBCR420);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
                REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
                break;
        default:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
-                               DP_PIXEL_ENCODING_TYPE_RGB444);
+               dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
                break;
        }
 
        misc1 = REG_READ(DP_MSA_MISC);
+       /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+        * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+        * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+        * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+        */
+       if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ||
+                       (output_color_space == COLOR_SPACE_2020_YCBCR) ||
+                       (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
+                       (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+               misc1 = misc1 | 0x40;
+       else
+               misc1 = misc1 & ~0x40;
 
        /* set color depth */
-
        switch (crtc_timing->display_color_depth) {
        case COLOR_DEPTH_666:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               0);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
                break;
        case COLOR_DEPTH_888:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_8BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_8BPC;
                break;
        case COLOR_DEPTH_101010:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_10BPC);
-
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_10BPC;
                break;
        case COLOR_DEPTH_121212:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_12BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_12BPC;
                break;
        case COLOR_DEPTH_161616:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_16BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_16BPC;
                break;
        default:
-               REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
-                               DP_COMPONENT_PIXEL_DEPTH_6BPC);
+               dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC;
                break;
        }
 
+       /* Set DP pixel encoding and component depth */
+       REG_UPDATE_2(DP_PIXEL_FORMAT,
+                       DP_PIXEL_ENCODING, dp_pixel_encoding,
+                       DP_COMPONENT_DEPTH, dp_component_depth);
+
        /* set dynamic range and YCbCr range */
 
        switch (crtc_timing->display_color_depth) {
@@ -354,7 +359,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
 
        switch (output_color_space) {
        case COLOR_SPACE_SRGB:
-               misc0 = misc0 | 0x0;
                misc1 = misc1 & ~0x80; /* bit7 = 0*/
                dynamic_range_rgb = 0; /*full range*/
                break;
@@ -1087,27 +1091,6 @@ static union audio_cea_channels speakers_to_channels(
        return cea_channels;
 }
 
-static uint32_t calc_max_audio_packets_per_line(
-       const struct audio_crtc_info *crtc_info)
-{
-       uint32_t max_packets_per_line;
-
-       max_packets_per_line =
-               crtc_info->h_total - crtc_info->h_active;
-
-       if (crtc_info->pixel_repetition)
-               max_packets_per_line *= crtc_info->pixel_repetition;
-
-       /* for other hdmi features */
-       max_packets_per_line -= 58;
-       /* for Control Period */
-       max_packets_per_line -= 16;
-       /* Number of Audio Packets per Line */
-       max_packets_per_line /= 32;
-
-       return max_packets_per_line;
-}
-
 static void get_audio_clock_info(
        enum dc_color_depth color_depth,
        uint32_t crtc_pixel_clock_in_khz,
@@ -1201,16 +1184,9 @@ static void enc1_se_setup_hdmi_audio(
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 
        struct audio_clock_info audio_clock_info = {0};
-       uint32_t max_packets_per_line;
-
-       /* For now still do calculation, although this field is ignored when
-        * above HDMI_PACKET_GEN_VERSION set to 1
-        */
-       max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
 
        /* HDMI_AUDIO_PACKET_CONTROL */
-       REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
-                       HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+       REG_UPDATE(HDMI_AUDIO_PACKET_CONTROL,
                        HDMI_AUDIO_DELAY_EN, 1);
 
        /* AFMT_AUDIO_PACKET_CONTROL */
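
In the stream-attribute hunk above, MISC1 bit 6 (0x40) is set whenever colorimetry has to travel in a VSC SDP (YCbCr 4:2:0 output, or any BT.2020 color space) and cleared otherwise; the pixel encoding and component depth are then written together in a single REG_UPDATE_2. A stand-alone sketch of the MISC1 read-modify-write, using hypothetical enum stand-ins for the driver's types:

#include <stdint.h>
#include <stdbool.h>

#define DP_MSA_MISC1_VSC_SDP 0x40       /* bit 6: colorimetry carried in VSC SDP */

/* hypothetical stand-ins for the driver's enums */
enum pix_enc { ENC_RGB444, ENC_YCBCR422, ENC_YCBCR444, ENC_YCBCR420 };
enum color_space {
        CS_SRGB, CS_2020_YCBCR, CS_2020_RGB_FULL, CS_2020_RGB_LIMITED
};

static uint8_t update_misc1(uint8_t misc1, enum pix_enc enc,
                            enum color_space cs)
{
        bool need_vsc_sdp = enc == ENC_YCBCR420 ||
                            cs == CS_2020_YCBCR ||
                            cs == CS_2020_RGB_FULL ||
                            cs == CS_2020_RGB_LIMITED;

        /* same set-or-clear of exactly bit 6 as in the hunk */
        return need_vsc_sdp ? (misc1 | DP_MSA_MISC1_VSC_SDP)
                            : (misc1 & ~DP_MSA_MISC1_VSC_SDP);
}
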
index 034369fbb9e2cb5e5f0eb95931fb7b9e9ddafdfb..5d4527d03045e1902f65bf5f5ea511dd58875f04 100644 (file)
@@ -40,6 +40,14 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
        const struct dc_edid *edid,
        struct dc_edid_caps *edid_caps);
 
+
+/*
+ * Update DP branch info
+ */
+void dm_helpers_dp_update_branch_info(
+               struct dc_context *ctx,
+               const struct dc_link *link);
+
 /*
  * Writes payload allocation table in immediate downstream device.
  */
@@ -103,6 +111,9 @@ bool dm_helpers_submit_i2c(
                const struct dc_link *link,
                struct i2c_command *cmd);
 
+bool dm_helpers_is_dp_sink_present(
+               struct dc_link *link);
+
 enum dc_edid_status dm_helpers_read_local_edid(
                struct dc_context *ctx,
                struct dc_link *link,
index eac4bfe12257620b2c49625b34b9f85250d76a32..58ed2055ef9f7ad8d9f82cd1b2956f1119cbdf6e 100644 (file)
@@ -40,7 +40,7 @@ enum wm_set_id {
        WM_B,
        WM_C,
        WM_D,
-       WM_COUNT,
+       WM_SET_COUNT,
 };
 
 struct pp_smu_wm_set_range {
@@ -53,10 +53,10 @@ struct pp_smu_wm_set_range {
 
 struct pp_smu_wm_range_sets {
        uint32_t num_reader_wm_sets;
-       struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+       struct pp_smu_wm_set_range reader_wm_sets[WM_SET_COUNT];
 
        uint32_t num_writer_wm_sets;
-       struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+       struct pp_smu_wm_set_range writer_wm_sets[WM_SET_COUNT];
 };
 
 struct pp_smu_display_requirement_rv {
index 4ff9b2bba1782f18713ece0fa9faa4cae45bb6e2..eb5ab3978e8407e30cc694eb09f3d83ee2c662eb 100644 (file)
@@ -339,7 +339,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
 #define dm_log_to_buffer(buffer, size, fmt, args)\
        vsnprintf(buffer, size, fmt, args)
 
-unsigned long long dm_get_timestamp(struct dc_context *ctx);
+static inline unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+       return ktime_get_raw_ns();
+}
 
 unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
                unsigned long long current_time_stamp,
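
dm_get_timestamp() becomes a header inline that simply returns ktime_get_raw_ns(), so callers can treat a timestamp as raw monotonic nanoseconds and compute elapsed time by subtraction. A user-space model of the same idea, assuming CLOCK_MONOTONIC_RAW as the closest analogue:

#include <time.h>

/* user-space stand-in for the new inline dm_get_timestamp() */
static unsigned long long get_timestamp_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
        return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* elapsed time is then plain subtraction of two samples */
static unsigned long long elapsed_since_ns(unsigned long long start_ns)
{
        return get_timestamp_ns() - start_ns;
}
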
index ab8c77d4e6dfec4dbd4c6b6a7eff1b10221cdf3e..2b83f922ac02667239756e54b5ec30fd7ed65f15 100644 (file)
@@ -137,7 +137,7 @@ struct dm_pp_clock_range_for_wm_set {
        enum dm_pp_wm_set_id wm_set_id;
        uint32_t wm_min_eng_clk_in_khz;
        uint32_t wm_max_eng_clk_in_khz;
-       uint32_t wm_min_memg_clk_in_khz;
+       uint32_t wm_min_mem_clk_in_khz;
        uint32_t wm_max_mem_clk_in_khz;
 };
 
@@ -150,7 +150,7 @@ struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
        enum dm_pp_wm_set_id wm_set_id;
        uint32_t wm_min_dcfclk_clk_in_khz;
        uint32_t wm_max_dcfclk_clk_in_khz;
-       uint32_t wm_min_memg_clk_in_khz;
+       uint32_t wm_min_mem_clk_in_khz;
        uint32_t wm_max_mem_clk_in_khz;
 };
 
@@ -158,7 +158,7 @@ struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
        enum dm_pp_wm_set_id wm_set_id;
        uint32_t wm_min_socclk_clk_in_khz;
        uint32_t wm_max_socclk_clk_in_khz;
-       uint32_t wm_min_memg_clk_in_khz;
+       uint32_t wm_min_mem_clk_in_khz;
        uint32_t wm_max_mem_clk_in_khz;
 };
 
index f83a608f93e94b564ab2385144827591b34166ef..d97ca6528f9d9d943f41bb416466277d2fcdf2bc 100644 (file)
@@ -36,11 +36,10 @@ CFLAGS_display_mode_lib.o := $(dml_ccflags)
 CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
 CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
 CFLAGS_display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_soc_bounding_box.o := $(dml_ccflags)
 CFLAGS_dml_common_defs.o := $(dml_ccflags)
 
 DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
-         soc_bounding_box.o dml_common_defs.o
+       dml_common_defs.o
 
 AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
 
index fd9d97aab07154cb1e9562d5d22a79deca8040b7..dddeb0d4db8f337bfc4c7825ea47bba046e13935 100644 (file)
 #include "display_mode_lib.h"
 #include "dc_features.h"
 
-static const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
-       .rob_buffer_size_kbytes = 64,
-       .det_buffer_size_kbytes = 164,
-       .dpte_buffer_size_in_pte_reqs = 42,
-       .dpp_output_buffer_pixels = 2560,
-       .opp_output_buffer_lines = 1,
-       .pixel_chunk_size_kbytes = 8,
-       .pte_enable = 1,
-       .pte_chunk_size_kbytes = 2,
-       .meta_chunk_size_kbytes = 2,
-       .writeback_chunk_size_kbytes = 2,
-       .line_buffer_size_bits = 589824,
-       .max_line_buffer_lines = 12,
-       .IsLineBufferBppFixed = 0,
-       .LineBufferFixedBpp = -1,
-       .writeback_luma_buffer_size_kbytes = 12,
-       .writeback_chroma_buffer_size_kbytes = 8,
-       .max_num_dpp = 4,
-       .max_num_wb = 2,
-       .max_dchub_pscl_bw_pix_per_clk = 4,
-       .max_pscl_lb_bw_pix_per_clk = 2,
-       .max_lb_vscl_bw_pix_per_clk = 4,
-       .max_vscl_hscl_bw_pix_per_clk = 4,
-       .max_hscl_ratio = 4,
-       .max_vscl_ratio = 4,
-       .hscl_mults = 4,
-       .vscl_mults = 4,
-       .max_hscl_taps = 8,
-       .max_vscl_taps = 8,
-       .dispclk_ramp_margin_percent = 1,
-       .underscan_factor = 1.10,
-       .min_vblank_lines = 14,
-       .dppclk_delay_subtotal = 90,
-       .dispclk_delay_subtotal = 42,
-       .dcfclk_cstate_latency = 10,
-       .max_inter_dcn_tile_repeaters = 8,
-       .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
-       .bug_forcing_LC_req_same_size_fixed = 0,
-};
-
-static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
-       .sr_exit_time_us = 9.0,
-       .sr_enter_plus_exit_time_us = 11.0,
-       .urgent_latency_us = 4.0,
-       .writeback_latency_us = 12.0,
-       .ideal_dram_bw_after_urgent_percent = 80.0,
-       .max_request_size_bytes = 256,
-       .downspread_percent = 0.5,
-       .dram_page_open_time_ns = 50.0,
-       .dram_rw_turnaround_time_ns = 17.5,
-       .dram_return_buffer_per_channel_bytes = 8192,
-       .round_trip_ping_latency_dcfclk_cycles = 128,
-       .urgent_out_of_order_return_per_channel_bytes = 256,
-       .channel_interleave_bytes = 256,
-       .num_banks = 8,
-       .num_chans = 2,
-       .vmm_page_size_bytes = 4096,
-       .dram_clock_change_latency_us = 17.0,
-       .writeback_dram_clock_change_latency_us = 23.0,
-       .return_bus_width_bytes = 64,
-};
+extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
+extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
 
 static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
 {
index 3c2abcb8a1b0c2c4bbc5af7e6c5bea7643819cdb..6352062488898d4d445d94411635594d197df42d 100644 (file)
@@ -27,7 +27,6 @@
 
 
 #include "dml_common_defs.h"
-#include "soc_bounding_box.h"
 #include "dml1_display_rq_dlg_calc.h"
 
 enum dml_project {
index 7fa0375939aec0dd370cfc0291bc8200c506b895..cbafce649e3334e9e834fb0d9492a7d10ae07c46 100644 (file)
@@ -64,10 +64,9 @@ struct _vcs_dpi_voltage_scaling_st {
        double dscclk_mhz;
        double dcfclk_mhz;
        double socclk_mhz;
-       double dram_speed_mhz;
+       double dram_speed_mts;
        double fabricclk_mhz;
        double dispclk_mhz;
-       double dram_bw_per_chan_gbps;
        double phyclk_mhz;
        double dppclk_mhz;
 };
@@ -112,6 +111,8 @@ struct _vcs_dpi_soc_bounding_box_st {
        double xfc_bus_transport_time_us;
        double xfc_xbuf_latency_tolerance_us;
        int use_urgent_burst_bw;
+       double max_hscl_ratio;
+       double max_vscl_ratio;
        struct _vcs_dpi_voltage_scaling_st clock_limits[7];
 };
 
@@ -304,6 +305,7 @@ struct _vcs_dpi_display_pipe_dest_params_st {
        unsigned char otg_inst;
        unsigned char odm_split_cnt;
        unsigned char odm_combine;
+       unsigned char use_maximum_vstartup;
 };
 
 struct _vcs_dpi_display_pipe_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
deleted file mode 100644 (file)
index 324239c..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "soc_bounding_box.h"
-#include "display_mode_lib.h"
-#include "dc_features.h"
-
-#include "dml_inline_defs.h"
-
-/*
- * NOTE:
- *   This file is gcc-parseable HW gospel, coming straight from HW engineers.
- *
- * It doesn't adhere to Linux kernel style and sometimes will do things in odd
- * ways. Unless there is something clearly wrong with it the code should
- * remain as-is as it provides us with a guarantee from HW that it is correct.
- */
-
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
-{
-       to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
-       to_box->sr_exit_time_us = from_box->sr_exit_time_us;
-       to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
-       to_box->urgent_latency_us = from_box->urgent_latency_us;
-       to_box->writeback_latency_us = from_box->writeback_latency_us;
-}
-
-voltage_scaling_st dml_socbb_voltage_scaling(
-               const soc_bounding_box_st *soc,
-               enum voltage_state voltage)
-{
-       const voltage_scaling_st *voltage_state;
-       const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
-
-       for (voltage_state = soc->clock_limits;
-                       voltage_state < voltage_end && voltage_state->state != voltage;
-                       voltage_state++) {
-       }
-
-       if (voltage_state < voltage_end)
-               return *voltage_state;
-       return soc->clock_limits[DC__VOLTAGE_STATES - 1];
-}
-
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
-{
-       double return_bw;
-
-       voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
-
-       return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
-                       state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
-                                       * box->ideal_dram_bw_after_urgent_percent / 100.0);
-
-       return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
-
-       return return_bw;
-}
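
For reference, the deleted dml_socbb_return_bw_mhz() took the tightest of three limits: return-bus width times DCFCLK, derated aggregate DRAM bandwidth, and return-bus width times the fabric clock. A stand-alone model with illustrative numbers only (the real values come from the SoC bounding box, which this series exposes as extern data instead):

#include <stdio.h>

static double min2(double a, double b) { return a < b ? a : b; }

int main(void)
{
        double bus_width_bytes = 64.0;          /* return_bus_width_bytes */
        double dcfclk_mhz = 600.0;              /* illustrative clocks */
        double fabricclk_mhz = 800.0;
        double dram_bw_per_chan_gbps = 8.0;
        double num_chans = 2.0;
        double derate_pct = 80.0;               /* ideal_dram_bw_after_urgent_percent */

        double bw = min2(bus_width_bytes * dcfclk_mhz,
                         dram_bw_per_chan_gbps * 1000.0 * num_chans *
                                derate_pct / 100.0);
        bw = min2(bus_width_bytes * fabricclk_mhz, bw);

        printf("return bw limit: %.0f\n", bw);  /* 12800 for these inputs */
        return 0;
}
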
index 562ee189d780c4de20d150a16497c27ef44300a8..b9d9930a49743cbae8d54c45d88326a7c3444be9 100644 (file)
@@ -61,7 +61,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
 ###############################################################################
 # DCN 1x
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
 
 AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
index 9c4a56c738c0127aa06224b085500f41f3aa6944..bf40725f982ff4cc92b534aa859a339ba38b4324 100644 (file)
        DDC_GPIO_I2C_REG_LIST(cd),\
        .ddc_setup = 0
 
-#define DDC_MASK_SH_LIST(mask_sh) \
+#define DDC_MASK_SH_LIST_COMMON(mask_sh) \
                SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
                SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
                SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
                SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
                SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
-               SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+               SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh)
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+               DDC_MASK_SH_LIST_COMMON(mask_sh),\
                SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
                SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
 
index ab5483c0c502cedbe04e83375a04da6799a5c12a..f20161c5706d7278cc0c48f92eedebf0bdf0ce73 100644 (file)
@@ -375,6 +375,7 @@ struct gpio *dal_gpio_create_irq(
        case GPIO_ID_GPIO_PAD:
        break;
        default:
+               id = GPIO_ID_HPD;
                ASSERT_CRITICAL(false);
                return NULL;
        }
index 0caee3523017fe74e45a217e604cd57004a64486..83df779984e5da9d32df682f5fea318426684d00 100644 (file)
@@ -43,7 +43,7 @@
 #include "dce80/hw_factory_dce80.h"
 #include "dce110/hw_factory_dce110.h"
 #include "dce120/hw_factory_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/hw_factory_dcn10.h"
 #endif
 
@@ -81,7 +81,7 @@ bool dal_hw_factory_init(
        case DCE_VERSION_12_0:
                dal_hw_factory_dce120_init(factory);
                return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
                dal_hw_factory_dcn10_init(factory);
                return true;
index 55c7074885413e8ebba1ce2ccc886f6586850c87..e7541310480b1ce32f60fff366f56d99c0884b4b 100644 (file)
@@ -43,7 +43,7 @@
 #include "dce80/hw_translate_dce80.h"
 #include "dce110/hw_translate_dce110.h"
 #include "dce120/hw_translate_dce120.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/hw_translate_dcn10.h"
 #endif
 
@@ -78,7 +78,7 @@ bool dal_hw_translate_init(
        case DCE_VERSION_12_0:
                dal_hw_translate_dce120_init(translate);
                return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
                dal_hw_translate_dcn10_init(translate);
                return true;
index 352885cb4d0763dd3bb4912e59722abd9ee4589c..a851d07f01901c1f4dad48090f26d3e0fd1c31c3 100644 (file)
@@ -71,7 +71,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
 ###############################################################################
 # DCN 1.0 family
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 I2CAUX_DCN1 = i2caux_dcn10.o
 
 AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
index bb526ad326e597a449a1c4d38a9fb134e4ba33a7..0afd2fa57bbe5efbf9c988591862dab7b468f7e6 100644 (file)
@@ -128,8 +128,20 @@ static void process_read_reply(
                        ctx->status =
                                I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
                        ctx->operation_succeeded = false;
+               } else if (ctx->returned_byte < ctx->current_read_length) {
+                       ctx->current_read_length -= ctx->returned_byte;
+
+                       ctx->offset += ctx->returned_byte;
+
+                       ++ctx->invalid_reply_retry_aux_on_ack;
+
+                       if (ctx->invalid_reply_retry_aux_on_ack >
+                               AUX_INVALID_REPLY_RETRY_COUNTER) {
+                               ctx->status =
+                               I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+                               ctx->operation_succeeded = false;
+                       }
                } else {
-                       ctx->current_read_length = ctx->returned_byte;
                        ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
                        ctx->transaction_complete = true;
                        ctx->operation_succeeded = true;
@@ -157,6 +169,10 @@ static void process_read_reply(
                        ctx->operation_succeeded = false;
                }
        break;
+       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
@@ -215,6 +231,10 @@ static void process_read_request(
                         * so we should not wait here */
                }
        break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
@@ -282,7 +302,6 @@ static bool read_command(
                                ctx.operation_succeeded);
        }
 
-       request->payload.length = ctx.reply.length;
        return ctx.operation_succeeded;
 }
 
@@ -370,6 +389,10 @@ static void process_write_reply(
                        ctx->operation_succeeded = false;
                }
        break;
+       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
@@ -422,6 +445,10 @@ static void process_write_request(
                         * so we should not wait here */
                }
        break;
+       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+               ctx->operation_succeeded = false;
+       break;
        default:
                ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
                ctx->operation_succeeded = false;
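
The new ACK-with-fewer-bytes branch in process_read_reply() above resumes the read at the advanced offset with the reduced length, and gives up once invalid_reply_retry_aux_on_ack exceeds AUX_INVALID_REPLY_RETRY_COUNTER. A self-contained model of that bounded resume-from-offset loop (the callback type and retry limit here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define SHORT_READ_RETRY_LIMIT 7        /* illustrative bound */

/* returns bytes delivered (possibly short), or a negative value on failure */
typedef int (*read_fn)(uint32_t offset, uint8_t *buf, uint32_t len);

static bool read_all(read_fn rd, uint8_t *buf, uint32_t len)
{
        uint32_t offset = 0;
        int retries = 0;

        while (len > 0) {
                int got = rd(offset, buf + offset, len);

                if (got < 0 || (uint32_t)got > len)
                        return false;
                if ((uint32_t)got < len && ++retries > SHORT_READ_RETRY_LIMIT)
                        return false;   /* mirrors the retry-counter bailout */
                offset += (uint32_t)got;
                len -= (uint32_t)got;
        }
        return true;
}
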
index 8e71324ccb1094435e51d14134c7d42d5a97f163..c33a2898d9671e26d05c9a3345b71c3f8484a70f 100644 (file)
 #ifndef __DAL_AUX_ENGINE_H__
 #define __DAL_AUX_ENGINE_H__
 
-enum aux_transaction_type {
-       AUX_TRANSACTION_TYPE_DP,
-       AUX_TRANSACTION_TYPE_I2C
-};
-
-struct aux_request_transaction_data {
-       enum aux_transaction_type type;
-       enum i2caux_transaction_action action;
-       /* 20-bit AUX channel transaction address */
-       uint32_t address;
-       /* delay, in 100-microsecond units */
-       uint8_t delay;
-       uint32_t length;
-       uint8_t *data;
-};
-
-enum aux_transaction_reply {
-       AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
-       AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
-       AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
-
-       AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
-       AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
-       AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
-
-       AUX_TRANSACTION_REPLY_INVALID = 0xFF
-};
-
-struct aux_reply_transaction_data {
-       enum aux_transaction_reply status;
-       uint32_t length;
-       uint8_t *data;
-};
-
-enum aux_channel_operation_result {
-       AUX_CHANNEL_OPERATION_SUCCEEDED,
-       AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
-       AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
-       AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
-};
+#include "dc_ddc_types.h"
 
 struct aux_engine;
 
@@ -83,6 +44,12 @@ struct aux_engine_funcs {
        void (*process_channel_reply)(
                struct aux_engine *engine,
                struct aux_reply_transaction_data *reply);
+       int (*read_channel_reply)(
+               struct aux_engine *engine,
+               uint32_t size,
+               uint8_t *buffer,
+               uint8_t *reply_result,
+               uint32_t *sw_status);
        enum aux_channel_operation_result (*get_channel_status)(
                struct aux_engine *engine,
                uint8_t *returned_bytes);
index e8d3781deaed028dbee299c3800b5cf7cb7162f3..8b704ab0471cd2cea6a3daacd8c945648354b427 100644 (file)
@@ -97,6 +97,7 @@ struct i2caux *dal_i2caux_dce100_create(
 
        dal_i2caux_dce110_construct(i2caux_dce110,
                                    ctx,
+                                   ARRAY_SIZE(dce100_aux_regs),
                                    dce100_aux_regs,
                                    dce100_hw_engine_regs,
                                    &i2c_shift,
index 5f47f6c007ac079bc26694e5556f7618c8026af8..ae5caa97cacaf61049c07c448e9f454976097c9f 100644 (file)
@@ -198,27 +198,27 @@ static void submit_channel_request(
                ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
                ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
                 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+       if (REG(AUXN_IMPCAL)) {
+               /* clear_aux_error */
+               REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+                               1,
+                               0);
 
-       /* clear_aux_error */
-       REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
-                       1,
-                       0);
-
-       REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
-                       1,
-                       0);
-
-       /* force_default_calibrate */
-       REG_UPDATE_1BY1_2(AUXN_IMPCAL,
-                       AUXN_IMPCAL_ENABLE, 1,
-                       AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+                               1,
+                               0);
 
-       /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
+               /* force_default_calibrate */
+               REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+                               AUXN_IMPCAL_ENABLE, 1,
+                               AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
 
-       REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
-                       1,
-                       0);
+               /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
 
+               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+                               1,
+                               0);
+       }
        /* set the delay and the number of bytes to write */
 
        /* The length include
@@ -275,55 +275,92 @@ static void submit_channel_request(
        REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
 }
 
-static void process_channel_reply(
-       struct aux_engine *engine,
-       struct aux_reply_transaction_data *reply)
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+                             uint8_t *buffer, uint8_t *reply_result,
+                             uint32_t *sw_status)
 {
        struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+       uint32_t bytes_replied;
+       uint32_t reply_result_32;
 
-       /* Need to do a read to get the number of bytes to process
-        * Alternatively, this information can be passed -
-        * but that causes coupling which isn't good either. */
+       *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+                            &bytes_replied);
 
-       uint32_t bytes_replied;
-       uint32_t value;
+       /* In case HPD is LOW, exit AUX transaction */
+       if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+               return -1;
 
-       value = REG_GET(AUX_SW_STATUS,
-                       AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+       /* Need at least the status byte */
+       if (!bytes_replied)
+               return -1;
 
-       if (bytes_replied) {
-               uint32_t reply_result;
+       REG_UPDATE_1BY1_3(AUX_SW_DATA,
+                         AUX_SW_INDEX, 0,
+                         AUX_SW_AUTOINCREMENT_DISABLE, 1,
+                         AUX_SW_DATA_RW, 1);
 
-               REG_UPDATE_1BY1_3(AUX_SW_DATA,
-                               AUX_SW_INDEX, 0,
-                               AUX_SW_AUTOINCREMENT_DISABLE, 1,
-                               AUX_SW_DATA_RW, 1);
+       REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+       reply_result_32 = reply_result_32 >> 4;
+       *reply_result = (uint8_t)reply_result_32;
 
-               REG_GET(AUX_SW_DATA,
-                               AUX_SW_DATA, &reply_result);
+       if (reply_result_32 == 0) { /* ACK */
+               uint32_t i = 0;
 
-               reply_result = reply_result >> 4;
+               /* First byte was already used to get the command status */
+               --bytes_replied;
 
-               switch (reply_result) {
-               case 0: /* ACK */ {
-                       uint32_t i = 0;
+               /* Do not overflow buffer */
+               if (bytes_replied > size)
+                       return -1;
 
-                       /* first byte was already used
-                        * to get the command status */
-                       --bytes_replied;
+               while (i < bytes_replied) {
+                       uint32_t aux_sw_data_val;
 
-                       while (i < bytes_replied) {
-                               uint32_t aux_sw_data_val;
+                       REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+                       buffer[i] = aux_sw_data_val;
+                       ++i;
+               }
 
-                               REG_GET(AUX_SW_DATA,
-                                               AUX_SW_DATA, &aux_sw_data_val);
+               return i;
+       }
 
-                               reply->data[i] = aux_sw_data_val;
-                               ++i;
-                       }
+       return 0;
+}
 
-                       reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+static void process_channel_reply(
+       struct aux_engine *engine,
+       struct aux_reply_transaction_data *reply)
+{
+       int bytes_replied;
+       uint8_t reply_result;
+       uint32_t sw_status;
+
+       bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+                                          &reply_result, &sw_status);
+
+       /* in case HPD is LOW, exit AUX transaction */
+       if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+               reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON;
+               return;
+       }
+
+       if (bytes_replied < 0) {
+               /* Need to handle an error case...
+                * Hopefully the upper-layer function won't call this function
+                * if the number of bytes in the reply was 0, because an error
+                * that should have been handled was surely asserted; for the
+                * hot-plug case this can happen.
+                */
+               if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+                       reply->status = AUX_TRANSACTION_REPLY_INVALID;
+                       ASSERT_CRITICAL(false);
+                       return;
                }
+       } else {
+
+               switch (reply_result) {
+               case 0: /* ACK */
+                       reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
                break;
                case 1: /* NACK */
                        reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
@@ -340,15 +377,6 @@ static void process_channel_reply(
                default:
                        reply->status = AUX_TRANSACTION_REPLY_INVALID;
                }
-       } else {
-               /* Need to handle an error case...
-                * hopefully, upper layer function won't call this function
-                * if the number of bytes in the reply was 0
-                * because there was surely an error that was asserted
-                * that should have been handled
-                * for hot plug case, this could happens*/
-               if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
-                       ASSERT_CRITICAL(false);
        }
 }
 
@@ -371,6 +399,10 @@ static enum aux_channel_operation_result get_channel_status(
        value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
                                10, aux110->timeout_period/10);
 
+       /* in case HPD is LOW, exit AUX transaction */
+       if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+               return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
        /* Note that the following bits are set in 'status.bits'
         * during CTS 4.2.1.2 (FW 3.3.1):
         * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
@@ -402,10 +434,10 @@ static enum aux_channel_operation_result get_channel_status(
                        return AUX_CHANNEL_OPERATION_SUCCEEDED;
                }
        } else {
-               /*time_elapsed >= aux_engine->timeout_period */
-               if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
-                       ASSERT_CRITICAL(false);
-
+               /* time_elapsed >= aux_engine->timeout_period;
+                * AUX_SW_STATUS__AUX_SW_HPD_DISCON has already been handled
+                * at this point
+                */
+               ASSERT_CRITICAL(false);
                return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
        }
 }
@@ -415,6 +447,7 @@ static const struct aux_engine_funcs aux_engine_funcs = {
        .acquire_engine = acquire_engine,
        .submit_channel_request = submit_channel_request,
        .process_channel_reply = process_channel_reply,
+       .read_channel_reply = read_channel_reply,
        .get_channel_status = get_channel_status,
        .is_engine_available = is_engine_available,
 };
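
The refactored read_channel_reply() returns the payload byte count on ACK, 0 on a non-ACK reply (leaving the raw reply code, already shifted down by 4, in *reply_result), and -1 on HPD-low, empty-reply, or buffer-overflow conditions. A sketch of how a caller might branch on that contract; handle_failure(), handle_nack_or_defer() and consume() are hypothetical helpers:

/* sketch only, assuming the signatures added in this diff */
uint8_t buf[16];
uint8_t reply_result;
uint32_t sw_status;
int n = engine->funcs->read_channel_reply(engine, sizeof(buf), buf,
                                          &reply_result, &sw_status);

if (n < 0)
        handle_failure(sw_status);              /* HPD low, empty or oversized reply */
else if (n == 0)
        handle_nack_or_defer(reply_result);     /* non-ACK reply code */
else
        consume(buf, n);                        /* ACK: n payload bytes in buf */
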
index b7256f595052b88a617a68213bd0eee8e4eb26b0..9cbe1a7a6bcb2c994b327c41bd68e151e10a14e6 100644 (file)
@@ -62,12 +62,7 @@ enum dc_i2c_arbitration {
        DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
 };
 
-enum {
-       /* No timeout in HW
-        * (timeout implemented in SW by querying status) */
-       I2C_SETUP_TIME_LIMIT = 255,
-       I2C_HW_BUFFER_SIZE = 538
-};
+
 
 /*
  * @brief
@@ -152,6 +147,11 @@ static bool setup_engine(
        struct i2c_engine *i2c_engine)
 {
        struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+       uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
+       uint32_t reset_length = 0;
+
+       if (hw_engine->base.base.setup_limit != 0)
+               i2c_setup_limit = hw_engine->base.base.setup_limit;
 
        /* Program pin select */
        REG_UPDATE_6(
@@ -164,11 +164,15 @@ static bool setup_engine(
                        DC_I2C_DDC_SELECT, hw_engine->engine_id);
 
        /* Program time limit */
-       REG_UPDATE_N(
-                       SETUP, 2,
-                       FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
-                       FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
-
+       if (hw_engine->base.base.send_reset_length == 0) {
+               /*pre-dcn*/
+               REG_UPDATE_N(
+                               SETUP, 2,
+                               FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit,
+                               FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+       } else {
+               reset_length = hw_engine->base.base.send_reset_length;
+       }
        /* Program HW priority
         * set to High - interrupt software I2C at any time
         * Enable restart of SW I2C that was interrupted by HW
index 5bb04085f670657b5c2d50aa3c6debc46ea059a8..fea2946906ed67682b1adde2194ff15c7051748e 100644 (file)
@@ -192,6 +192,7 @@ struct i2c_hw_engine_dce110 {
        /* number of pending transactions (before GO) */
        uint32_t transaction_count;
        uint32_t engine_keep_power_up_count;
+       uint32_t i2c_setup_time_limit;
 };
 
 struct i2c_hw_engine_dce110_create_arg {
@@ -207,4 +208,11 @@ struct i2c_hw_engine_dce110_create_arg {
 struct i2c_engine *dal_i2c_hw_engine_dce110_create(
        const struct i2c_hw_engine_dce110_create_arg *arg);
 
+enum {
+       I2C_SETUP_TIME_LIMIT_DCE = 255,
+       I2C_SETUP_TIME_LIMIT_DCN = 3,
+       I2C_HW_BUFFER_SIZE = 538,
+       I2C_SEND_RESET_LENGTH_9 = 9,
+       I2C_SEND_RESET_LENGTH_10 = 10,
+};
 #endif
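
setup_engine() in the previous file reads its time limit from the engine when one was programmed (the construct path sets I2C_SETUP_TIME_LIMIT_DCN = 3 for DCN, while pre-DCN parts keep the 255 default) and skips the legacy TIME_LIMIT register write when a send_reset_length is configured. A compact model of that default-with-override selection; the struct here is a stand-in:

#include <stdint.h>

enum { SETUP_TIME_LIMIT_DCE = 255, SETUP_TIME_LIMIT_DCN = 3 };

struct i2c_engine_model {
        uint32_t setup_limit;           /* 0 means not programmed: use default */
        uint32_t send_reset_length;     /* 0 selects the pre-DCN setup path */
};

static uint32_t effective_setup_limit(const struct i2c_engine_model *e)
{
        return e->setup_limit ? e->setup_limit : SETUP_TIME_LIMIT_DCE;
}
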
index 2a047f8ca0e9ab3d9023204edc0b01167f8c26b8..1d748ac1d6d655e3cc8d36af03b96127f27115fb 100644 (file)
@@ -43,6 +43,9 @@
 #include "i2c_sw_engine_dce110.h"
 #include "i2c_hw_engine_dce110.h"
 #include "aux_engine_dce110.h"
+#include "../../dc.h"
+#include "dc_types.h"
+
 
 /*
  * Post-requisites: headers required by this unit
@@ -199,6 +202,7 @@ static const struct dce110_i2c_hw_engine_mask i2c_mask = {
 void dal_i2caux_dce110_construct(
        struct i2caux_dce110 *i2caux_dce110,
        struct dc_context *ctx,
+       unsigned int num_i2caux_inst,
        const struct dce110_aux_registers aux_regs[],
        const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
        const struct dce110_i2c_hw_engine_shift *i2c_shift,
@@ -249,9 +253,22 @@ void dal_i2caux_dce110_construct(
 
                base->i2c_hw_engines[line_id] =
                        dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
-
+               if (base->i2c_hw_engines[line_id] != NULL) {
+                       switch (ctx->dce_version) {
+                       case DCN_VERSION_1_0:
+                               base->i2c_hw_engines[line_id]->setup_limit =
+                                       I2C_SETUP_TIME_LIMIT_DCN;
+                               base->i2c_hw_engines[line_id]->send_reset_length = 0;
+                               break;
+                       default:
+                               base->i2c_hw_engines[line_id]->setup_limit =
+                                       I2C_SETUP_TIME_LIMIT_DCE;
+                               base->i2c_hw_engines[line_id]->send_reset_length = 0;
+                               break;
+                       }
+               }
                ++i;
-       } while (i < ARRAY_SIZE(hw_ddc_lines));
+       } while (i < num_i2caux_inst);
 
        /* Create AUX engines for all lines which have assisted HW AUX;
         * 'i' (loop counter) is used as the DDC/AUX engine_id */
@@ -272,7 +289,7 @@ void dal_i2caux_dce110_construct(
                        dal_aux_engine_dce110_create(&aux_init_data);
 
                ++i;
-       } while (i < ARRAY_SIZE(hw_aux_lines));
+       } while (i < num_i2caux_inst);
 
        /*TODO Generic I2C SW and HW*/
 }
@@ -303,6 +320,7 @@ struct i2caux *dal_i2caux_dce110_create(
 
        dal_i2caux_dce110_construct(i2caux_dce110,
                                    ctx,
+                                   ARRAY_SIZE(dce110_aux_regs),
                                    dce110_aux_regs,
                                    i2c_hw_engine_regs,
                                    &i2c_shift,
index 1b1f71c60ac93baac87d3dae2926035b5a67628f..d3d8cc58666acb12b4cf4996794ac86e19d21bf7 100644 (file)
@@ -45,6 +45,7 @@ struct i2caux *dal_i2caux_dce110_create(
 void dal_i2caux_dce110_construct(
        struct i2caux_dce110 *i2caux_dce110,
        struct dc_context *ctx,
+       unsigned int num_i2caux_inst,
        const struct dce110_aux_registers *aux_regs,
        const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
        const struct dce110_i2c_hw_engine_shift *i2c_shift,
index dafc1a727f7f9c0135cab912723e3d071c1ae74b..a9db047387245e351a9555d43774574b3681643a 100644 (file)
@@ -93,6 +93,7 @@ static void construct(
 {
        dal_i2caux_dce110_construct(i2caux_dce110,
                                    ctx,
+                                   ARRAY_SIZE(dce112_aux_regs),
                                    dce112_aux_regs,
                                    dce112_hw_engine_regs,
                                    &i2c_shift,
index 0e7b1826002792dd930564625dad714e25508293..6a4f344c1db4945f25f80de0a831f7a95b6fa4b7 100644 (file)
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dce120_create(
 
        dal_i2caux_dce110_construct(i2caux_dce110,
                                    ctx,
+                                   ARRAY_SIZE(dce120_aux_regs),
                                    dce120_aux_regs,
                                    dce120_hw_engine_regs,
                                    &i2c_shift,
index e44a8901f38bc7335716fa6189ee19413ced4995..a59c1f50c1e8eb84f15ad645ba1d62a375f1d6af 100644 (file)
@@ -111,6 +111,7 @@ struct i2caux *dal_i2caux_dcn10_create(
 
        dal_i2caux_dce110_construct(i2caux_dce110,
                                    ctx,
+                                   ARRAY_SIZE(dcn10_aux_regs),
                                    dcn10_aux_regs,
                                    dcn10_hw_engine_regs,
                                    &i2c_shift,
index 33de8a8834dc1a71e9f898b81c6a2818549f13e6..b16fb1ff687da70af15a9c9b035c772a4c26c9b2 100644 (file)
@@ -26,6 +26,8 @@
 #ifndef __DAL_ENGINE_H__
 #define __DAL_ENGINE_H__
 
+#include "dc_ddc_types.h"
+
 enum i2caux_transaction_operation {
        I2CAUX_TRANSACTION_READ,
        I2CAUX_TRANSACTION_WRITE
@@ -53,7 +55,8 @@ enum i2caux_transaction_status {
        I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
        I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
        I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
-       I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+       I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+       I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
 };
 
 struct i2caux_transaction_request {
@@ -75,19 +78,6 @@ enum i2c_default_speed {
        I2CAUX_DEFAULT_I2C_SW_SPEED = 50
 };
 
-enum i2caux_transaction_action {
-       I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
-       I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
-       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
-
-       I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
-       I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
-       I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
-
-       I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
-       I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
-};
-
 struct engine;
 
 struct engine_funcs {
@@ -106,6 +96,7 @@ struct engine_funcs {
 
 struct engine {
        const struct engine_funcs *funcs;
+       uint32_t inst;
        struct ddc *ddc;
        struct dc_context *ctx;
 };
index 58fc0f25ecebb68d1d7900cf58c620d5109aa9b1..ded6ea34b714cc81d9dd51aec780dd39f6a64c9d 100644 (file)
@@ -86,6 +86,8 @@ struct i2c_engine {
        struct engine base;
        const struct i2c_engine_funcs *funcs;
        uint32_t timeout_delay;
+       uint32_t setup_limit;
+       uint32_t send_reset_length;
 };
 
 void dal_i2c_engine_construct(
index 14dc8c94d862b049f7c5f7b760d91e201be8ac37..f7ed355fc84f44441563a8aea903477cf89ed5d9 100644 (file)
@@ -59,7 +59,7 @@
 
 #include "dce120/i2caux_dce120.h"
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/i2caux_dcn10.h"
 #endif
 
@@ -91,7 +91,7 @@ struct i2caux *dal_i2caux_create(
                return dal_i2caux_dce100_create(ctx);
        case DCE_VERSION_12_0:
                return dal_i2caux_dce120_create(ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
        case DCN_VERSION_1_0:
                return dal_i2caux_dcn10_create(ctx);
 #endif
@@ -254,7 +254,6 @@ bool dal_i2caux_submit_aux_command(
                        break;
                }
 
-               cmd->payloads->length = request.payload.length;
                ++index_of_payload;
        }
 
index a94942d4e66b91ddcc0f57a636ee60c0e68fc810..9f33306f9014f08139ef908ea677b33b0db3f367 100644 (file)
@@ -33,7 +33,7 @@
 #include "dc_bios_types.h"
 #include "mem_input.h"
 #include "hubp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "mpc.h"
 #endif
 
@@ -138,7 +138,7 @@ struct resource_pool {
        struct output_pixel_processor *opps[MAX_PIPES];
        struct timing_generator *timing_generators[MAX_PIPES];
        struct stream_encoder *stream_enc[MAX_PIPES * 2];
-
+       struct aux_engine *engines[MAX_PIPES];
        struct hubbub *hubbub;
        struct mpc *mpc;
        struct pp_smu_funcs_rv *pp_smu;
@@ -162,7 +162,7 @@ struct resource_pool {
        unsigned int audio_count;
        struct audio_support audio_support;
 
-       struct display_clock *display_clock;
+       struct dccg *dccg;
        struct irq_service *irqs;
 
        struct abm *abm;
@@ -221,7 +221,7 @@ struct pipe_ctx {
        struct pipe_ctx *top_pipe;
        struct pipe_ctx *bottom_pipe;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        struct _vcs_dpi_display_dlg_regs_st dlg_regs;
        struct _vcs_dpi_display_ttu_regs_st ttu_regs;
        struct _vcs_dpi_display_rq_regs_st rq_regs;
@@ -255,8 +255,7 @@ struct dce_bw_output {
 };
 
 struct dcn_bw_output {
-       struct dc_clocks cur_clk;
-       struct dc_clocks calc_clk;
+       struct dc_clocks clk;
        struct dcn_watermark_set watermarks;
 };
 
@@ -277,11 +276,11 @@ struct dc_state {
 
        /* Note: these are big structures, do *not* put on stack! */
        struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+#ifdef CONFIG_X86
        struct dcn_bw_internal_vars dcn_bw_vars;
 #endif
 
-       struct display_clock *dis_clk;
+       struct dccg *dis_clk;
 
        struct kref refcount;
 };
index 30b3a08b91be27dade29620cfc5dbf857c89bad1..538b83303b866e8f5a58b9b47751a3b6ace2578d 100644 (file)
@@ -102,22 +102,13 @@ bool dal_ddc_service_query_ddc_data(
                uint8_t *read_buf,
                uint32_t read_size);
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
-               struct ddc_service *ddc,
-               bool i2c,
-               enum i2c_mot_mode mot,
-               uint32_t address,
-               uint8_t *data,
-               uint32_t len,
-               uint32_t *read);
-
-enum ddc_result dal_ddc_service_write_dpcd_data(
-               struct ddc_service *ddc,
-               bool i2c,
-               enum i2c_mot_mode mot,
-               uint32_t address,
-               const uint8_t *data,
-               uint32_t len);
+int dc_link_aux_transfer(struct ddc_service *ddc,
+                            unsigned int address,
+                            uint8_t *reply,
+                            void *buffer,
+                            unsigned int size,
+                            enum aux_transaction_type type,
+                            enum i2caux_transaction_action action);
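
The two removed DPCD entry points fold into this single dc_link_aux_transfer(), parameterized by transaction type and action. A hedged sketch of how the old read/write calls map onto it; dpcd_read_sketch() and dpcd_write_sketch() are hypothetical wrappers, and `reply` receives the raw AUX reply status byte:

/* sketch: DPCD read of `size` bytes at `address` */
static int dpcd_read_sketch(struct ddc_service *ddc, unsigned int address,
                            uint8_t *reply, void *buffer, unsigned int size)
{
        return dc_link_aux_transfer(ddc, address, reply, buffer, size,
                                    AUX_TRANSACTION_TYPE_DP,
                                    I2CAUX_TRANSACTION_ACTION_DP_READ);
}

/* sketch: DPCD write of `size` bytes at `address` */
static int dpcd_write_sketch(struct ddc_service *ddc, unsigned int address,
                             uint8_t *reply, void *buffer, unsigned int size)
{
        return dc_link_aux_transfer(ddc, address, reply, buffer, size,
                                    AUX_TRANSACTION_TYPE_DP,
                                    I2CAUX_TRANSACTION_ACTION_DP_WRITE);
}
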
 
 void dal_ddc_service_write_scdc_data(
                struct ddc_service *ddc_service,
index 2f783c6500842452b63f290d9c8688ff4c5f7e17..a37255c757e0ce11f33e9803b63cc26c017e631f 100644 (file)
@@ -33,9 +33,10 @@ struct dc_link;
 struct dc_stream_state;
 struct dc_link_settings;
 
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
        struct dc_link *link,
-       struct dc_link_settings *known_limit_link_setting);
+       struct dc_link_settings *known_limit_link_setting,
+       int *fail_count);
 
 bool dp_validate_mode_timing(
        struct dc_link *link,
index 132d18d4b29383c10977d3fc13096b87204dd3b8..ddbb673caa08e2d583061a1a42e00bac955927db 100644 (file)
@@ -625,7 +625,7 @@ bool dcn_validate_bandwidth(
 
 unsigned int dcn_find_dcfclk_suits_all(
        const struct dc *dc,
-       struct clocks_value *clocks);
+       struct dc_clocks *clocks);
 
 void dcn_bw_update_from_pplib(struct dc *dc);
 void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
new file mode 100644 (file)
index 0000000..e79cd4e
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+#include "dc_ddc_types.h"
+#include "include/i2caux_interface.h"
+
+enum i2caux_transaction_operation {
+       I2CAUX_TRANSACTION_READ,
+       I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+       I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+       I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+       enum i2caux_transaction_address_space address_space;
+       uint32_t address;
+       uint32_t length;
+       uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+       I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+       I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+       I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+       I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+       I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+       I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+       I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+       I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+       I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+       I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+       I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+       enum i2caux_transaction_operation operation;
+       struct i2caux_transaction_payload payload;
+       enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+       I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+       I2CAUX_ENGINE_TYPE_AUX,
+       I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+       I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+       I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+       I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+       I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+union aux_config;
+
+struct aux_engine {
+       uint32_t inst;
+       struct ddc *ddc;
+       struct dc_context *ctx;
+       const struct aux_engine_funcs *funcs;
+       /* following values are expressed in milliseconds */
+       uint32_t delay;
+       uint32_t max_defer_write_retry;
+       bool acquire_reset;
+};
+
+struct read_command_context {
+       uint8_t *buffer;
+       uint32_t current_read_length;
+       uint32_t offset;
+       enum i2caux_transaction_status status;
+
+       struct aux_request_transaction_data request;
+       struct aux_reply_transaction_data reply;
+
+       uint8_t returned_byte;
+
+       uint32_t timed_out_retry_aux;
+       uint32_t invalid_reply_retry_aux;
+       uint32_t defer_retry_aux;
+       uint32_t defer_retry_i2c;
+       uint32_t invalid_reply_retry_aux_on_ack;
+
+       bool transaction_complete;
+       bool operation_succeeded;
+};
+
+struct write_command_context {
+       bool mot;
+
+       uint8_t *buffer;
+       uint32_t current_write_length;
+       enum i2caux_transaction_status status;
+
+       struct aux_request_transaction_data request;
+       struct aux_reply_transaction_data reply;
+
+       uint8_t returned_byte;
+
+       uint32_t timed_out_retry_aux;
+       uint32_t invalid_reply_retry_aux;
+       uint32_t defer_retry_aux;
+       uint32_t defer_retry_i2c;
+       uint32_t max_defer_retry;
+       uint32_t ack_m_retry;
+
+       uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+       bool transaction_complete;
+       bool operation_succeeded;
+};
+
+
+struct aux_engine_funcs {
+       void (*destroy)(
+               struct aux_engine **ptr);
+       bool (*acquire_engine)(
+               struct aux_engine *engine);
+       void (*configure)(
+               struct aux_engine *engine,
+               union aux_config cfg);
+       void (*submit_channel_request)(
+               struct aux_engine *engine,
+               struct aux_request_transaction_data *request);
+       void (*process_channel_reply)(
+               struct aux_engine *engine,
+               struct aux_reply_transaction_data *reply);
+       int (*read_channel_reply)(
+               struct aux_engine *engine,
+               uint32_t size,
+               uint8_t *buffer,
+               uint8_t *reply_result,
+               uint32_t *sw_status);
+       enum aux_channel_operation_result (*get_channel_status)(
+               struct aux_engine *engine,
+               uint8_t *returned_bytes);
+       bool (*is_engine_available)(struct aux_engine *engine);
+       enum i2caux_engine_type (*get_engine_type)(
+               const struct aux_engine *engine);
+       bool (*acquire)(
+               struct aux_engine *engine,
+               struct ddc *ddc);
+       bool (*submit_request)(
+               struct aux_engine *engine,
+               struct i2caux_transaction_request *request,
+               bool middle_of_transaction);
+       void (*release_engine)(
+               struct aux_engine *engine);
+       void (*destroy_engine)(
+               struct aux_engine **engine);
+};
+#endif
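
The function table in this new header implies a per-transaction sequence: acquire the engine for a DDC line, submit the request, poll the channel status, then read the reply and release. A hedged outline using the hooks declared above; do_one_aux_read() and its simplified error handling are illustrative, not the driver's code:

/* outline only; every step can fail and needs real handling */
static bool do_one_aux_read(struct aux_engine *engine, struct ddc *ddc,
                            uint8_t *buf, uint32_t buf_size)
{
        struct aux_request_transaction_data req = { 0 };
        uint8_t returned_bytes;
        bool ok = false;

        if (!engine->funcs->acquire(engine, ddc))
                return false;

        /* caller fills req (type, action, address, length, data) */
        engine->funcs->submit_channel_request(engine, &req);

        if (engine->funcs->get_channel_status(engine, &returned_bytes) ==
                        AUX_CHANNEL_OPERATION_SUCCEEDED) {
                uint8_t reply_result;
                uint32_t sw_status;

                ok = engine->funcs->read_channel_reply(engine, buf_size, buf,
                                &reply_result, &sw_status) >= 0;
        }

        engine->funcs->release_engine(engine);
        return ok;
}
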
index f5f69cd81f6fd43a4aef2f279dbc576ea9437aac..3c7ccb68ecdb58c90ff25d68b3aaf7d6c602530d 100644 (file)
 #define __DISPLAY_CLOCK_H__
 
 #include "dm_services_types.h"
-
-
-struct clocks_value {
-       int dispclk_in_khz;
-       int max_pixelclk_in_khz;
-       int max_non_dp_phyclk_in_khz;
-       int max_dp_phyclk_in_khz;
-       bool dispclk_notify_pplib_done;
-       bool pixelclk_notify_pplib_done;
-       bool phyclk_notigy_pplib_done;
-       int dcfclock_in_khz;
-       int dppclk_in_khz;
-       int mclk_in_khz;
-       int phyclk_in_khz;
-       int common_vdd_level;
-};
-
+#include "dc.h"
 
 /* Structure containing all state-dependent clocks
  * (dependent on "enum clocks_state") */
@@ -52,34 +36,23 @@ struct state_dependent_clocks {
        int pixel_clk_khz;
 };
 
-struct display_clock {
+struct dccg {
        struct dc_context *ctx;
        const struct display_clock_funcs *funcs;
 
        enum dm_pp_clocks_state max_clks_state;
        enum dm_pp_clocks_state cur_min_clks_state;
-       struct clocks_value cur_clocks_value;
+       struct dc_clocks clks;
 };
 
 struct display_clock_funcs {
-       int (*set_clock)(struct display_clock *disp_clk,
+       void (*update_clocks)(struct dccg *dccg,
+                       struct dc_clocks *new_clocks,
+                       bool safe_to_lower);
+       int (*set_dispclk)(struct dccg *dccg,
                int requested_clock_khz);
 
-       enum dm_pp_clocks_state (*get_required_clocks_state)(
-               struct display_clock *disp_clk,
-               struct state_dependent_clocks *req_clocks);
-
-       bool (*set_min_clocks_state)(struct display_clock *disp_clk,
-               enum dm_pp_clocks_state dm_pp_clocks_state);
-
-       int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
-
-       bool (*apply_clock_voltage_request)(
-               struct display_clock *disp_clk,
-               enum dm_pp_clock_type clocks_type,
-               int clocks_in_khz,
-               bool pre_mode_set,
-               bool update_dp_phyclk);
+       int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
 };
 
 #endif /* __DISPLAY_CLOCK_H__ */
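
The rewritten interface folds the old per-clock setters into a single update_clocks() hook with a safe_to_lower flag; the contract (mirrored by the set_bandwidth rename further down) is that clock requirements may rise at any time but may only fall once the new state is committed. A minimal sketch of a hypothetical caller, not DC code:

/* Hedged sketch of the update_clocks() calling convention. */
static void program_new_state(struct dccg *dccg, struct dc_clocks *new_clocks)
{
        /* before committing: clocks may only go up */
        dccg->funcs->update_clocks(dccg, new_clocks, false);

        /* ... program pipes/planes for the new state here ... */

        /* after commit: lowering is now safe, drop anything unneeded */
        dccg->funcs->update_clocks(dccg, new_clocks, true);
}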
index de60f940030da69864a5e9e705a2e408c9af12f2..4550747fb61c24039cb2941c8823339ecd7573c3 100644 (file)
@@ -48,7 +48,7 @@ struct dmcu_funcs {
                        const char *src,
                        unsigned int bytes);
        void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
-       void (*setup_psr)(struct dmcu *dmcu,
+       bool (*setup_psr)(struct dmcu *dmcu,
                        struct dc_link *link,
                        struct psr_context *psr_context);
        void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
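
setup_psr now returns bool, so callers can learn whether the DMCU actually accepted the PSR context instead of assuming success. A hypothetical call site, with the link and psr_context plumbing assumed from dc_link:

/* Hedged sketch: check the new return value rather than fire-and-forget. */
if (!dmcu->funcs->setup_psr(dmcu, link, &psr_context))
        DRM_ERROR("setup_psr failed, PSR left disabled\n");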
index 582458f028f8479f52951ac1948f3101e2abb8e8..74ad94b0e4f088cd5c0cc14a2fe3d90fcc3fe9b8 100644 (file)
@@ -151,6 +151,9 @@ struct dpp_funcs {
        void (*dpp_set_hdr_multiplier)(
                        struct dpp *dpp_base,
                        uint32_t multiplier);
+       void (*set_optional_cursor_attributes)(
+                       struct dpp *dpp_base,
+                       struct dpp_cursor_attributes *attr);
 
        void (*dpp_dppclk_control)(
                        struct dpp *dpp_base,
index 97df82cddf829c0f8954053835b21f75baf23999..4f3f9e68ccfa46426b65d43149019fe53b04f5be 100644 (file)
@@ -43,10 +43,9 @@ enum cursor_lines_per_chunk {
 };
 
 struct hubp {
-       struct hubp_funcs *funcs;
+       const struct hubp_funcs *funcs;
        struct dc_context *ctx;
        struct dc_plane_address request_address;
-       struct dc_plane_address current_address;
        int inst;
 
        /* run time states */
index 47f1dc5a43b74ffab7b429e1d5975a2b07fa8f3d..da89c2edb07c758034d640045201465833dfa245 100644 (file)
@@ -64,7 +64,7 @@ struct stutter_modes {
 };
 
 struct mem_input {
-       struct mem_input_funcs *funcs;
+       const struct mem_input_funcs *funcs;
        struct dc_context *ctx;
        struct dc_plane_address request_address;
        struct dc_plane_address current_address;
index 69cb0a10530009213eadebfd5634195e5fa886f1..af700c7dac50861d6a878049d8c401a8e2d8d37e 100644 (file)
@@ -156,6 +156,9 @@ struct timing_generator_funcs {
                uint32_t *v_blank_end,
                uint32_t *h_position,
                uint32_t *v_position);
+       bool (*get_otg_active_size)(struct timing_generator *optc,
+                       uint32_t *otg_active_width,
+                       uint32_t *otg_active_height);
        void (*set_early_control)(struct timing_generator *tg,
                                                           uint32_t early_cntl);
        void (*wait_for_state)(struct timing_generator *tg,
index 63fc6c4997892d73b1bb6c25b3261d5ac22b8ee8..a14ce4de80b264856f73ad20236f5844aa9005c9 100644 (file)
@@ -44,6 +44,7 @@ struct dce_hwseq_wa {
        bool blnd_crtc_trigger;
        bool DEGVIDCN10_253;
        bool false_optc_underflow;
+       bool DEGVIDCN10_254;
 };
 
 struct hwseq_wa_state {
@@ -101,10 +102,18 @@ struct hw_sequencer_funcs {
                const struct dc *dc,
                struct pipe_ctx *pipe_ctx);
 
+       void (*plane_atomic_disconnect)(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx);
+
        void (*update_dchub)(
                struct dce_hwseq *hws,
                struct dchub_init_data *dh_data);
 
+       void (*update_mpcc)(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx);
+
        void (*update_pending_status)(
                        struct pipe_ctx *pipe_ctx);
 
@@ -154,20 +163,24 @@ struct hw_sequencer_funcs {
                        struct dc_link_settings *link_settings);
 
        void (*blank_stream)(struct pipe_ctx *pipe_ctx);
+
+       void (*enable_audio_stream)(struct pipe_ctx *pipe_ctx);
+
+       void (*disable_audio_stream)(struct pipe_ctx *pipe_ctx, int option);
+
        void (*pipe_control_lock)(
                                struct dc *dc,
                                struct pipe_ctx *pipe,
                                bool lock);
        void (*blank_pixel_data)(
                        struct dc *dc,
-                       struct stream_resource *stream_res,
-                       struct dc_stream_state *stream,
+                       struct pipe_ctx *pipe_ctx,
                        bool blank);
 
        void (*set_bandwidth)(
                        struct dc *dc,
                        struct dc_state *context,
-                       bool decrease_allowed);
+                       bool safe_to_lower);
 
        void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
                        int vmin, int vmax);
@@ -210,6 +223,7 @@ struct hw_sequencer_funcs {
 
        void (*set_cursor_position)(struct pipe_ctx *pipe);
        void (*set_cursor_attribute)(struct pipe_ctx *pipe);
+       void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
 
 };
 
index 3306e7b0b3e344960dbeb99633cd05209184a952..cf5a84b9e27c44691938bb381fcee20b1923cd9b 100644 (file)
@@ -445,4 +445,50 @@ uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
                uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
                uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
                uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
+
+
+/* indirect register access */
+
+#define IX_REG_SET_N(index_reg_name, data_reg_name, index, n, initial_val, ...)        \
+               generic_indirect_reg_update_ex(CTX, \
+                               REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+                               initial_val, \
+                               n, __VA_ARGS__)
+
+#define IX_REG_SET_2(index_reg_name, data_reg_name, index, init_value, f1, v1, f2, v2) \
+               IX_REG_SET_N(index_reg_name, data_reg_name, index, 2, init_value, \
+                               FN(reg, f1), v1,\
+                               FN(reg, f2), v2)
+
+
+#define IX_REG_READ(index_reg_name, data_reg_name, index) \
+               generic_read_indirect_reg(CTX, REG(index_reg_name), REG(data_reg_name), IND_REG(index))
+
+
+
+#define IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, n, ...)  \
+               generic_indirect_reg_update_ex(CTX, \
+                               REG(index_reg_name), REG(data_reg_name), IND_REG(index), \
+                               IX_REG_READ(index_reg_name, data_reg_name, index), \
+                               n, __VA_ARGS__)
+
+#define IX_REG_UPDATE_2(index_reg_name, data_reg_name, index, f1, v1, f2, v2)  \
+               IX_REG_UPDATE_N(index_reg_name, data_reg_name, index, 2,\
+                               FN(reg, f1), v1,\
+                               FN(reg, f2), v2)
+
+void generic_write_indirect_reg(const struct dc_context *ctx,
+               uint32_t addr_index, uint32_t addr_data,
+               uint32_t index, uint32_t data);
+
+uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
+               uint32_t addr_index, uint32_t addr_data,
+               uint32_t index);
+
+uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
+               uint32_t addr_index, uint32_t addr_data,
+               uint32_t index, uint32_t reg_val, int n,
+               uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+               ...);
+
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
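
These IX_REG_* macros wrap the classic index/data register pair: software writes the internal register number into the index register, then reads or writes the value through the data register. A hedged sketch of the handshake behind generic_write_indirect_reg() follows; dm_write_reg() is the existing dm_services accessor, and if its exact shape differs the body should be read as pseudocode for the handshake:

void generic_write_indirect_reg(const struct dc_context *ctx,
                uint32_t addr_index, uint32_t addr_data,
                uint32_t index, uint32_t data)
{
        dm_write_reg(ctx, addr_index, index);   /* select internal register */
        dm_write_reg(ctx, addr_data, data);     /* then write its value */
}

IX_REG_UPDATE_N builds on the same pair: it first does an IX_REG_READ to fetch the current value, then applies the shift/mask field updates before writing the result back.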
index 640a647f4611f25b9e3d2fc8f214d7eaf410e0c5..e92facbd038f0ea559bf9cd910505ca0a8fc8a60 100644 (file)
@@ -38,6 +38,7 @@ enum dce_version resource_parse_asic_id(
 
 struct resource_caps {
        int num_timing_generator;
+       int num_opp;
        int num_video_plane;
        int num_audio;
        int num_stream_encoder;
index 498515aad4a50bf35133755d1bded3a387a0817e..a76ee600eceeae72befe641578fd2c6032432647 100644 (file)
@@ -60,7 +60,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
 ###############################################################################
 # DCN 1x
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ifdef CONFIG_X86
 IRQ_DCN1 = irq_service_dcn10.o
 
 AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
index dcdfa0f0155175001d8392dffb92bd77c230ea5e..ae3fd0a235ba45ce900fe6b0aeba7354b7b847cf 100644 (file)
@@ -36,7 +36,7 @@
 #include "dce120/irq_service_dce120.h"
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include "dcn10/irq_service_dcn10.h"
 #endif
 
@@ -78,7 +78,7 @@ const struct irq_source_info *find_irq_source_info(
        struct irq_service *irq_service,
        enum dc_irq_source source)
 {
-       if (source > DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+       if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
                return NULL;
 
        return &irq_service->info[source];
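
The one-character change above is a bounds fix: the info[] array holds DAL_IRQ_SOURCES_NUMBER entries, so the old '>' test still accepted source == DAL_IRQ_SOURCES_NUMBER and handed back a pointer one element past the end of the array. Schematically:

/* valid indices: 0 .. DAL_IRQ_SOURCES_NUMBER - 1
 * source == DAL_IRQ_SOURCES_NUMBER is the enum's count sentinel and must
 * be rejected as well, which only '>=' does. */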
index a407892905af29661a70ad75a6c76c5d502163c3..c9fce9066ad8b43b970c450c25690b3d5bb9096b 100644 (file)
@@ -48,7 +48,7 @@
 
 #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#ifdef CONFIG_X86
 #include <asm/fpu/api.h>
 #endif
 
index 019e7a095ea103062a78fb20d65a8391ff0016fe..d968956a10cd5de232f610a2fe6eaa4028340315 100644 (file)
@@ -40,7 +40,8 @@ enum ddc_result {
        DDC_RESULT_FAILED_INCOMPLETE,
        DDC_RESULT_FAILED_OPERATION,
        DDC_RESULT_FAILED_INVALID_OPERATION,
-       DDC_RESULT_FAILED_BUFFER_OVERFLOW
+       DDC_RESULT_FAILED_BUFFER_OVERFLOW,
+       DDC_RESULT_FAILED_HPD_DISCON
 };
 
 enum ddc_service_type {
index d8e52e3b8e3c7bb72f5778f7b6650df80043023f..1c66166d0a949257ac262c0c236452cc431f99e9 100644 (file)
@@ -27,6 +27,9 @@
 #define __DAL_DPCD_DEFS_H__
 
 #include <drm/drm_dp_helper.h>
+#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_SINK_HW_REVISION_START 0x409
+#endif
 
 enum dpcd_revision {
        DPCD_REV_10 = 0x10,
index a981b3e99ab39f0b973ae19437010c31e63466a6..52a73332befb9e4a27d1d2b4e1cdf4bbf480a21b 100644 (file)
 #ifndef __DAL_FIXED31_32_H__
 #define __DAL_FIXED31_32_H__
 
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
 #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
 #ifndef LLONG_MIN
 #define LLONG_MIN (1LL<<63)
index 2941b882b0b6b266d40198a832abde05043204a4..58bb42ed85cab0b3540352e93351644403584872 100644 (file)
  * ********************************************************************
  */
 
+#define MAX_CONNECTOR_NUMBER_PER_SLOT  (16)
+#define MAX_BOARD_SLOTS                                        (4)
+#define INVALID_CONNECTOR_INDEX                        ((unsigned int)(-1))
+
 /* HPD unit id - HW direct translation */
 enum hpd_source_id {
        HPD_SOURCEID1 = 0,
@@ -136,5 +140,47 @@ enum sync_source {
        SYNC_SOURCE_DUAL_GPU_PIN
 };
 
+/* connector sizes in millimeters - from BiosParserTypes.hpp */
+#define CONNECTOR_SIZE_DVI                     40
+#define CONNECTOR_SIZE_VGA                     32
+#define CONNECTOR_SIZE_HDMI                    16
+#define CONNECTOR_SIZE_DP                      16
+#define CONNECTOR_SIZE_MINI_DP                 9
+#define CONNECTOR_SIZE_UNKNOWN                 30
+
+enum connector_layout_type {
+       CONNECTOR_LAYOUT_TYPE_UNKNOWN,
+       CONNECTOR_LAYOUT_TYPE_DVI_D,
+       CONNECTOR_LAYOUT_TYPE_DVI_I,
+       CONNECTOR_LAYOUT_TYPE_VGA,
+       CONNECTOR_LAYOUT_TYPE_HDMI,
+       CONNECTOR_LAYOUT_TYPE_DP,
+       CONNECTOR_LAYOUT_TYPE_MINI_DP,
+};
+struct connector_layout_info {
+       struct graphics_object_id connector_id;
+       enum connector_layout_type connector_type;
+       unsigned int length;
+       unsigned int position;  /* offset in mm from right side of the board */
+};
+
+/* length and width in mm */
+struct slot_layout_info {
+       unsigned int length;
+       unsigned int width;
+       unsigned int num_of_connectors;
+       struct connector_layout_info connectors[MAX_CONNECTOR_NUMBER_PER_SLOT];
+};
+
+struct board_layout_info {
+       unsigned int num_of_slots;
 
+       /* indicates valid information in bracket layout structure. */
+       unsigned int is_number_of_slots_valid : 1;
+       unsigned int is_slots_size_valid : 1;
+       unsigned int is_connector_offsets_valid : 1;
+       unsigned int is_connector_lengths_valid : 1;
+
+       struct slot_layout_info slots[MAX_BOARD_SLOTS];
+};
 #endif
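
A hedged sketch of how these layout structs might be consumed, honoring the validity bits; 'layout' is a hypothetical, already-populated structure:

static unsigned int count_valid_connectors(const struct board_layout_info *layout)
{
        unsigned int s, total = 0;

        if (!layout->is_number_of_slots_valid)
                return 0;

        for (s = 0; s < layout->num_of_slots && s < MAX_BOARD_SLOTS; s++)
                total += layout->slots[s].num_of_connectors;

        return total;
}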
index c4197432eb7c34542952364fc4027ff4819f63f3..33b3d755fe655523d9486f5aff3f4e7901afb6a3 100644 (file)
@@ -197,6 +197,11 @@ enum transmitter_color_depth {
        TRANSMITTER_COLOR_DEPTH_48       /* 16 bits */
 };
 
+enum dp_alt_mode {
+       DP_Alt_mode__Unknown = 0,
+       DP_Alt_mode__Connect,
+       DP_Alt_mode__NoConnect,
+};
 /*
  *****************************************************************************
  * graphics_object_id struct
@@ -287,4 +292,15 @@ static inline enum engine_id dal_graphics_object_id_get_engine_id(
                return (enum engine_id) id.id;
        return ENGINE_ID_UNKNOWN;
 }
+
+static inline bool dal_graphics_object_id_equal(
+       struct graphics_object_id id_1,
+       struct graphics_object_id id_2)
+{
+       if ((id_1.id == id_2.id) && (id_1.enum_id == id_2.enum_id) &&
+               (id_1.type == id_2.type)) {
+               return true;
+       }
+       return false;
+}
 #endif
index dc98d6d4b2bd0f8dae4569747fab30a5c02159a2..e3c79616682dca79e6c06e55900550859e6daab4 100644 (file)
@@ -40,47 +40,7 @@ struct dc_state;
  *
  */
 
-struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
-
-uint32_t dal_logger_destroy(struct dal_logger **logger);
-
-void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
-
-void dm_logger_write(
-               struct dal_logger *logger,
-               enum dc_log_type log_type,
-               const char *msg,
-               ...);
-
-void dm_logger_append(
-               struct log_entry *entry,
-               const char *msg,
-               ...);
-
-void dm_logger_append_va(
-               struct log_entry *entry,
-               const char *msg,
-               va_list args);
-
-void dm_logger_open(
-               struct dal_logger *logger,
-               struct log_entry *entry,
-               enum dc_log_type log_type);
-
-void dm_logger_close(struct log_entry *entry);
-
-void dc_conn_log(struct dc_context *ctx,
-               const struct dc_link *link,
-               uint8_t *hex_data,
-               int hex_data_count,
-               enum dc_log_type event,
-               const char *msg,
-               ...);
-
-void logger_write(struct dal_logger *logger,
-               enum dc_log_type log_type,
-               const char *msg,
-               void *paralist);
+void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
 
 void pre_surface_trace(
                struct dc *dc,
@@ -106,28 +66,31 @@ void context_clock_trace(
  * marked by this macro.
  * Note that the message will be printed exactly once for every function
  * it is used in order to avoid repeating of the same message. */
+
 #define DAL_LOGGER_NOT_IMPL(fmt, ...) \
-{ \
-       static bool print_not_impl = true; \
-\
-       if (print_not_impl == true) { \
-               print_not_impl = false; \
-               dm_logger_write(ctx->logger, LOG_WARNING, \
-               "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
-       } \
-}
+       do { \
+               static bool print_not_impl = true; \
+               if (print_not_impl == true) { \
+                       print_not_impl = false; \
+                       DRM_WARN("DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+               } \
+       } while (0)
 
 /******************************************************************************
  * Convenience macros to save on typing.
  *****************************************************************************/
 
 #define DC_ERROR(...) \
-       dm_logger_write(dc_ctx->logger, LOG_ERROR, \
-               __VA_ARGS__)
+               do { \
+                       (void)(dc_ctx); \
+                       DC_LOG_ERROR(__VA_ARGS__); \
+               } while (0)
 
 #define DC_SYNC_INFO(...) \
-       dm_logger_write(dc_ctx->logger, LOG_SYNC, \
-               __VA_ARGS__)
+               do { \
+                       (void)(dc_ctx); \
+                       DC_LOG_SYNC(__VA_ARGS__); \
+               } while (0)
 
 /* Connectivity log format:
  * [time stamp]   [drm] [Major_minor] [connector name] message.....
@@ -137,20 +100,30 @@ void context_clock_trace(
  */
 
 #define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
-               dc_conn_log(link->ctx, link, hex_data, hex_len, \
-                               LOG_EVENT_DETECTION, ##__VA_ARGS__)
+               do { \
+                       (void)(link); \
+                       dc_conn_log_hex_linux(hex_data, hex_len); \
+                       DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
+               } while (0)
 
 #define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
-               dc_conn_log(link->ctx, link, hex_data, hex_len, \
-                               LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+               do { \
+                       (void)(link); \
+                       dc_conn_log_hex_linux(hex_data, hex_len); \
+                       DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
+               } while (0)
 
 #define CONN_MSG_LT(link, ...) \
-               dc_conn_log(link->ctx, link, NULL, 0, \
-                               LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+               do { \
+                       (void)(link); \
+                       DC_LOG_EVENT_LINK_TRAINING(__VA_ARGS__); \
+               } while (0)
 
 #define CONN_MSG_MODE(link, ...) \
-               dc_conn_log(link->ctx, link, NULL, 0, \
-                               LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+               do { \
+                       (void)(link); \
+                       DC_LOG_EVENT_MODE_SET(__VA_ARGS__); \
+               } while (0)
 
 /*
  * Display Test Next logging
@@ -165,38 +138,21 @@ void context_clock_trace(
        dm_dtn_log_end(dc_ctx)
 
 #define PERFORMANCE_TRACE_START() \
-       unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
-       unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
-       unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
-       if (dc->debug.performance_trace) {\
-               dm_logger_flush_buffer(dc->ctx->logger, false);\
-               dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
-               dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
-               dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
-       }
-
-#define PERFORMANCE_TRACE_END() do {\
-       unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
-       if (dc->debug.performance_trace) {\
-               dm_logger_write(dc->ctx->logger, \
-                               LOG_PERF_TRACE, \
-                               "%s duration: %d ticks\n", __func__,\
+       unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx)
+
+#define PERFORMANCE_TRACE_END() \
+       do { \
+               unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx); \
+               if (dc->debug.performance_trace) { \
+                       DC_LOG_PERF_TRACE("%s duration: %lld ticks\n", __func__, \
                                perf_trc_end_stmp - perf_trc_start_stmp); \
-               if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
-                       dc->ctx->logger->mask = perf_trc_start_log_msk;\
-                       dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
-                       dm_logger_flush_buffer(dc->ctx->logger, false);\
                } \
-       } \
-} while (0)
+       } while (0)
 
-#define DISPLAY_STATS_BEGIN(entry) \
-       dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS)
+#define DISPLAY_STATS_BEGIN(entry) (void)(entry)
 
-#define DISPLAY_STATS(msg, ...) \
-       dm_logger_append(&log_entry, msg, ##__VA_ARGS__)
+#define DISPLAY_STATS(msg, ...) DC_LOG_PERF_TRACE(msg, __VA_ARGS__)
 
-#define DISPLAY_STATS_END(entry) \
-       dm_logger_close(&entry)
+#define DISPLAY_STATS_END(entry) (void)(entry)
 
 #endif /* __DAL_LOGGER_INTERFACE_H__ */
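
The conversions above wrap every multi-statement macro in do { ... } while (0). A brace-only block breaks when the macro is used as a single statement before an else; a minimal illustration with placeholder functions f() and g():

void f(int);
void g(int);    /* placeholders for illustration only */

#define BAD(x)  { f(x); g(x); }                 /* bare brace block */
#define GOOD(x) do { f(x); g(x); } while (0)    /* statement-like */

void demo(int cond)
{
        if (cond)
                GOOD(1);        /* the trailing ';' pairs cleanly... */
        else
                GOOD(2);        /* ...so if/else works as expected */
        /* 'BAD(1);' in the if-branch would not compile: the ';' after
         * the '}' terminates the if before the else is reached. */
}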
index 0a540b9897a6798169f56e0b6a763042e1faccf0..ad3695e67b76feb866c0132e043c70b8a0b294cb 100644 (file)
@@ -138,63 +138,4 @@ enum dc_log_type {
                (1 << LOG_HW_AUDIO)| \
                (1 << LOG_BANDWIDTH_CALCS)*/
 
-union logger_flags {
-       struct {
-               uint32_t ENABLE_CONSOLE:1; /* Print to console */
-               uint32_t ENABLE_BUFFER:1; /* Print to buffer */
-               uint32_t RESERVED:30;
-       } bits;
-       uint32_t value;
-};
-
-struct log_entry {
-       struct dal_logger *logger;
-       enum dc_log_type type;
-
-       char *buf;
-       uint32_t buf_offset;
-       uint32_t max_buf_bytes;
-};
-
-/**
-* Structure for enumerating log types
-*/
-struct dc_log_type_info {
-       enum dc_log_type type;
-       char name[MAX_NAME_LEN];
-};
-
-/* Structure for keeping track of offsets, buffer, etc */
-
-#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
-
-/*Connectivity log needs to output EDID, which needs at lease 256x3 bytes,
- * change log line size to 896 to meet the request.
- */
-#define LOG_MAX_LINE_SIZE 896
-
-struct dal_logger {
-
-       /* How far into the circular buffer has been read by dsat
-        * Read offset should never cross write offset. Write \0's to
-        * read data just to be sure?
-        */
-       uint32_t buffer_read_offset;
-
-       /* How far into the circular buffer we have written
-        * Write offset should never cross read offset
-        */
-       uint32_t buffer_write_offset;
-
-       uint32_t open_count;
-
-       char *log_buffer;       /* Pointer to malloc'ed buffer */
-       uint32_t log_buffer_size; /* Size of circular buffer */
-
-       uint32_t mask; /*array of masks for major elements*/
-
-       union logger_flags flags;
-       struct dc_context *ctx;
-};
-
 #endif /* __DAL_LOGGER_TYPES_H__ */
index eee0dfad696294eb96fca9a4f25e3e9f3db7fe85..ee69c949bfbf2d7d165fe388dc128b38916c4105 100644 (file)
@@ -131,6 +131,63 @@ static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y)
                        dc_fixpt_div(dc_fixpt_one, m1));
 
 }
+
+/* degamma: non-linear to linear */
+static void compute_hlg_oetf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+       struct fixed31_32 a;
+       struct fixed31_32 b;
+       struct fixed31_32 c;
+       struct fixed31_32 threshold;
+       struct fixed31_32 reference_white_level;
+
+       a = dc_fixpt_from_fraction(17883277, 100000000);
+       if (is_light0_12) {
+               /*light 0-12*/
+               b = dc_fixpt_from_fraction(28466892, 100000000);
+               c = dc_fixpt_from_fraction(55991073, 100000000);
+               threshold = dc_fixpt_one;
+               reference_white_level = dc_fixpt_half;
+       } else {
+               /*light 0-1*/
+               b = dc_fixpt_from_fraction(2372241, 100000000);
+               c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+               threshold = dc_fixpt_from_fraction(1, 12);
+               reference_white_level = dc_fixpt_pow(dc_fixpt_from_fraction(3, 1), dc_fixpt_half);
+       }
+       if (dc_fixpt_lt(threshold, in_x))
+               *out_y = dc_fixpt_add(c, dc_fixpt_mul(a, dc_fixpt_log(dc_fixpt_sub(in_x, b))));
+       else
+               *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_half), reference_white_level);
+}
+
+/* regamma: linear to non-linear */
+static void compute_hlg_eotf(struct fixed31_32 in_x, bool is_light0_12, struct fixed31_32 *out_y)
+{
+       struct fixed31_32 a;
+       struct fixed31_32 b;
+       struct fixed31_32 c;
+       struct fixed31_32 reference_white_level;
+
+       a = dc_fixpt_from_fraction(17883277, 100000000);
+       if (is_light0_12) {
+               /*light 0-12*/
+               b = dc_fixpt_from_fraction(28466892, 100000000);
+               c = dc_fixpt_from_fraction(55991073, 100000000);
+               reference_white_level = dc_fixpt_from_fraction(4, 1);
+       } else {
+               /*light 0-1*/
+               b = dc_fixpt_from_fraction(2372241, 100000000);
+               c = dc_fixpt_add(dc_fixpt_one, dc_fixpt_from_fraction(429347, 100000000));
+               reference_white_level = dc_fixpt_from_fraction(1, 3);
+       }
+       if (dc_fixpt_lt(dc_fixpt_half, in_x))
+               *out_y = dc_fixpt_add(dc_fixpt_exp(dc_fixpt_div(dc_fixpt_sub(in_x, c), a)), b);
+       else
+               *out_y = dc_fixpt_mul(dc_fixpt_pow(in_x, dc_fixpt_from_fraction(2, 1)), reference_white_level);
+}
+
+
 /* one-time pre-compute PQ values - only for sdr_white_level 80 */
 void precompute_pq(void)
 {
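
For reference, the two helpers above implement the BT.2100 hybrid log-gamma curve in DC's 31.32 fixed-point types. A floating-point sketch of the 0-1 branch of compute_hlg_oetf() (is_light0_12 == false), using the same constants as the dc_fixpt_from_fraction() values, purely for illustration:

#include <math.h>

/* Float sketch only: a = 0.17883277, b = 0.02372241, c = 1.00429347 mirror
 * the fractions above; algebraically this equals the BT.2100 form
 * a*ln(12E - 0.28466892) + 0.55991073 for normalized scene light E. */
static double hlg_oetf_01(double e)
{
        const double a = 0.17883277, b = 0.02372241, c = 1.00429347;

        if (e <= 1.0 / 12.0)
                return sqrt(3.0 * e);   /* sqrt(3) * sqrt(e), as in the code */
        return a * log(e - b) + c;
}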
@@ -691,6 +748,48 @@ static void build_degamma(struct pwl_float_data_ex *curve,
        }
 }
 
+static void build_hlg_degamma(struct pwl_float_data_ex *degamma,
+               uint32_t hw_points_num,
+               const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+       uint32_t i;
+
+       struct pwl_float_data_ex *rgb = degamma;
+       const struct hw_x_point *coord_x = coordinate_x;
+
+       i = 0;
+
+       while (i != hw_points_num + 1) {
+               compute_hlg_oetf(coord_x->x, is_light0_12, &rgb->r);
+               rgb->g = rgb->r;
+               rgb->b = rgb->r;
+               ++coord_x;
+               ++rgb;
+               ++i;
+       }
+}
+
+static void build_hlg_regamma(struct pwl_float_data_ex *regamma,
+               uint32_t hw_points_num,
+               const struct hw_x_point *coordinate_x, bool is_light0_12)
+{
+       uint32_t i;
+
+       struct pwl_float_data_ex *rgb = regamma;
+       const struct hw_x_point *coord_x = coordinate_x;
+
+       i = 0;
+
+       while (i != hw_points_num + 1) {
+               compute_hlg_eotf(coord_x->x, is_light0_12, &rgb->r);
+               rgb->g = rgb->r;
+               rgb->b = rgb->r;
+               ++coord_x;
+               ++rgb;
+               ++i;
+       }
+}
+
 static void scale_gamma(struct pwl_float_data *pwl_rgb,
                const struct dc_gamma *ramp,
                struct dividers dividers)
@@ -1621,6 +1720,25 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
 
+               kvfree(rgb_regamma);
+       } else if (trans == TRANSFER_FUNCTION_HLG ||
+               trans == TRANSFER_FUNCTION_HLG12) {
+               rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                                      sizeof(*rgb_regamma),
+                                      GFP_KERNEL);
+               if (!rgb_regamma)
+                       goto rgb_regamma_alloc_fail;
+
+               build_hlg_regamma(rgb_regamma,
+                               MAX_HW_POINTS,
+                               coordinates_x,
+                               trans == TRANSFER_FUNCTION_HLG12 ? true:false);
+               for (i = 0; i <= MAX_HW_POINTS ; i++) {
+                       points->red[i]    = rgb_regamma[i].r;
+                       points->green[i]  = rgb_regamma[i].g;
+                       points->blue[i]   = rgb_regamma[i].b;
+               }
+               ret = true;
                kvfree(rgb_regamma);
        }
 rgb_regamma_alloc_fail:
@@ -1681,6 +1799,25 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
 
+               kvfree(rgb_degamma);
+       } else if (trans == TRANSFER_FUNCTION_HLG ||
+               trans == TRANSFER_FUNCTION_HLG12) {
+               rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
+                                      sizeof(*rgb_degamma),
+                                      GFP_KERNEL);
+               if (!rgb_degamma)
+                       goto rgb_degamma_alloc_fail;
+
+               build_hlg_degamma(rgb_degamma,
+                               MAX_HW_POINTS,
+                               coordinates_x,
+                               trans == TRANSFER_FUNCTION_HLG12 ? true:false);
+               for (i = 0; i <= MAX_HW_POINTS ; i++) {
+                       points->red[i]    = rgb_degamma[i].r;
+                       points->green[i]  = rgb_degamma[i].g;
+                       points->blue[i]   = rgb_degamma[i].b;
+               }
+               ret = true;
                kvfree(rgb_degamma);
        }
        points->end_exponent = 0;
diff --git a/drivers/gpu/drm/amd/display/modules/color/luts_1d.h b/drivers/gpu/drm/amd/display/modules/color/luts_1d.h
new file mode 100644 (file)
index 0000000..66b1fad
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef LUTS_1D_H
+#define LUTS_1D_H
+
+#include "hw_shared.h"
+
+struct point_config {
+       uint32_t custom_float_x;
+       uint32_t custom_float_y;
+       uint32_t custom_float_slope;
+};
+
+struct lut_point {
+       uint32_t red;
+       uint32_t green;
+       uint32_t blue;
+       uint32_t delta_red;
+       uint32_t delta_green;
+       uint32_t delta_blue;
+};
+
+struct pwl_1dlut_parameter {
+       struct gamma_curve      arr_curve_points[34];
+       struct point_config     arr_points[2];
+       struct lut_point rgb_resulted[256];
+       uint32_t hw_points_num;
+};
+#endif // LUTS_1D_H
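
A hedged sketch of filling this new structure with an identity ramp; the uint32_t entries are in DC's custom float encoding, which is produced elsewhere, so to_custom_float() here is a hypothetical stand-in for that conversion:

static void fill_identity_1dlut(struct pwl_1dlut_parameter *lut)
{
        uint32_t i;

        lut->hw_points_num = 256;
        for (i = 0; i < 256; i++) {
                /* to_custom_float() is hypothetical, not a DC symbol */
                uint32_t v = to_custom_float(i / 255.0);

                lut->rgb_resulted[i].red   = v;
                lut->rgb_resulted[i].green = v;
                lut->rgb_resulted[i].blue  = v;
                /* delta_* entries left zero for a flat identity ramp */
        }
}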
index 710852ad03f36d9f3bbeff0edff1278fc5a338e6..3d4c1b1ab8c4d4ce08f55b127e6d4b8e028ccdc3 100644 (file)
@@ -29,7 +29,7 @@
 #include "core_types.h"
 
 #define DAL_STATS_ENABLE_REGKEY                        "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT                0x00000001
+#define DAL_STATS_ENABLE_REGKEY_DEFAULT                0x00000000
 #define DAL_STATS_ENABLE_REGKEY_ENABLED                0x00000001
 
 #define DAL_STATS_ENTRIES_REGKEY               "DalStatsEntries"
@@ -238,7 +238,7 @@ void mod_stats_dump(struct mod_stats *mod_stats)
        for (int i = 0; i < core_stats->entry_id; i++) {
                if (event_index < core_stats->event_index &&
                                i == events[event_index].entry_id) {
-                       DISPLAY_STATS("%s\n", events[event_index].event_string);
+                       DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
                        event_index++;
                } else if (time_index < core_stats->index &&
                                i == time[time_index].entry_id) {
index 5eb895fd98bfb6a65de4fd16e595d85c12d7830d..9cb9ceb4d74dc28ae48b8acb80b659d03b8e5e09 100644 (file)
@@ -27,6 +27,7 @@
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1        0x00010000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2        0x00020000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3        0x00040000
+#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4        0x00080000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK        0xFFFF0000
 #define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT       16
 
@@ -34,6 +35,7 @@
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1   0x00000001
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2   0x00000002
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3   0x00000004
+#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4   0x00000008
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK   0x0000FFFF
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT  0
 
index b178176b72ac66202d79e271b1fd058ef8d45671..265621d8945c300e8a7f907e9287da48096270fd 100644 (file)
@@ -128,47 +128,57 @@ enum PP_FEATURE_MASK {
        PP_OVERDRIVE_MASK = 0x4000,
        PP_GFXOFF_MASK = 0x8000,
        PP_ACG_MASK = 0x10000,
+       PP_STUTTER_MODE = 0x20000,
 };
 
+/**
+ * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ */
 struct amd_ip_funcs {
-       /* Name of IP block */
+       /** @name: Name of IP block */
        char *name;
-       /* sets up early driver state (pre sw_init), does not configure hw - Optional */
+       /**
+        * @early_init:
+        *
+        * sets up early driver state (pre sw_init),
+        * does not configure hw - Optional
+        */
        int (*early_init)(void *handle);
-       /* sets up late driver/hw state (post hw_init) - Optional */
+       /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
        int (*late_init)(void *handle);
-       /* sets up driver state, does not configure hw */
+       /** @sw_init: sets up driver state, does not configure hw */
        int (*sw_init)(void *handle);
-       /* tears down driver state, does not configure hw */
+       /** @sw_fini: tears down driver state, does not configure hw */
        int (*sw_fini)(void *handle);
-       /* sets up the hw state */
+       /** @hw_init: sets up the hw state */
        int (*hw_init)(void *handle);
-       /* tears down the hw state */
+       /** @hw_fini: tears down the hw state */
        int (*hw_fini)(void *handle);
+       /** @late_fini: final cleanup */
        void (*late_fini)(void *handle);
-       /* handles IP specific hw/sw changes for suspend */
+       /** @suspend: handles IP specific hw/sw changes for suspend */
        int (*suspend)(void *handle);
-       /* handles IP specific hw/sw changes for resume */
+       /** @resume: handles IP specific hw/sw changes for resume */
        int (*resume)(void *handle);
-       /* returns current IP block idle status */
+       /** @is_idle: returns current IP block idle status */
        bool (*is_idle)(void *handle);
-       /* poll for idle */
+       /** @wait_for_idle: poll for idle */
        int (*wait_for_idle)(void *handle);
-       /* check soft reset the IP block */
+       /** @check_soft_reset: check soft reset the IP block */
        bool (*check_soft_reset)(void *handle);
-       /* pre soft reset the IP block */
+       /** @pre_soft_reset: pre soft reset the IP block */
        int (*pre_soft_reset)(void *handle);
-       /* soft reset the IP block */
+       /** @soft_reset: soft reset the IP block */
        int (*soft_reset)(void *handle);
-       /* post soft reset the IP block */
+       /** @post_soft_reset: post soft reset the IP block */
        int (*post_soft_reset)(void *handle);
-       /* enable/disable cg for the IP block */
+       /** @set_clockgating_state: enable/disable cg for the IP block */
        int (*set_clockgating_state)(void *handle,
                                     enum amd_clockgating_state state);
-       /* enable/disable pg for the IP block */
+       /** @set_powergating_state: enable/disable pg for the IP block */
        int (*set_powergating_state)(void *handle,
                                     enum amd_powergating_state state);
-       /* get current clockgating status */
+       /** @get_clockgating_state: get current clockgating status */
        void (*get_clockgating_state)(void *handle, u32 *flags);
 };
 
index 18a32477ed1d22d703068132d2933d7dd33a8713..fe0cbaade3c32bea3396375b71b9d80faf673e55 100644 (file)
@@ -89,6 +89,8 @@
 #define mmUVD_JPEG_RB_SIZE_BASE_IDX                                                                    1
 #define mmUVD_JPEG_ADDR_CONFIG                                                                         0x021f
 #define mmUVD_JPEG_ADDR_CONFIG_BASE_IDX                                                                1
+#define mmUVD_JPEG_PITCH                                                                               0x0222
+#define mmUVD_JPEG_PITCH_BASE_IDX                                                                      1
 #define mmUVD_JPEG_GPCOM_CMD                                                                           0x022c
 #define mmUVD_JPEG_GPCOM_CMD_BASE_IDX                                                                  1
 #define mmUVD_JPEG_GPCOM_DATA0                                                                         0x022d
 #define mmUVD_RB_WPTR4_BASE_IDX                                                                        1
 #define mmUVD_JRBC_RB_RPTR                                                                             0x0457
 #define mmUVD_JRBC_RB_RPTR_BASE_IDX                                                                    1
+#define mmUVD_LMI_JPEG_VMID                                                                            0x045d
+#define mmUVD_LMI_JPEG_VMID_BASE_IDX                                                                   1
 #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH                                                            0x045e
 #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX                                                   1
 #define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW                                                             0x045f
 #define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX                                                      1
 #define mmUVD_LMI_JRBC_IB_VMID                                                                         0x0507
 #define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX                                                                1
+#define mmUVD_LMI_JRBC_RB_VMID                                                                         0x0508
+#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX                                                                1
 #define mmUVD_JRBC_RB_WPTR                                                                             0x0509
 #define mmUVD_JRBC_RB_WPTR_BASE_IDX                                                                    1
 #define mmUVD_JRBC_RB_CNTL                                                                             0x050a
 #define mmUVD_JRBC_IB_SIZE_BASE_IDX                                                                    1
 #define mmUVD_JRBC_LMI_SWAP_CNTL                                                                       0x050d
 #define mmUVD_JRBC_LMI_SWAP_CNTL_BASE_IDX                                                              1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW                                                         0x050e
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX                                                1
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH                                                        0x050f
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX                                               1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW                                                         0x0510
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX                                                1
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH                                                        0x0511
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX                                               1
+#define mmUVD_JRBC_RB_REF_DATA                                                                         0x0512
+#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX                                                                1
+#define mmUVD_JRBC_RB_COND_RD_TIMER                                                                    0x0513
+#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX                                                           1
+#define mmUVD_JRBC_EXTERNAL_REG_BASE                                                                   0x0517
+#define mmUVD_JRBC_EXTERNAL_REG_BASE_BASE_IDX                                                          1
 #define mmUVD_JRBC_SOFT_RESET                                                                          0x0519
 #define mmUVD_JRBC_SOFT_RESET_BASE_IDX                                                                 1
 #define mmUVD_JRBC_STATUS                                                                              0x051a
index 092d800b703a7627a2b98fdda7be54b5b6f7ff11..4bc118df3bc484e724098203d7d94cd9e51dfdce 100644 (file)
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
   uint16_t  dpphy_override;                   // bit vector, enum of atom_sysinfo_dpphy_override_def
   uint16_t  lvds_misc;                        // enum of atom_sys_info_lvds_misc_def
   uint16_t  backlight_pwm_hz;                 // pwm frequency in hz
-  uint8_t   memorytype;                       // enum of atom_sys_mem_type
+  uint8_t   memorytype;                       // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
   uint8_t   umachannelnumber;                 // number of memory channels
   uint8_t   pwr_on_digon_to_de;               /* all pwr sequence numbers below are in uint of 4ms */
   uint8_t   pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
   uint8_t   pwr_on_vary_bl_to_blon;
   uint8_t   pwr_down_bloff_to_vary_bloff;
   uint8_t   min_allowed_bl_level;
+  uint8_t   htc_hyst_limit;
+  uint8_t   htc_tmp_limit;
+  uint8_t   reserved1;
+  uint8_t   reserved2;
   struct atom_external_display_connection_info extdispconninfo;
   struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
   struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
   struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
-  struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
-  struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+  struct atom_14nm_dpphy_dp_tuningset dp_tuningset;        // rbr 1.62G dp tuning set
+  struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;   // HBR3 dp tuning set
   struct atom_camera_data  camera_info;
   struct atom_hdmi_retimer_redriver_set dp0_retimer_set;   //for DP0
   struct atom_hdmi_retimer_redriver_set dp1_retimer_set;   //for DP1
   struct atom_hdmi_retimer_redriver_set dp2_retimer_set;   //for DP2
   struct atom_hdmi_retimer_redriver_set dp3_retimer_set;   //for DP3
-  uint32_t  reserved[108];
+  struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset;    //hbr 2.7G dp tuning set
+  struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset;   //hbr2 5.4G dp tuning set
+  struct atom_14nm_dpphy_dp_tuningset edp_tuningset;       //edp tuning set
+  uint32_t  reserved[66];
 };
 
 
@@ -1433,7 +1440,10 @@ struct atom_smc_dpm_info_v4_1
        uint8_t  acggfxclkspreadpercent;
        uint16_t acggfxclkspreadfreq;
 
-       uint32_t boardreserved[10];
+       uint8_t Vr2_I2C_address;
+       uint8_t padding_vr2[3];
+
+       uint32_t boardreserved[9];
 };
 
 /* 
index 7852952d1fdee3bce1dcc79e64d4a83ddeca4057..1d93a0c574c9e9d3194c5a2a5b4431c90a47aa76 100644 (file)
@@ -23,6 +23,8 @@
 #ifndef _DM_PP_INTERFACE_
 #define _DM_PP_INTERFACE_
 
+#include "dm_services_types.h"
+
 #define PP_MAX_CLOCK_LEVELS 16
 
 enum amd_pp_display_config_type{
@@ -189,39 +191,4 @@ struct pp_display_clock_request {
        uint32_t clock_freq_in_khz;
 };
 
-#define PP_MAX_WM_SETS 4
-
-enum pp_wm_set_id {
-       DC_WM_SET_A = 0,
-       DC_WM_SET_B,
-       DC_WM_SET_C,
-       DC_WM_SET_D,
-       DC_WM_SET_INVALID = 0xffff,
-};
-
-struct pp_wm_set_with_dmif_clock_range_soc15 {
-       enum pp_wm_set_id wm_set_id;
-       uint32_t wm_min_dcefclk_in_khz;
-       uint32_t wm_max_dcefclk_in_khz;
-       uint32_t wm_min_memclk_in_khz;
-       uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_set_with_mcif_clock_range_soc15 {
-       enum pp_wm_set_id wm_set_id;
-       uint32_t wm_min_socclk_in_khz;
-       uint32_t wm_max_socclk_in_khz;
-       uint32_t wm_min_memclk_in_khz;
-       uint32_t wm_max_memclk_in_khz;
-};
-
-struct pp_wm_sets_with_clock_ranges_soc15 {
-       uint32_t num_wm_sets_dmif;
-       uint32_t num_wm_sets_mcif;
-       struct pp_wm_set_with_dmif_clock_range_soc15
-               wm_sets_dmif[PP_MAX_WM_SETS];
-       struct pp_wm_set_with_mcif_clock_range_soc15
-               wm_sets_mcif[PP_MAX_WM_SETS];
-};
-
 #endif /* _DM_PP_INTERFACE_ */
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h
new file mode 100644 (file)
index 0000000..36306c5
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_GFX_9_0_H__
+#define __IRQSRCS_GFX_9_0_H__
+
+
+#define GFX_9_0__SRCID__CP_RB_INTERRUPT_PKT                                    176             /* B0 CP_INTERRUPT pkt in RB */
+#define GFX_9_0__SRCID__CP_IB1_INTERRUPT_PKT                           177             /* B1 CP_INTERRUPT pkt in IB1 */
+#define GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT                           178             /* B2 CP_INTERRUPT pkt in IB2 */
+#define GFX_9_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR                      180             /* B4 PM4 Pkt Rsvd Bits Error */
+#define GFX_9_0__SRCID__CP_EOP_INTERRUPT                                       181             /* B5 End-of-Pipe Interrupt */
+#define GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR                                    183             /* B7 Bad Opcode Error */
+#define GFX_9_0__SRCID__CP_PRIV_REG_FAULT                                      184             /* B8 Privileged Register Fault */
+#define GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT                                    185             /* B9 Privileged Instr Fault */
+#define GFX_9_0__SRCID__CP_WAIT_MEM_SEM_FAULT                          186             /* BA Wait Memory Semaphore Fault (Synchronization Object Fault) */
+#define GFX_9_0__SRCID__CP_CTX_EMPTY_INTERRUPT                         187             /* BB Context Empty Interrupt */
+#define GFX_9_0__SRCID__CP_CTX_BUSY_INTERRUPT                          188             /* BC Context Busy Interrupt */
+#define GFX_9_0__SRCID__CP_ME_WAIT_REG_MEM_POLL_TIMEOUT                192             /* C0 CP.ME Wait_Reg_Mem Poll Timeout */
+#define GFX_9_0__SRCID__CP_SIG_INCOMPLETE                                      193             /* C1 "Surface Probe Fault Signal Incomplete" */
+#define GFX_9_0__SRCID__CP_PREEMPT_ACK                                     194         /* C2 Preemption Acknowledge */
+#define GFX_9_0__SRCID__CP_GPF                                             195         /* C3 General Protection Fault (GPF) */
+#define GFX_9_0__SRCID__CP_GDS_ALLOC_ERROR                                     196             /* C4 GDS Alloc Error */
+#define GFX_9_0__SRCID__CP_ECC_ERROR                                       197         /* C5 ECC  Error */
+#define GFX_9_0__SRCID__CP_COMPUTE_QUERY_STATUS             199     /* C7 Compute query status */
+#define GFX_9_0__SRCID__CP_VM_DOORBELL                                     200         /* C8 Unattached VM Doorbell Received */
+#define GFX_9_0__SRCID__CP_FUE_ERROR                                       201         /* C9 ECC FUE Error */
+#define GFX_9_0__SRCID__RLC_STRM_PERF_MONITOR_INTERRUPT                202             /* CA Streaming Perf Monitor Interrupt */
+#define GFX_9_0__SRCID__GRBM_RD_TIMEOUT_ERROR                          232             /* E8 GRBM read timeout error */
+#define GFX_9_0__SRCID__GRBM_REG_GUI_IDLE                                      233             /* E9 Register GUI Idle */
+#define GFX_9_0__SRCID__SQ_INTERRUPT_ID                                            239         /* EF SQ Interrupt (ttrace wrap, errors) */
+
+#endif /* __IRQSRCS_GFX_9_0_H__ */
index c6b6f97de9de5951b301825b6b36f9ad8171699b..aaed7f59e0e23395418bb24d5114b2d7d3de246e 100644 (file)
 #define VISLANDS30_IV_SRCID_HPD_RX_F                               42      // 0x2a             
 #define VISLANDS30_IV_EXTID_HPD_RX_F                         11
 
+#define VISLANDS30_IV_SRCID_GPIO_19                            0x00000053  /* 83 */
+
+#define VISLANDS30_IV_SRCID_SRBM_READ_TIMEOUT_ERR              0x00000060  /* 96 */
+#define VISLANDS30_IV_SRCID_SRBM_CTX_SWITCH                    0x00000061  /* 97 */
+
+#define VISLANDS30_IV_SRBM_REG_ACCESS_ERROR                    0x00000062  /* 98 */
+
+
+#define VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP                   0x00000077  /* 119 */
+#define VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE                 0x0000007c  /* 124 */
+
+#define VISLANDS30_IV_SRCID_BIF_PF_VF_MSGBUF_VALID             0x00000087  /* 135 */
+
+#define VISLANDS30_IV_SRCID_BIF_VF_PF_MSGBUF_ACK               0x0000008a  /* 138 */
+
+#define VISLANDS30_IV_SRCID_SYS_PAGE_INV_FAULT                 0x0000008c  /* 140 */
+#define VISLANDS30_IV_SRCID_SYS_MEM_PROT_FAULT                 0x0000008d  /* 141 */
+
+#define VISLANDS30_IV_SRCID_SEM_PAGE_INV_FAULT                 0x00000090  /* 144 */
+#define VISLANDS30_IV_SRCID_SEM_MEM_PROT_FAULT                 0x00000091  /* 145 */
+
+#define VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT                 0x00000092  /* 146 */
+#define VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT                 0x00000093  /* 147 */
+
+#define VISLANDS30_IV_SRCID_ACP                                0x000000a2  /* 162 */
+
+#define VISLANDS30_IV_SRCID_VCE_TRAP                           0x000000a7  /* 167 */
+#define VISLANDS30_IV_EXTID_VCE_TRAP_GENERAL_PURPOSE           0
+#define VISLANDS30_IV_EXTID_VCE_TRAP_LOW_LATENCY               1
+#define VISLANDS30_IV_EXTID_VCE_TRAP_REAL_TIME                 2
+
+#define VISLANDS30_IV_SRCID_CP_INT_RB                          0x000000b0  /* 176 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB1                         0x000000b1  /* 177 */
+#define VISLANDS30_IV_SRCID_CP_INT_IB2                         0x000000b2  /* 178 */
+#define VISLANDS30_IV_SRCID_CP_PM4_RES_BITS_ERR                0x000000b4  /* 180 */
+#define VISLANDS30_IV_SRCID_CP_END_OF_PIPE                     0x000000b5  /* 181 */
+#define VISLANDS30_IV_SRCID_CP_BAD_OPCODE                      0x000000b7  /* 183 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT                  0x000000b8  /* 184 */
+#define VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT                0x000000b9  /* 185 */
+#define VISLANDS30_IV_SRCID_CP_WAIT_MEM_SEM_FAULT              0x000000ba  /* 186 */
+#define VISLANDS30_IV_SRCID_CP_GUI_IDLE                        0x000000bb  /* 187 */
+#define VISLANDS30_IV_SRCID_CP_GUI_BUSY                        0x000000bc  /* 188 */
+
+#define VISLANDS30_IV_SRCID_CP_COMPUTE_QUERY_STATUS            0x000000bf  /* 191 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR                       0x000000c5  /* 197 */
+
+#define CARRIZO_IV_SRCID_CP_COMPUTE_QUERY_STATUS               0x000000c7  /* 199 */
+
+#define VISLANDS30_IV_SRCID_CP_WAIT_REG_MEM_POLL_TIMEOUT       0x000000c0  /* 192 */
+#define VISLANDS30_IV_SRCID_CP_SEM_SIG_INCOMPL                 0x000000c1  /* 193 */
+#define VISLANDS30_IV_SRCID_CP_PREEMPT_ACK                     0x000000c2  /* 194 */
+#define VISLANDS30_IV_SRCID_CP_GENERAL_PROT_FAULT              0x000000c3  /* 195 */
+#define VISLANDS30_IV_SRCID_CP_GDS_ALLOC_ERROR                 0x000000c4  /* 196 */
+#define VISLANDS30_IV_SRCID_CP_ECC_ERROR                       0x000000c5  /* 197 */
+
+#define VISLANDS30_IV_SRCID_RLC_STRM_PERF_MONITOR              0x000000ca  /* 202 */
+
+#define VISLANDS30_IV_SDMA_ATOMIC_SRC_ID                       0x000000da  /* 218 */
+
+#define VISLANDS30_IV_SRCID_SDMA_ECC_ERROR                     0x000000dc  /* 220 */
+
+#define VISLANDS30_IV_SRCID_SDMA_TRAP                                 0x000000e0  /* 224 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_INCOMPLETE                0x000000e1  /* 225 */
+#define VISLANDS30_IV_SRCID_SDMA_SEM_WAIT                      0x000000e2  /* 226 */
+
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER            0x000000e5  /* 229 */
+
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH         0x000000e6  /* 230 */
+#define VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW         0x000000e7  /* 231 */
+
+#define VISLANDS30_IV_SRCID_GRBM_READ_TIMEOUT_ERR              0x000000e8  /* 232 */
+#define VISLANDS30_IV_SRCID_GRBM_REG_GUI_IDLE                  0x000000e9  /* 233 */
+
+#define VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG                   0x000000ef  /* 239 */
+
+#define VISLANDS30_IV_SRCID_SDMA_PREEMPT                       0x000000f0  /* 240 */
+#define VISLANDS30_IV_SRCID_SDMA_VM_HOLE                       0x000000f2  /* 242 */
+#define VISLANDS30_IV_SRCID_SDMA_CTXEMPTY                      0x000000f3  /* 243 */
+#define VISLANDS30_IV_SRCID_SDMA_DOORBELL_INVALID              0x000000f4  /* 244 */
+#define VISLANDS30_IV_SRCID_SDMA_FROZEN                        0x000000f5  /* 245 */
+#define VISLANDS30_IV_SRCID_SDMA_POLL_TIMEOUT                  0x000000f6  /* 246 */
+#define VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE                    0x000000f7  /* 247 */
+
+#define VISLANDS30_IV_SRCID_CG_THERMAL_TRIG                    0x000000f8  /* 248 */
+
+#define VISLANDS30_IV_SRCID_SMU_DISP_TIMER_TRIGGER             0x000000fd  /* 253 */
+
+/* These are not "real" source ids defined by HW */
+#define VISLANDS30_IV_SRCID_VM_CONTEXT_ALL                     0x00000100  /* 256 */
+#define VISLANDS30_IV_EXTID_VM_CONTEXT0_ALL                    0
+#define VISLANDS30_IV_EXTID_VM_CONTEXT1_ALL                    1
+
+
+/* IV Extended IDs */
+#define VISLANDS30_IV_EXTID_NONE                               0x00000000
+#define VISLANDS30_IV_EXTID_INVALID                            0xffffffff
+
 #endif // _IVSRCID_VISLANDS30_H_
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h
new file mode 100644 (file)
index 0000000..8024138
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA0_4_0_H__
+#define __IRQSRCS_SDMA0_4_0_H__
+
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_RTN_DONE         217     /* 0xD9 SDMA atomic*_rtn ops complete  */
+#define SDMA0_4_0__SRCID__SDMA_ATOMIC_TIMEOUT          218     /* 0xDA SDMA atomic CMPSWAP loop timeout  */
+#define SDMA0_4_0__SRCID__SDMA_IB_PREEMPT              219     /* 0xDB SDMA mid-command buffer preempt interrupt  */
+#define SDMA0_4_0__SRCID__SDMA_ECC                     220     /* 0xDC ECC Error  */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_FAULT              221     /* 0xDD Page Fault Error from UTCL2 when nack=3  */
+#define SDMA0_4_0__SRCID__SDMA_PAGE_NULL               222     /* 0xDE Page Null from UTCL2 when nack=2  */
+#define SDMA0_4_0__SRCID__SDMA_XNACK                   223     /* 0xDF Page retry timeout after UTCL2 returns nack=1  */
+#define SDMA0_4_0__SRCID__SDMA_TRAP                    224     /* 0xE0 Trap  */
+#define SDMA0_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT  225     /* 0xE1 GPF (Sem incomplete timeout)  */
+#define SDMA0_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT   226     /* 0xE2 Semaphore wait fail timeout  */
+#define SDMA0_4_0__SRCID__SDMA_SRAM_ECC                228     /* 0xE4 SRAM ECC Error  */
+#define SDMA0_4_0__SRCID__SDMA_PREEMPT                 240     /* 0xF0 SDMA New Run List  */
+#define SDMA0_4_0__SRCID__SDMA_VM_HOLE                 242     /* 0xF2 MC or SEM address in VM hole  */
+#define SDMA0_4_0__SRCID__SDMA_CTXEMPTY                243     /* 0xF3 Context Empty  */
+#define SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID        244     /* 0xF4 Doorbell BE invalid  */
+#define SDMA0_4_0__SRCID__SDMA_FROZEN                  245     /* 0xF5 SDMA Frozen  */
+#define SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT            246     /* 0xF6 SRBM read poll timeout  */
+#define SDMA0_4_0__SRCID__SDMA_SRBMWRITE               247     /* 0xF7 SRBM write protection  */
+
+#endif /* __IRQSRCS_SDMA0_4_0_H__ */
+
+
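
On SOC15 ASICs the IH ring also carries a client ID, so these SDMA 4.0 source IDs get registered against the SDMA client rather than the legacy one. A hedged sketch follows; it assumes the SOC15_IH_CLIENTID_SDMA0 enumerator and the four-argument amdgpu_irq_add_id() form seen elsewhere in this commit, and register_sdma0_trap() is invented for illustration.

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"

static int register_sdma0_trap(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *trap_irq)
{
	/* route SDMA0 trap entries (0xE0) to the given source */
	return amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
				 SDMA0_4_0__SRCID__SDMA_TRAP, trap_irq);
}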
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h
new file mode 100644 (file)
index 0000000..d12a356
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_SDMA1_4_0_H__
+#define __IRQSRCS_SDMA1_4_0_H__
+
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_RTN_DONE         217     /* 0xD9 SDMA atomic*_rtn ops complete  */
+#define SDMA1_4_0__SRCID__SDMA_ATOMIC_TIMEOUT          218     /* 0xDA SDMA atomic CMPSWAP loop timeout  */
+#define SDMA1_4_0__SRCID__SDMA_IB_PREEMPT              219     /* 0xDB SDMA mid-command buffer preempt interrupt  */
+#define SDMA1_4_0__SRCID__SDMA_ECC                     220     /* 0xDC ECC Error  */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_FAULT              221     /* 0xDD Page Fault Error from UTCL2 when nack=3  */
+#define SDMA1_4_0__SRCID__SDMA_PAGE_NULL               222     /* 0xDE Page Null from UTCL2 when nack=2  */
+#define SDMA1_4_0__SRCID__SDMA_XNACK                   223     /* 0xDF Page retry timeout after UTCL2 returns nack=1  */
+#define SDMA1_4_0__SRCID__SDMA_TRAP                    224     /* 0xE0 Trap  */
+#define SDMA1_4_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT  225     /* 0xE1 GPF (Sem incomplete timeout)  */
+#define SDMA1_4_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT   226     /* 0xE2 Semaphore wait fail timeout  */
+#define SDMA1_4_0__SRCID__SDMA_SRAM_ECC                228     /* 0xE4 SRAM ECC Error  */
+#define SDMA1_4_0__SRCID__SDMA_PREEMPT                 240     /* 0xF0 SDMA New Run List  */
+#define SDMA1_4_0__SRCID__SDMA_VM_HOLE                 242     /* 0xF2 MC or SEM address in VM hole  */
+#define SDMA1_4_0__SRCID__SDMA_CTXEMPTY                243     /* 0xF3 Context Empty  */
+#define SDMA1_4_0__SRCID__SDMA_DOORBELL_INVALID        244     /* 0xF4 Doorbell BE invalid  */
+#define SDMA1_4_0__SRCID__SDMA_FROZEN                  245     /* 0xF5 SDMA Frozen  */
+#define SDMA1_4_0__SRCID__SDMA_POLL_TIMEOUT            246     /* 0xF6 SRBM read poll timeout  */
+#define SDMA1_4_0__SRCID__SDMA_SRBMWRITE               247     /* 0xF7 SRBM write protection  */
+
+#endif /* __IRQSRCS_SDMA1_4_0_H__ */
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
similarity index 75%
rename from drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
rename to drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h
index 7a65206a6d21a01f2b6ee161fb04c7b156cf951f..02bab4673cd4e0d6b9ea813476db5dd08a567ea4 100644 (file)
  *
  */
 
-#ifndef __SOC_BOUNDING_BOX_H__
-#define __SOC_BOUNDING_BOX_H__
+#ifndef __IRQSRCS_SMUIO_9_0_H__
+#define __IRQSRCS_SMUIO_9_0_H__
 
-#include "dml_common_defs.h"
+#define SMUIO_9_0__SRCID__SMUIO_GPIO19                 83              /* GPIO19 interrupt  */
 
-void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
-voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
-double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+#endif /* __IRQSRCS_SMUIO_9_0_H__ */
 
-#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h b/drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h
new file mode 100644 (file)
index 0000000..5218bc5
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_THM_9_0_H__
+#define __IRQSRCS_THM_9_0_H__
+
+#define THM_9_0__SRCID__THM_DIG_THERM_L2H              0               /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
+#define THM_9_0__SRCID__THM_DIG_THERM_H2L              1               /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */
+
+#endif /* __IRQSRCS_THM_9_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h b/drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h
new file mode 100644 (file)
index 0000000..fb041ae
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_UVD_7_0_H__
+#define __IRQSRCS_UVD_7_0_H__
+
+#define UVD_7_0__SRCID__UVD_ENC_GEN_PURP                119
+#define UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY             120
+#define UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT   124             /* UVD system message interrupt  */
+
+#endif /* __IRQSRCS_UVD_7_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h
new file mode 100644 (file)
index 0000000..3440bab
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCE_4_0_H__
+#define __IRQSRCS_VCE_4_0_H__
+
+#define VCE_4_0__CTXID__VCE_TRAP_GENERAL_PURPOSE               0
+#define VCE_4_0__CTXID__VCE_TRAP_LOW_LATENCY                   1
+#define VCE_4_0__CTXID__VCE_TRAP_REAL_TIME                     2
+
+#endif /* __IRQSRCS_VCE_4_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h
new file mode 100644 (file)
index 0000000..e595170
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VCN_1_0_H__
+#define __IRQSRCS_VCN_1_0_H__
+
+#define VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE         119     /* 0x77 Encoder General Purpose  */
+#define VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY             120     /* 0x78 Encoder Low Latency  */
+#define VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT    124     /* 0x7C UVD system message interrupt  */
+
+#endif /* __IRQSRCS_VCN_1_0_H__ */
+
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h
new file mode 100644 (file)
index 0000000..d130936
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __IRQSRCS_VMC_1_0_H__
+#define __IRQSRCS_VMC_1_0_H__
+
+
+#define VMC_1_0__SRCID__VM_FAULT                            0
+#define VMC_1_0__SRCID__VM_CONTEXT0_ALL                     256
+#define VMC_1_0__SRCID__VM_CONTEXT1_ALL                     257
+
+#define UTCL2_1_0__SRCID__FAULT                             0       /* UTC L2 has encountered a fault or retry scenario */
+
+
+#endif /* __IRQSRCS_VMC_1_0_H__ */
index 5733fbee07f7fd6cca26ac15d348117bad0bddfe..14391b06080ce4566ebbae1d8fd1a8313ac36bc8 100644 (file)
@@ -47,6 +47,17 @@ enum kfd_preempt_type {
        KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
 };
 
+struct kfd_vm_fault_info {
+       uint64_t        page_addr;
+       uint32_t        vmid;
+       uint32_t        mc_id;
+       uint32_t        status;
+       bool            prot_valid;
+       bool            prot_read;
+       bool            prot_write;
+       bool            prot_exec;
+};
+
 struct kfd_cu_info {
        uint32_t num_shader_engines;
        uint32_t num_shader_arrays_per_engine;
@@ -259,6 +270,21 @@ struct tile_config {
  * IB to the corresponding ring (ring type). The IB is executed with the
  * specified VMID in a user mode context.
  *
+ * @get_vm_fault_info: Return information about a recent VM fault on
+ * GFXv7 and v8. If multiple VM faults occurred since the last call of
+ * this function, it will return information about the first of those
+ * faults. On GFXv9 VM fault information is fully contained in the IH
+ * packet and this function is not needed.
+ *
+ * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
+ * IH ring entry. This function allows the KFD ISR to get the VMID
+ * from the fault status register as early as possible.
+ *
+ * @gpu_recover: Lets kgd reset the gpu after kfd detects a CPC hang
+ *
+ * @set_compute_idle: Indicates that compute is idle on a device. This
+ * can be used to change power profiles depending on compute activity.
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
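
A hedged sketch of how the two fault hooks fit together, following the descriptions above; example_handle_vm_fault() and the assumption that get_vm_fault_info() returns 0 on success with the struct filled are illustrative, not code from this commit.

static void example_handle_vm_fault(struct kgd_dev *kgd,
				    const struct kfd2kgd_calls *f2g)
{
	struct kfd_vm_fault_info info;
	uint32_t vmid;

	/* Hawaii: the IH entry lacks the VMID, so fetch it from the
	 * fault status register as early as possible */
	vmid = f2g->read_vmid_from_vmfault_reg(kgd);

	/* GFXv7/v8 only; reports the first fault since the last call */
	if (!f2g->get_vm_fault_info(kgd, &info))
		pr_debug("VM fault: vmid %u addr 0x%llx status 0x%x\n",
			 vmid, (unsigned long long)info.page_addr,
			 info.status);
}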
@@ -374,6 +400,14 @@ struct kfd2kgd_calls {
        int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
                        uint32_t vmid, uint64_t gpu_addr,
                        uint32_t *ib_cmd, uint32_t ib_len);
+
+       int (*get_vm_fault_info)(struct kgd_dev *kgd,
+                       struct kfd_vm_fault_info *info);
+       uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+       void (*gpu_recover)(struct kgd_dev *kgd);
+
+       void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
 };
 
 /**
@@ -399,6 +433,10 @@ struct kfd2kgd_calls {
  * @schedule_evict_and_restore_process: Schedules work queue that will prepare
  * for safe eviction of KFD BOs that belong to the specified process.
  *
+ * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
+ *
+ * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu
+ *
  * This structure contains function callback pointers so the kgd driver
  * will notify to the amdkfd about certain status changes.
  *
@@ -417,6 +455,8 @@ struct kgd2kfd_calls {
        int (*resume_mm)(struct mm_struct *mm);
        int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
                        struct dma_fence *fence);
+       int  (*pre_reset)(struct kfd_dev *kfd);
+       int  (*post_reset)(struct kfd_dev *kfd);
 };
 
 int kgd2kfd_init(unsigned interface_version,
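
A hedged sketch of the intended calling sequence: amdgpu brackets its reset path with the two new callbacks so amdkfd can quiesce its queues and resume them afterwards. example_gpu_reset() is invented for illustration; only the callback names and the notify-before/notify-after ordering come from the kernel-doc above.

static int example_gpu_reset(struct kfd_dev *kfd,
			     const struct kgd2kfd_calls *kgd2kfd)
{
	int ret;

	/* tell amdkfd a reset is imminent */
	ret = kgd2kfd->pre_reset(kfd);
	if (ret)
		return ret;

	/* ... the actual ASIC reset would happen here ... */

	/* tell amdkfd the reset completed */
	return kgd2kfd->post_reset(kfd);
}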
index 06f08f34a110d2a685dc69cfb72830a2884844cc..6a41b81c7325fd640271d01497e5ea095d3fc1d7 100644 (file)
@@ -192,7 +192,6 @@ struct amd_pp_simple_clock_info;
 struct amd_pp_display_configuration;
 struct amd_pp_clock_info;
 struct pp_display_clock_request;
-struct pp_wm_sets_with_clock_ranges_soc15;
 struct pp_clock_levels_with_voltage;
 struct pp_clock_levels_with_latency;
 struct amd_pp_clocks;
@@ -232,16 +231,19 @@ struct amd_pm_funcs {
        void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
        int (*switch_power_profile)(void *handle, enum PP_SMC_POWER_PROFILE type, bool en);
 /* export to amdgpu */
-       void (*powergate_uvd)(void *handle, bool gate);
-       void (*powergate_vce)(void *handle, bool gate);
        struct amd_vce_state *(*get_vce_clock_state)(void *handle, u32 idx);
        int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
                        enum amd_pm_state_type *user_state);
        int (*load_firmware)(void *handle);
        int (*wait_for_fw_loading_complete)(void *handle);
+       int (*set_powergating_by_smu)(void *handle,
+                               uint32_t block_type, bool gate);
        int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
        int (*set_power_limit)(void *handle, uint32_t n);
        int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
+       int (*get_power_profile_mode)(void *handle, char *buf);
+       int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+       int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
 /* export to DC */
        u32 (*get_sclk)(void *handle, bool low);
        u32 (*get_mclk)(void *handle, bool low);
@@ -261,15 +263,12 @@ struct amd_pm_funcs {
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_voltage *clocks);
        int (*set_watermarks_for_clocks_ranges)(void *handle,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+                                               void *clock_ranges);
        int (*display_clock_voltage_request)(void *handle,
                                struct pp_display_clock_request *clock);
        int (*get_display_mode_validation_clocks)(void *handle,
                struct amd_pp_simple_clock_info *clocks);
-       int (*get_power_profile_mode)(void *handle, char *buf);
-       int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
-       int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
-       int (*set_mmhub_powergating_by_smu)(void *handle);
+       int (*notify_smu_enable_pwe)(void *handle);
 };
 
 #endif
index d567be49c31b8c6e346dba9f65a7dbc955f05db7..7a646f94b4788186ceb080fbba8414def8ca8105 100644 (file)
@@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle)
 static int pp_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
 {
-       struct amdgpu_device *adev = handle;
-       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-       int ret;
-
-       if (!hwmgr || !hwmgr->pm_en)
-               return 0;
-
-       if (hwmgr->hwmgr_func->gfx_off_control) {
-               /* Enable/disable GFX off through SMU */
-               ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
-                                                        state == AMD_PG_STATE_GATE);
-               if (ret)
-                       pr_err("gfx off control failed!\n");
-       }
-
-       if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
-               pr_debug("%s was not implemented.\n", __func__);
-               return 0;
-       }
-
-       /* Enable/disable GFX per cu powergating through SMU */
-       return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
-                       state == AMD_PG_STATE_GATE);
+       return 0;
 }
 
 static int pp_suspend(void *handle)
@@ -1020,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
 static int pp_get_current_clocks(void *handle,
                struct amd_pp_clock_info *clocks)
 {
-       struct amd_pp_simple_clock_info simple_clocks;
+       struct amd_pp_simple_clock_info simple_clocks = { 0 };
        struct pp_clock_info hw_clocks;
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
@@ -1056,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
        clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
        clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
 
-       clocks->max_clocks_state = simple_clocks.level;
+       if (simple_clocks.level == 0)
+               clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+       else
+               clocks->max_clocks_state = simple_clocks.level;
 
        if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1118,17 +1099,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
 }
 
 static int pp_set_watermarks_for_clocks_ranges(void *handle,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+               void *clock_ranges)
 {
        struct pp_hwmgr *hwmgr = handle;
        int ret = 0;
 
-       if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+       if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
                return -EINVAL;
 
        mutex_lock(&hwmgr->smu_lock);
        ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
-                       wm_with_clock_ranges);
+                       clock_ranges);
        mutex_unlock(&hwmgr->smu_lock);
 
        return ret;
@@ -1159,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
        if (!hwmgr || !hwmgr->pm_en ||!clocks)
                return -EINVAL;
 
+       clocks->level = PP_DAL_POWERLEVEL_7;
+
        mutex_lock(&hwmgr->smu_lock);
 
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
@@ -1168,19 +1151,78 @@ static int pp_get_display_mode_validation_clocks(void *handle,
        return ret;
 }
 
-static int pp_set_mmhub_powergating_by_smu(void *handle)
+static int pp_dpm_powergate_mmhub(void *handle)
 {
        struct pp_hwmgr *hwmgr = handle;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+       if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
+               pr_info("%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+}
+
+static int pp_dpm_powergate_gfx(void *handle, bool gate)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return 0;
+
+       if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
 
-       return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+       return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+                               uint32_t block_type, bool gate)
+{
+       int ret = 0;
+
+       switch (block_type) {
+       case AMD_IP_BLOCK_TYPE_UVD:
+       case AMD_IP_BLOCK_TYPE_VCN:
+               pp_dpm_powergate_uvd(handle, gate);
+               break;
+       case AMD_IP_BLOCK_TYPE_VCE:
+               pp_dpm_powergate_vce(handle, gate);
+               break;
+       case AMD_IP_BLOCK_TYPE_GMC:
+               pp_dpm_powergate_mmhub(handle);
+               break;
+       case AMD_IP_BLOCK_TYPE_GFX:
+               ret = pp_dpm_powergate_gfx(handle, gate);
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+               pr_info("%s was not implemented.\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&hwmgr->smu_lock);
+       hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+       mutex_unlock(&hwmgr->smu_lock);
+
+       return 0;
 }
 
 static const struct amd_pm_funcs pp_dpm_funcs = {
@@ -1189,8 +1231,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .force_performance_level = pp_dpm_force_performance_level,
        .get_performance_level = pp_dpm_get_performance_level,
        .get_current_power_state = pp_dpm_get_current_power_state,
-       .powergate_vce = pp_dpm_powergate_vce,
-       .powergate_uvd = pp_dpm_powergate_uvd,
        .dispatch_tasks = pp_dpm_dispatch_tasks,
        .set_fan_control_mode = pp_dpm_set_fan_control_mode,
        .get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1210,6 +1250,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .get_vce_clock_state = pp_dpm_get_vce_clock_state,
        .switch_power_profile = pp_dpm_switch_power_profile,
        .set_clockgating_by_smu = pp_set_clockgating_by_smu,
+       .set_powergating_by_smu = pp_set_powergating_by_smu,
        .get_power_profile_mode = pp_get_power_profile_mode,
        .set_power_profile_mode = pp_set_power_profile_mode,
        .odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1227,5 +1268,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
        .display_clock_voltage_request = pp_display_clock_voltage_request,
        .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
-       .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+       .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
 };
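
With powergate_uvd/powergate_vce dropped from the exported table, callers now gate an IP block through the single set_powergating_by_smu() hook, which the dispatch above fans out per block type. A hedged usage sketch; example_gate_uvd() and its parameters are invented, while AMD_IP_BLOCK_TYPE_UVD is one of the types the dispatcher handles.

static void example_gate_uvd(const struct amd_pm_funcs *funcs,
			     void *pp_handle, bool gate)
{
	/* UVD/VCN, VCE, GMC and GFX all funnel through one entry point */
	if (funcs->set_powergating_by_smu)
		funcs->set_powergating_by_smu(pp_handle,
					      AMD_IP_BLOCK_TYPE_UVD, gate);
}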
index a0bb921fac2285d8e289cb31c429ceba152a73cf..53207e76b0f348a9ba82cb7bc347e78a6c7daab3 100644 (file)
@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+                                       void *clock_ranges)
 {
        PHM_FUNC_CHECK(hwmgr);
 
@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
                return -EINVAL;
 
        return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
-                       wm_with_clock_ranges);
+                                                               clock_ranges);
 }
 
 int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
index e63bc47dc715f12dcac2280ab66d80a5e0b03d7d..8994aa5c8cf80cb56734a3737c6bffc39bb3cfe0 100644 (file)
@@ -81,7 +81,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
                return -EINVAL;
 
        hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
-       hwmgr->power_source = PP_PowerSource_AC;
        hwmgr->pp_table_version = PP_TABLE_V1;
        hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -148,10 +147,10 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
                smu7_init_function_pointers(hwmgr);
                break;
        case AMDGPU_FAMILY_AI:
-               hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
                switch (hwmgr->chip_id) {
                case CHIP_VEGA10:
                case CHIP_VEGA20:
+                       hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
                        hwmgr->smumgr_funcs = &vega10_smu_funcs;
                        vega10_hwmgr_init(hwmgr);
                        break;
@@ -236,6 +235,11 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr)
        ret = hwmgr->hwmgr_func->backend_init(hwmgr);
        if (ret)
                goto err1;
+       /* make sure DC limits are valid */
+       if ((hwmgr->dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+           (hwmgr->dyn_state.max_clock_voltage_on_dc.mclk == 0))
+               hwmgr->dyn_state.max_clock_voltage_on_dc =
+                       hwmgr->dyn_state.max_clock_voltage_on_ac;
 
        ret = psm_init_power_state_table(hwmgr);
        if (ret)
index 7047e29755c352f2686266af3f80ee4d7ab60857..01dc46dc9c8a0f3de72f584b26adb282284b0006 100644 (file)
@@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
                switch (hwmgr->chip_id) {
                case CHIP_TONGA:
                case CHIP_FIJI:
-                       *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
-                       *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+                       *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+                       *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
                        return;
                case CHIP_POLARIS11:
                case CHIP_POLARIS10:
                case CHIP_POLARIS12:
-                       *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
-                       *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+                       *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+                       *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
                        return;
                default:
                        break;
index 5325661fedffb9480b26fb7b94269a30fb32334a..d27c1c9df2868696887157845aa13eca5b7348d1 100644 (file)
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
        return 0;
 }
 
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+                       struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+                       struct atom_firmware_info_v3_2 *fw_info)
+{
+       uint32_t frequency = 0;
+
+       boot_values->ulRevision = fw_info->firmware_revision;
+       boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+       boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+       boot_values->usVddc     = fw_info->bootup_vddc_mv;
+       boot_values->usVddci    = fw_info->bootup_vddci_mv;
+       boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+       boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+       boot_values->ucCoolingID = fw_info->coolingsolution_id;
+       boot_values->ulSocClk   = 0;
+       boot_values->ulDCEFClk   = 0;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+               boot_values->ulSocClk   = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+               boot_values->ulDCEFClk  = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+               boot_values->ulEClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+               boot_values->ulVClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+               boot_values->ulDClk     = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+                       struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+                       struct atom_firmware_info_v3_1 *fw_info)
+{
+       uint32_t frequency = 0;
+
+       boot_values->ulRevision = fw_info->firmware_revision;
+       boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+       boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+       boot_values->usVddc     = fw_info->bootup_vddc_mv;
+       boot_values->usVddci    = fw_info->bootup_vddci_mv;
+       boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+       boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+       boot_values->ucCoolingID = fw_info->coolingsolution_id;
+       boot_values->ulSocClk   = 0;
+       boot_values->ulDCEFClk   = 0;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+               boot_values->ulSocClk   = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+               boot_values->ulDCEFClk  = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+               boot_values->ulEClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+               boot_values->ulVClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+               boot_values->ulDClk     = frequency;
+}
+
 int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
                        struct pp_atomfwctrl_bios_boot_up_values *boot_values)
 {
-       struct atom_firmware_info_v3_1 *info = NULL;
+       struct atom_firmware_info_v3_2 *fwinfo_3_2;
+       struct atom_firmware_info_v3_1 *fwinfo_3_1;
+       struct atom_common_table_header *info = NULL;
        uint16_t ix;
 
        ix = GetIndexIntoMasterDataTable(firmwareinfo);
-       info = (struct atom_firmware_info_v3_1 *)
+       info = (struct atom_common_table_header *)
                smu_atom_get_data_table(hwmgr->adev,
                                ix, NULL, NULL, NULL);
 
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
                return -EINVAL;
        }
 
-       boot_values->ulRevision = info->firmware_revision;
-       boot_values->ulGfxClk   = info->bootup_sclk_in10khz;
-       boot_values->ulUClk     = info->bootup_mclk_in10khz;
-       boot_values->usVddc     = info->bootup_vddc_mv;
-       boot_values->usVddci    = info->bootup_vddci_mv;
-       boot_values->usMvddc    = info->bootup_mvddc_mv;
-       boot_values->usVddGfx   = info->bootup_vddgfx_mv;
-       boot_values->ucCoolingID = info->coolingsolution_id;
-       boot_values->ulSocClk   = 0;
-       boot_values->ulDCEFClk   = 0;
+       if ((info->format_revision == 3) && (info->content_revision == 2)) {
+               fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+               pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+                               boot_values, fwinfo_3_2);
+       } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+               fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+               pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+                               boot_values, fwinfo_3_1);
+       } else {
+               pr_info("Fw info table revision does not match!");
+               return -EINVAL;
+       }
 
        return 0;
 }
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
        param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
        param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
 
+       param->Vr2_I2C_address = info->Vr2_I2C_address;
+
        return 0;
 }
index fe10aa4db5e64f721fbd462ffa6127b0e06851fc..22e21668c93a429239688fd7509328fc0d870406 100644 (file)
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
        uint32_t   ulUClk;
        uint32_t   ulSocClk;
        uint32_t   ulDCEFClk;
+       uint32_t   ulEClk;
+       uint32_t   ulVClk;
+       uint32_t   ulDClk;
        uint16_t   usVddc;
        uint16_t   usVddci;
        uint16_t   usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
        uint8_t  acggfxclkspreadenabled;
        uint8_t  acggfxclkspreadpercent;
        uint16_t acggfxclkspreadfreq;
+
+       uint8_t Vr2_I2C_address;
 };
 
 int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
index 35bd9870ab10811cd46c79adc3d8020267e828cd..4e1fd53938458ed3ff488ce5ad451abf12fe0480 100644 (file)
@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
                                        ATOM_Tonga_Voltage_Lookup_Record,
                                        entries, vddc_lookup_pp_tables, i);
                record->us_calculated = 0;
-               record->us_vdd = atom_record->usVdd;
-               record->us_cac_low = atom_record->usCACLow;
-               record->us_cac_mid = atom_record->usCACMid;
-               record->us_cac_high = atom_record->usCACHigh;
+               record->us_vdd = le16_to_cpu(atom_record->usVdd);
+               record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
+               record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
+               record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
        }
 
        *lookup_table = table;
index d4bc83e813896903b33b22fca4dbed1b88b2c159..a63e006533243ba1621d851e05e0f5f5434f056e 100644 (file)
@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
 
        clocks->num_levels = 0;
        for (i = 0; i < pclk_vol_table->count; i++) {
-               clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+               clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
                clocks->data[i].latency_in_us = latency_required ?
                                                smu10_get_mem_latency(hwmgr,
                                                pclk_vol_table->entries[i].clk) :
@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 
        clocks->num_levels = 0;
        for (i = 0; i < pclk_vol_table->count; i++) {
-               clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+               clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
                clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
                clocks->num_levels++;
        }
@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 }
 
 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+               void *clock_ranges)
 {
        struct smu10_hwmgr *data = hwmgr->backend;
+       struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
        Watermarks_t *table = &(data->water_marks_table);
        int result = 0;
 
@@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
 }
 
-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
 {
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
 }
@@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .asic_setup = smu10_setup_asic_task,
        .power_state_set = smu10_set_power_state_tasks,
        .dynamic_state_management_disable = smu10_disable_dpm_tasks,
-       .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+       .powergate_mmhub = smu10_powergate_mmhub,
        .smus_notify_pwe = smu10_smus_notify_pwe,
        .gfx_off_control = smu10_gfx_off_control,
        .display_clock_voltage_request = smu10_display_clock_voltage_request,
+       .powergate_gfx = smu10_gfx_off_control,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
index 6d72a5600917c48d038da97a01c9d52e1f1e2985..683b29a993666513d0330291ef80a11522df381a 100644 (file)
@@ -39,13 +39,6 @@ static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
                        PPSMC_MSG_VCEDPM_Disable);
 }
 
-static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
-       return smum_send_msg_to_smc(hwmgr, enable ?
-                       PPSMC_MSG_SAMUDPM_Enable :
-                       PPSMC_MSG_SAMUDPM_Disable);
-}
-
 static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
        if (!bgate)
@@ -60,13 +53,6 @@ static int smu7_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
        return smu7_enable_disable_vce_dpm(hwmgr, !bgate);
 }
 
-static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       if (!bgate)
-               smum_update_smc_table(hwmgr, SMU_SAMU_TABLE);
-       return smu7_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
 int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
 {
        if (phm_cf_want_uvd_power_gating(hwmgr))
@@ -107,35 +93,15 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SamuPowerGating))
-               return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SAMPowerOFF);
-       return 0;
-}
-
-static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
-{
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_SamuPowerGating))
-               return smum_send_msg_to_smc(hwmgr,
-                               PPSMC_MSG_SAMPowerON);
-       return 0;
-}
-
 int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
        data->uvd_power_gated = false;
        data->vce_power_gated = false;
-       data->samu_power_gated = false;
 
        smu7_powerup_uvd(hwmgr);
        smu7_powerup_vce(hwmgr);
-       smu7_powerup_samu(hwmgr);
 
        return 0;
 }
@@ -195,26 +161,6 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
        }
 }
 
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-
-       if (data->samu_power_gated == bgate)
-               return 0;
-
-       data->samu_power_gated = bgate;
-
-       if (bgate) {
-               smu7_update_samu_dpm(hwmgr, true);
-               smu7_powerdown_samu(hwmgr);
-       } else {
-               smu7_powerup_samu(hwmgr);
-               smu7_update_samu_dpm(hwmgr, false);
-       }
-
-       return 0;
-}
-
 int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                        const uint32_t *msg_id)
 {
@@ -470,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
  * Powerplay will only control the static per CU Power Gating.
  * Dynamic per CU Power Gating will be done in gfx.
  */
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
index 1ddce023218ac2067dbbc81c1786efbef545c818..fc8f8a6acc7223d7bb6b812e5e74e91e35dd430d 100644 (file)
 void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
 void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
 int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
 int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
                                        const uint32_t *msg_id);
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
 
 #endif
index f8e866ceda02222a7c59e92a2d63cec1c4ea9dde..052e60dfaf9fd77003bc49e82eb2d35646fdace2 100644 (file)
@@ -48,6 +48,8 @@
 #include "processpptables.h"
 #include "pp_thermal.h"
 
+#include "ivsrcid/ivsrcid_vislands30.h"
+
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
 #define MC_CG_ARB_FREQ_F2           0x0c
@@ -885,6 +887,60 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
        data->odn_dpm_table.max_vddc = max_vddc;
 }
 
+static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+       struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       uint32_t i;
+
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+
+       if (table_info == NULL)
+               return;
+
+       for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+               if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
+                                       data->dpm_table.sclk_table.dpm_levels[i].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+                       break;
+               }
+       }
+
+       for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+               if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
+                                       data->dpm_table.mclk_table.dpm_levels[i].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+                       break;
+               }
+       }
+
+       dep_table = table_info->vdd_dep_on_mclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
+
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+                       return;
+               }
+       }
+
+       dep_table = table_info->vdd_dep_on_sclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+                       return;
+               }
+       }
+       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+               data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+       }
+}
+
 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -904,10 +960,13 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 
        /* initialize ODN table */
        if (hwmgr->od_enabled) {
-               smu7_setup_voltage_range_from_vbios(hwmgr);
-               smu7_odn_initial_default_setting(hwmgr);
+               if (data->odn_dpm_table.max_vddc) {
+                       smu7_check_dpm_table_updated(hwmgr);
+               } else {
+                       smu7_setup_voltage_range_from_vbios(hwmgr);
+                       smu7_odn_initial_default_setting(hwmgr);
+               }
        }
-
        return 0;
 }
 
@@ -1521,7 +1580,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->current_profile_setting.sclk_up_hyst = 0;
        data->current_profile_setting.sclk_down_hyst = 100;
        data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
-       data->current_profile_setting.bupdate_sclk = 1;
+       data->current_profile_setting.bupdate_mclk = 1;
        data->current_profile_setting.mclk_up_hyst = 0;
        data->current_profile_setting.mclk_down_hyst = 100;
        data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
@@ -2820,7 +2879,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state *request_ps,
                        const struct pp_power_state *current_ps)
 {
-
+       struct amdgpu_device *adev = hwmgr->adev;
        struct smu7_power_state *smu7_ps =
                                cast_phw_smu7_power_state(&request_ps->hardware);
        uint32_t sclk;
@@ -2843,12 +2902,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                 "VI should always have 2 performance levels",
                                );
 
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+       max_limits = adev->pm.ac_power ?
                        &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
                        &(hwmgr->dyn_state.max_clock_voltage_on_dc);
 
        /* Cap clock DPM tables at DC MAX if it is in DC. */
-       if (PP_PowerSource_DC == hwmgr->power_source) {
+       if (!adev->pm.ac_power) {
                for (i = 0; i < smu7_ps->performance_level_count; i++) {
                        if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
                                smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
@@ -3126,7 +3185,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenLow);
        performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-                       state_entry->ucPCIELaneHigh);
+                       state_entry->ucPCIELaneLow);
 
        performance_level = &(smu7_power_state->performance_levels
                        [smu7_power_state->performance_level_count++]);
@@ -3717,8 +3776,9 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
        uint32_t i;
 
        for (i = 0; i < dpm_table->count; i++) {
-               if ((dpm_table->dpm_levels[i].value < low_limit)
-               || (dpm_table->dpm_levels[i].value > high_limit))
+               /* skip the trim if OD is enabled */
+               if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+                   || dpm_table->dpm_levels[i].value > high_limit))
                        dpm_table->dpm_levels[i].enabled = false;
                else
                        dpm_table->dpm_levels[i].enabled = true;
@@ -3762,10 +3822,8 @@ static int smu7_generate_dpm_level_enable_mask(
        const struct smu7_power_state *smu7_ps =
                        cast_const_phw_smu7_power_state(states->pnew_state);
 
-       /*skip the trim if od is enabled*/
-       if (!hwmgr->od_enabled)
-               result = smu7_trim_dpm_states(hwmgr, smu7_ps);
 
+       result = smu7_trim_dpm_states(hwmgr, smu7_ps);
        if (result)
                return result;
 
@@ -4049,17 +4107,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
 
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
                        AMDGPU_IH_CLIENTID_LEGACY,
-                       230,
+                       VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
                        source);
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
                        AMDGPU_IH_CLIENTID_LEGACY,
-                       231,
+                       VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
                        source);
 
        /* Register CTF(GPIO_19) interrupt */
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
                        AMDGPU_IH_CLIENTID_LEGACY,
-                       83,
+                       VISLANDS30_IV_SRCID_GPIO_19,
                        source);
 
        return 0;
@@ -4244,7 +4302,6 @@ static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
 
        data->uvd_power_gated = false;
        data->vce_power_gated = false;
-       data->samu_power_gated = false;
 
        return 0;
 }
@@ -4555,12 +4612,12 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
                        return -EINVAL;
                dep_sclk_table = table_info->vdd_dep_on_sclk;
                for (i = 0; i < dep_sclk_table->count; i++)
-                       clocks->clock[i] = dep_sclk_table->entries[i].clk;
+                       clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
                clocks->count = dep_sclk_table->count;
        } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
                sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
                for (i = 0; i < sclk_table->count; i++)
-                       clocks->clock[i] = sclk_table->entries[i].clk;
+                       clocks->clock[i] = sclk_table->entries[i].clk * 10;
                clocks->count = sclk_table->count;
        }
 
@@ -4592,7 +4649,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
                        return -EINVAL;
                dep_mclk_table = table_info->vdd_dep_on_mclk;
                for (i = 0; i < dep_mclk_table->count; i++) {
-                       clocks->clock[i] = dep_mclk_table->entries[i].clk;
+                       clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
                        clocks->latency[i] = smu7_get_mem_latency(hwmgr,
                                                dep_mclk_table->entries[i].clk);
                }
@@ -4600,7 +4657,7 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
        } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
                mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
                for (i = 0; i < mclk_table->count; i++)
-                       clocks->clock[i] = mclk_table->entries[i].clk;
+                       clocks->clock[i] = mclk_table->entries[i].clk * 10;
                clocks->count = mclk_table->count;
        }
        return 0;
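
The * 10 conversions here and in the smu8/vega10 hunks below follow from the PPTable convention of storing clocks in 10 kHz units while the amd_pp_clocks interface reports kHz. A standalone sketch of the unit handling (the convention is inferred from these conversions, not stated in the patch):

        #include <stdint.h>

        /* dependency-table entries are kept in 10 kHz units; exported
         * clock lists are in kHz, hence the multiply by ten */
        static inline uint32_t pp_clk_10khz_to_khz(uint32_t clk_10khz)
        {
                return clk_10khz * 10;
        }

        /* e.g. an entry of 60000 (600 MHz) is reported as 600000 kHz */
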
@@ -4739,60 +4796,6 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
        return true;
 }
 
-static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       uint32_t i;
-
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
-       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-
-       if (table_info == NULL)
-               return;
-
-       for (i=0; i<data->dpm_table.sclk_table.count; i++) {
-               if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
-                                       data->dpm_table.sclk_table.dpm_levels[i].value) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-                       break;
-               }
-       }
-
-       for (i=0; i<data->dpm_table.mclk_table.count; i++) {
-               if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
-                                       data->dpm_table.mclk_table.dpm_levels[i].value) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-                       break;
-               }
-       }
-
-       dep_table = table_info->vdd_dep_on_mclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);
-
-       for (i=0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
-                       return;
-               }
-       }
-
-       dep_table = table_info->vdd_dep_on_sclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
-       for (i=0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
-                       return;
-               }
-       }
-       if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
-               data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
-       }
-}
-
 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size)
@@ -5043,7 +5046,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
        .get_fan_control_mode = smu7_get_fan_control_mode,
        .force_clock_level = smu7_force_clock_level,
        .print_clock_levels = smu7_print_clock_levels,
-       .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+       .powergate_gfx = smu7_powergate_gfx,
        .get_sclk_od = smu7_get_sclk_od,
        .set_sclk_od = smu7_set_sclk_od,
        .get_mclk_od = smu7_get_mclk_od,
index c91e75db6a8e51757f4b654692c962ec785c2900..3784ce6e50ab4a43a89d6ad12551c6c9bf20d02f 100644 (file)
@@ -310,7 +310,6 @@ struct smu7_hwmgr {
        /* ---- Power Gating States ---- */
        bool                           uvd_power_gated;
        bool                           vce_power_gated;
-       bool                           samu_power_gated;
        bool                           need_long_memory_training;
 
        /* Application power optimization parameters */
index c952845833d72b5b11c2e94c95cb1b35b81fdae4..5e19f5977eb19e551eecad7f72f9966bfbe8c768 100644 (file)
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
        {   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
 
        {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
+       {   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+
+       {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
        {   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
 
        {   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
index 50690c72b2ea1ea816a5de9149a45079ec059257..0adfc5392cd375f425da25ffd49767518c927c23 100644 (file)
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+/* convert from 8-bit vid to real voltage in mV*4 */
 static uint32_t smu8_convert_8Bit_index_to_voltage(
                        struct pp_hwmgr *hwmgr, uint16_t voltage)
 {
@@ -1604,17 +1605,17 @@ static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type
        switch (type) {
        case amd_pp_disp_clock:
                for (i = 0; i < clocks->count; i++)
-                       clocks->clock[i] = data->sys_info.display_clock[i];
+                       clocks->clock[i] = data->sys_info.display_clock[i] * 10;
                break;
        case amd_pp_sys_clock:
                table = hwmgr->dyn_state.vddc_dependency_on_sclk;
                for (i = 0; i < clocks->count; i++)
-                       clocks->clock[i] = table->entries[i].clk;
+                       clocks->clock[i] = table->entries[i].clk * 10;
                break;
        case amd_pp_mem_clock:
                clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
                for (i = 0; i < clocks->count; i++)
-                       clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
+                       clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
                break;
        default:
                return -1;
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        case AMDGPU_PP_SENSOR_VDDNB:
                tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
                        CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
-               vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+               vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
                *((uint32_t *)value) = vddnb;
                return 0;
        case AMDGPU_PP_SENSOR_VDDGFX:
                tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
                        CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
-               vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+               vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
                *((uint32_t *)value) = vddgfx;
                return 0;
        case AMDGPU_PP_SENSOR_UVD_VCLK:
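
Per the comment added above smu8_convert_8Bit_index_to_voltage(), the helper returns the voltage scaled by four (mV*4), so both sensor paths now divide by 4 to report plain millivolts. A standalone sketch of the read-out scaling, which is the only point being illustrated:

        #include <stdint.h>

        /* the VID-to-voltage helper yields mV*4; callers divide it back down */
        static inline uint32_t vid_scaled_to_mv(uint32_t mv_times_4)
        {
                return mv_times_4 / 4;
        }
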
index 93a3d022ba47a7d6a20cc7a1f8b763bf3eb58b06..2aab1b4759459fb421443b30d563a1a4e1860e3a 100644 (file)
@@ -25,6 +25,9 @@
 #include "ppatomctrl.h"
 #include "ppsmc.h"
 #include "atom.h"
+#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
+#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
 
 uint8_t convert_to_vid(uint16_t vddc)
 {
@@ -543,17 +546,17 @@ int phm_irq_process(struct amdgpu_device *adev,
        uint32_t src_id = entry->src_id;
 
        if (client_id == AMDGPU_IH_CLIENTID_LEGACY) {
-               if (src_id == 230)
+               if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
                        pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
                                                PCI_BUS_NUM(adev->pdev->devfn),
                                                PCI_SLOT(adev->pdev->devfn),
                                                PCI_FUNC(adev->pdev->devfn));
-               else if (src_id == 231)
+               else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
                        pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
                                        PCI_BUS_NUM(adev->pdev->devfn),
                                        PCI_SLOT(adev->pdev->devfn),
                                        PCI_FUNC(adev->pdev->devfn));
-               else if (src_id == 83)
+               else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
                        pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
                                        PCI_BUS_NUM(adev->pdev->devfn),
                                        PCI_SLOT(adev->pdev->devfn),
@@ -594,17 +597,17 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
 
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
                        SOC15_IH_CLIENTID_THM,
-                       0,
+                       THM_9_0__SRCID__THM_DIG_THERM_L2H,
                        source);
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
                        SOC15_IH_CLIENTID_THM,
-                       1,
+                       THM_9_0__SRCID__THM_DIG_THERM_H2L,
                        source);
 
        /* Register CTF(GPIO_19) interrupt */
        amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
                        SOC15_IH_CLIENTID_ROM_SMUIO,
-                       83,
+                       SMUIO_9_0__SRCID__SMUIO_GPIO19,
                        source);
 
        return 0;
@@ -652,7 +655,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
 }
 
 int smu_set_watermarks_for_clocks_ranges(void *wt_table,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+               struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
 {
        uint32_t i;
        struct watermarks *table = wt_table;
@@ -660,49 +663,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        if (!table || !wm_with_clock_ranges)
                return -EINVAL;
 
-       if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+       if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
                return -EINVAL;
 
-       for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+       for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
                table->WatermarkRow[1][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+                       1000);
                table->WatermarkRow[1][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
+                       1000);
                table->WatermarkRow[1][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+                       1000);
                table->WatermarkRow[1][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+                       1000);
                table->WatermarkRow[1][i].WmSetting = (uint8_t)
-                               wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+                               wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
        }
 
-       for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+       for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
                table->WatermarkRow[0][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+                       1000);
                table->WatermarkRow[0][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+                       1000);
                table->WatermarkRow[0][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+                       1000);
                table->WatermarkRow[0][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+                       1000);
                table->WatermarkRow[0][i].WmSetting = (uint8_t)
-                               wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+                               wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
        return 0;
 }
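
The renamed dm_pp_* inputs carry clocks in kHz, and the divide-by-1000 converts them to the MHz values the 16-bit watermark fields appear to expect (an inference from the conversions above, not stated in the patch). A standalone sketch of that conversion:

        #include <stdint.h>

        static inline uint16_t wm_khz_to_mhz(uint32_t clk_khz)
        {
                /* 16-bit SMU field: kHz / 1000 = MHz */
                return (uint16_t)(clk_khz / 1000);
        }
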
index 916cc01e7652828ad66dda7e0cd0b3062d063e7f..5454289d5226c768a7ee69b280d9165e19fbb568 100644 (file)
@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
                struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
 
 int smu_set_watermarks_for_clocks_ranges(void *wt_table,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+               struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
 
 #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
index 05e680d55dbbe06a6c951aa33d599e2dd6584424..fb86c24394ff463f3ee565125d72e27434889038 100644 (file)
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
 
-#define MEM_FREQ_LOW_LATENCY        25000
-#define MEM_FREQ_HIGH_LATENCY       80000
-#define MEM_LATENCY_HIGH            245
-#define MEM_LATENCY_LOW             35
-#define MEM_LATENCY_ERR             0xFFFF
-
 #define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
 
@@ -295,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
        struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
        struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+       struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
        uint32_t i;
+       int result;
+
+       result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+       if (!result) {
+               data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+               data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+       }
 
        od_lookup_table = &odn_table->vddc_lookup_table;
        vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2078,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
        if (data->smu_features[GNLD_AVFS].supported) {
                result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
                if (!result) {
-                       data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
-                       data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
-
                        pp_table->MinVoltageVid = (uint8_t)
                                        convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
                        pp_table->MaxVoltageVid = (uint8_t)
@@ -2414,6 +2413,40 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
+{
+       struct vega10_hwmgr *data = hwmgr->backend;
+       struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
+       struct phm_ppt_v2_information *table_info = hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
+       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
+       uint32_t i;
+
+       dep_table = table_info->vdd_dep_on_mclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
+
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
+                       return;
+               }
+       }
+
+       dep_table = table_info->vdd_dep_on_sclk;
+       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
+       for (i = 0; i < dep_table->count; i++) {
+               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
+                       return;
+               }
+       }
+
+       if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
+               data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
+               data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+       }
+}
+
 /**
 * Initializes the SMC table and uploads it
 *
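
vega10_check_dpm_table_updated() only accumulates DPMTABLE_OD_UPDATE_* bits; they are consumed when the DPM levels are regenerated (see vega10_populate_and_upload_sclk_mclk_dpm_levels() below), and a VDDC-only delta is widened to a full SCLK+MCLK update, presumably because a voltage change requires both clock tables to be repopulated. A condensed sketch of the consumer side, paraphrasing that later hunk:

        if (hwmgr->od_enabled &&
            (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)) {
                /* overwrite stock gfx levels with the user's ODN clocks */
                for (i = 0; i < dpm_table->gfx_table.count; i++)
                        dpm_table->gfx_table.dpm_levels[i].value =
                                odn_table->vdd_dep_on_sclk.entries[i].clk;
        }
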
@@ -2430,6 +2463,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
        PPTable_t *pp_table = &(data->smc_state_table.pp_table);
        struct pp_atomfwctrl_voltage_table voltage_table;
        struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
+       struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
 
        result = vega10_setup_default_dpm_tables(hwmgr);
        PP_ASSERT_WITH_CODE(!result,
@@ -2437,8 +2471,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
                        return result);
 
        /* initialize ODN table */
-       if (hwmgr->od_enabled)
-               vega10_odn_initial_default_setting(hwmgr);
+       if (hwmgr->od_enabled) {
+               if (odn_table->max_vddc) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
+                       vega10_check_dpm_table_updated(hwmgr);
+               } else {
+                       vega10_odn_initial_default_setting(hwmgr);
+               }
+       }
 
        pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
                        VOLTAGE_OBJ_SVID2,  &voltage_table);
@@ -2861,11 +2901,6 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
        vega10_enable_disable_PCC_limit_feature(hwmgr, true);
 
-       if ((hwmgr->smu_version == 0x001c2c00) ||
-                       (hwmgr->smu_version == 0x001c2d00))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
-
        smum_send_msg_to_smc_with_parameter(hwmgr,
                PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
 
@@ -3061,6 +3096,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                                struct pp_power_state  *request_ps,
                        const struct pp_power_state *current_ps)
 {
+       struct amdgpu_device *adev = hwmgr->adev;
        struct vega10_power_state *vega10_ps =
                                cast_phw_vega10_power_state(&request_ps->hardware);
        uint32_t sclk;
@@ -3086,12 +3122,12 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
        if (vega10_ps->performance_level_count != 2)
                pr_info("VI should always have 2 performance levels");
 
-       max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+       max_limits = adev->pm.ac_power ?
                        &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
                        &(hwmgr->dyn_state.max_clock_voltage_on_dc);
 
        /* Cap clock DPM tables at DC MAX if it is in DC. */
-       if (PP_PowerSource_DC == hwmgr->power_source) {
+       if (!adev->pm.ac_power) {
                for (i = 0; i < vega10_ps->performance_level_count; i++) {
                        if (vega10_ps->performance_levels[i].mem_clock >
                                max_limits->mclk)
@@ -3181,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                /* Find the lowest MCLK frequency that is within
                 * the tolerable latency defined in DAL
                 */
-               latency = 0;
+               latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
                for (i = 0; i < data->mclk_latency_table.count; i++) {
                        if ((data->mclk_latency_table.entries[i].latency <= latency) &&
                                (data->mclk_latency_table.entries[i].frequency >=
@@ -3223,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
 {
        int result = 0;
        struct vega10_hwmgr *data = hwmgr->backend;
+       struct vega10_dpm_table *dpm_table = &data->dpm_table;
+       struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
+       struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
+       int count;
 
        if (!data->need_update_dpm_table)
                return 0;
 
+       if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+               for (count = 0; count < dpm_table->gfx_table.count; count++)
+                       dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+       }
+
+       odn_clk_table = &odn_table->vdd_dep_on_mclk;
+       if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+               for (count = 0; count < dpm_table->mem_table.count; count++)
+                       dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+       }
+
        if (data->need_update_dpm_table &
                        (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
                result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3674,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
 {
        smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 0 : 1);
+                       has_disp ? 1 : 0);
 }
 
 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
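
The polarity fix above makes vega10_notify_smc_display_change() pass its has_disp flag through to PPSMC_MSG_SetUclkFastSwitch unchanged, and the caller in the next hunk only reports "no display" when several unsynchronized displays rule out fast UCLK switching. A condensed sketch of the resulting decision, boolean-folded from that hunk:

        bool single_or_synced =
                hwmgr->display_config->num_display <= 1 ||
                hwmgr->display_config->multi_monitor_in_sync ||
                hwmgr->display_config->nb_pstate_switch_disable;

        vega10_notify_smc_display_change(hwmgr, single_or_synced);
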
@@ -3749,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
        uint32_t i;
        struct pp_display_clock_request clock_req;
 
-       if (hwmgr->display_config->num_display > 1)
+       if ((hwmgr->display_config->num_display > 1) &&
+            !hwmgr->display_config->multi_monitor_in_sync &&
+            !hwmgr->display_config->nb_pstate_switch_disable)
                vega10_notify_smc_display_change(hwmgr, false);
        else
                vega10_notify_smc_display_change(hwmgr, true);
@@ -3765,7 +3818,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
 
        if (i < dpm_table->count) {
                clock_req.clock_type = amd_pp_dcef_clock;
-               clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
+               clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
                if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
                        smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -4022,28 +4075,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
                        table_info->vdd_dep_on_sclk;
        uint32_t i;
 
+       clocks->num_levels = 0;
        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].clk) {
                        clocks->data[clocks->num_levels].clocks_in_khz =
-                                       dep_table->entries[i].clk;
+                                       dep_table->entries[i].clk * 10;
                        clocks->num_levels++;
                }
        }
 
 }
 
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
-               uint32_t clock)
-{
-       if (clock >= MEM_FREQ_LOW_LATENCY &&
-                       clock < MEM_FREQ_HIGH_LATENCY)
-               return MEM_LATENCY_HIGH;
-       else if (clock >= MEM_FREQ_HIGH_LATENCY)
-               return MEM_LATENCY_LOW;
-       else
-               return MEM_LATENCY_ERR;
-}
-
 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
                struct pp_clock_levels_with_latency *clocks)
 {
@@ -4052,26 +4094,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
                        table_info->vdd_dep_on_mclk;
        struct vega10_hwmgr *data = hwmgr->backend;
+       uint32_t j = 0;
        uint32_t i;
 
-       clocks->num_levels = 0;
-       data->mclk_latency_table.count = 0;
-
        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].clk) {
-                       clocks->data[clocks->num_levels].clocks_in_khz =
-                       data->mclk_latency_table.entries
-                       [data->mclk_latency_table.count].frequency =
-                                       dep_table->entries[i].clk;
-                       clocks->data[clocks->num_levels].latency_in_us =
-                       data->mclk_latency_table.entries
-                       [data->mclk_latency_table.count].latency =
-                                       vega10_get_mem_latency(hwmgr,
-                                               dep_table->entries[i].clk);
-                       clocks->num_levels++;
-                       data->mclk_latency_table.count++;
+
+                       clocks->data[j].clocks_in_khz =
+                                               dep_table->entries[i].clk * 10;
+                       data->mclk_latency_table.entries[j].frequency =
+                                                       dep_table->entries[i].clk;
+                       clocks->data[j].latency_in_us =
+                               data->mclk_latency_table.entries[j].latency = 25;
+                       j++;
                }
        }
+       clocks->num_levels = data->mclk_latency_table.count = j;
 }
 
 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
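
With the MEM_FREQ_*/MEM_LATENCY_* defines deleted near the top of this file and vega10_get_mem_latency() removed, every reported memory level now carries a flat 25 us latency. For reference, the removed tiering, reconstructed from the deleted lines (clock values are in the dependency table's native units):

        /* old behaviour, now gone:
         *   25000 <= clk < 80000  ->  245 us
         *   clk >= 80000          ->   35 us
         *   otherwise             ->  0xFFFF (error marker)
         * new behaviour: latency_in_us = 25 for every level
         */
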
@@ -4084,7 +4122,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
        uint32_t i;
 
        for (i = 0; i < dep_table->count; i++) {
-               clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+               clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
                clocks->data[i].latency_in_us = 0;
                clocks->num_levels++;
        }
@@ -4100,7 +4138,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
        uint32_t i;
 
        for (i = 0; i < dep_table->count; i++) {
-               clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+               clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
                clocks->data[i].latency_in_us = 0;
                clocks->num_levels++;
        }
@@ -4160,7 +4198,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
        }
 
        for (i = 0; i < dep_table->count; i++) {
-               clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+               clocks->data[i].clocks_in_khz = dep_table->entries[i].clk  * 10;
                clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
                                entries[dep_table->entries[i].vddInd].us_vdd);
                clocks->num_levels++;
@@ -4173,9 +4211,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+                                                       void *clock_range)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
+       struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
        Watermarks_t *table = &(data->smc_state_table.water_marks_table);
        int result = 0;
 
@@ -4695,40 +4734,6 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
        return true;
 }
 
-static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
-{
-       struct vega10_hwmgr *data = hwmgr->backend;
-       struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
-       struct phm_ppt_v2_information *table_info = hwmgr->pptable;
-       struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
-       struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
-       uint32_t i;
-
-       dep_table = table_info->vdd_dep_on_mclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
-
-       for (i = 0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
-                       return;
-               }
-       }
-
-       dep_table = table_info->vdd_dep_on_sclk;
-       odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
-       for (i = 0; i < dep_table->count; i++) {
-               if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
-                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
-                       return;
-               }
-       }
-
-       if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
-               data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
-               data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
-       }
-}
-
 static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
                                                enum PP_OD_DPM_TABLE_COMMAND type)
 {
index aadd6cbc7e85d2db38974d52bd6abc66095c6221..339820da9e6a89c7d6b8f2142284935f2e9b7512 100644 (file)
@@ -370,7 +370,6 @@ struct vega10_hwmgr {
        /* ---- Power Gating States ---- */
        bool                           uvd_power_gated;
        bool                           vce_power_gated;
-       bool                           samu_power_gated;
        bool                           need_long_memory_training;
 
        /* Internal settings to apply the application power optimization parameters */
index dbe4b1f66784961ea028b3fcfee564e830617f80..22364875a943e5e32e7e13d5fe2ef79d579f2824 100644 (file)
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
-       int result;
+       int result = 0;
        uint32_t num_se = 0;
        uint32_t count, data;
 
index 782e2098824df6225e2c044bf060626d143a0919..0789d64246ca5dd0567fba6655fe4c5e5684e58c 100644 (file)
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 
        data->registry_data.disallowed_features = 0x0;
        data->registry_data.od_state_in_dc_support = 0;
+       data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
 
        data->registry_data.log_avfs_param = 0;
@@ -422,6 +423,11 @@ static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                        hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
                        hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
 
+       if (hwmgr->feature_mask & PP_GFXOFF_MASK)
+               data->gfxoff_controlled_by_driver = true;
+       else
+               data->gfxoff_controlled_by_driver = false;
+
        return result;
 }
 
@@ -453,43 +459,36 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
  */
 static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
 {
-       dpm_state->soft_min_level = 0xff;
-       dpm_state->soft_max_level = 0xff;
-       dpm_state->hard_min_level = 0xff;
-       dpm_state->hard_max_level = 0xff;
+       dpm_state->soft_min_level = 0x0;
+       dpm_state->soft_max_level = 0xffff;
+       dpm_state->hard_min_level = 0x0;
+       dpm_state->hard_max_level = 0xffff;
 }
 
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
-               PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+               PPCLK_e clk_id, uint32_t *num_of_levels)
 {
-       int result;
-       /*
-        * SMU expects the Clock ID to be in the top 16 bits.
-        * Lower 16 bits specify the level however 0xFF is a
-        * special argument the returns the total number of levels
-        */
-       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
-               PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
-               "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
-               return -EINVAL);
-
-       result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+       int ret = 0;
 
-       PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
-               "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
-               return -EINVAL);
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_GetDpmFreqByIndex,
+                       (clk_id << 16 | 0xFF));
+       PP_ASSERT_WITH_CODE(!ret,
+                       "[GetNumOfDpmLevel] failed to get dpm levels!",
+                       return ret);
 
-       PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
-               "[GetNumberDPMLevel] Number of CLK Levels is zero!",
-               return -EINVAL);
+       *num_of_levels = smum_get_argument(hwmgr);
+       PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+                       "[GetNumOfDpmLevel] number of clk levels is invalid!",
+                       return -EINVAL);
 
-       return result;
+       return ret;
 }
 
 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
                PPCLK_e clkID, uint32_t index, uint32_t *clock)
 {
-       int result;
+       int result = 0;
 
        /*
         *SMU expects the Clock ID to be in the top 16 bits.
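
A note on the vega12 hunk above: vega12_init_dpm_state() previously used 0xff as both the floor and the ceiling sentinel, which cannot represent an open 16-bit window; 0x0/0xffff leave the soft and hard clock windows unbounded until a level is explicitly forced. A sketch of how such a window is typically narrowed (an illustration of the sentinel semantics, not code from this patch):

        /* forcing DPM level n collapses the soft window onto that level;
         * the 0x0/0xffff defaults mean "no clamp" until then */
        dpm_state->soft_min_level = dpm_table->dpm_levels[n].value;
        dpm_state->soft_max_level = dpm_table->dpm_levels[n].value;
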
@@ -500,15 +499,36 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
                "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
                return -EINVAL);
 
-       result = vega12_read_arg_from_smc(hwmgr, clock);
-
-       PP_ASSERT_WITH_CODE(*clock != 0,
-               "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
-               return -EINVAL);
+       *clock = smum_get_argument(hwmgr);
 
        return result;
 }
 
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+               struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+       int ret = 0;
+       uint32_t i, num_of_levels, clk;
+
+       ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+       PP_ASSERT_WITH_CODE(!ret,
+                       "[SetupSingleDpmTable] failed to get clk levels!",
+                       return ret);
+
+       dpm_table->count = num_of_levels;
+
+       for (i = 0; i < num_of_levels; i++) {
+               ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+               PP_ASSERT_WITH_CODE(!ret,
+                       "[SetupSingleDpmTable] failed to get clk of specific level!",
+                       return ret);
+               dpm_table->dpm_levels[i].value = clk;
+               dpm_table->dpm_levels[i].enabled = true;
+       }
+
+       return ret;
+}
+
 /*
  * This function is to initialize all DPM state tables
  * for SMU based on the dependency table.
@@ -519,224 +539,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
  */
 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 {
-       uint32_t num_levels, i, clock;
 
        struct vega12_hwmgr *data =
                        (struct vega12_hwmgr *)(hwmgr->backend);
-
        struct vega12_single_dpm_table *dpm_table;
+       int ret = 0;
 
        memset(&data->dpm_table, 0, sizeof(data->dpm_table));
 
-       /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+       /* socclk */
        dpm_table = &(data->dpm_table.soc_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
-               &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_SOCCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* gfxclk */
        dpm_table = &(data->dpm_table.gfx_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
-               &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_GFXCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
-       /* Initialize Mclk DPM table based on allow Mclk values */
-       dpm_table = &(data->dpm_table.mem_table);
 
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
-               &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_UCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       /* memclk */
+       dpm_table = &(data->dpm_table.mem_table);
+       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* eclk */
        dpm_table = &(data->dpm_table.eclk_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
-               &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-               PPCLK_ECLK, i, &clock) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
-               return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* vclk */
        dpm_table = &(data->dpm_table.vclk_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
-               &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_VCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* dclk */
        dpm_table = &(data->dpm_table.dclk_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
-               &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_DCLK, i, &clock) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
-               return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
-       /* Assume there is no headless Vega12 for now */
+       /* dcefclk */
        dpm_table = &(data->dpm_table.dcef_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-               PPCLK_DCEFCLK, &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_DCEFCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
+       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 1;
+               dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
        }
-
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* pixclk */
        dpm_table = &(data->dpm_table.pixel_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-               PPCLK_PIXCLK, &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_PIXCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
-       }
-
+       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+                               return ret);
+       } else
+               dpm_table->count = 0;
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* dispclk */
        dpm_table = &(data->dpm_table.display_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-               PPCLK_DISPCLK, &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_DISPCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
-       }
-
+       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 0;
+       }
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
+       /* phyclk */
        dpm_table = &(data->dpm_table.phy_table);
-
-       PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
-               PPCLK_PHYCLK, &num_levels) == 0,
-               "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
-               return -EINVAL);
-
-       dpm_table->count = num_levels;
-
-       for (i = 0; i < num_levels; i++) {
-               PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
-                       PPCLK_PHYCLK, i, &clock) == 0,
-                       "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
-                       return -EINVAL);
-
-               dpm_table->dpm_levels[i].value = clock;
-               dpm_table->dpm_levels[i].enabled = true;
-       }
-
+       if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+               ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+                               return ret);
+       } else {
+               dpm_table->count = 0;
+       }
        vega12_init_dpm_state(&(dpm_table->dpm_state));
 
        /* save a copy of the default DPM table */
@@ -803,6 +735,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
                data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
                data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
                data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+               data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+               data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+               data->vbios_boot_state.vclock = boot_up_values.ulVClk;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
                        (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
@@ -844,6 +779,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+       struct vega12_hwmgr *data =
+                       (struct vega12_hwmgr *)(hwmgr->backend);
+
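+       /*
+        * Seed the powergate bookkeeping from the SMU feature state: a
+        * block whose DPM feature came up enabled is not gated.
+        */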
+       data->uvd_power_gated = true;
+       data->vce_power_gated = true;
+
+       if (data->smu_features[GNLD_DPM_UVD].enabled)
+               data->uvd_power_gated = false;
+
+       if (data->smu_features[GNLD_DPM_VCE].enabled)
+               data->vce_power_gated = false;
+}
+
 static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
        struct vega12_hwmgr *data =
@@ -862,12 +812,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
                        enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
                        data->smu_features[i].enabled = enabled;
                        data->smu_features[i].supported = enabled;
-                       PP_ASSERT(
-                               !data->smu_features[i].allowed || enabled,
-                               "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
                }
        }
 
+       vega12_init_powergate_state(hwmgr);
+
        return 0;
 }
 
@@ -923,6 +872,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+               PPCLK_e clkid, struct vega12_clock_range *clock)
+{
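+       /*
+        * The SMU message argument carries the PPCLK_e id in its upper
+        * 16 bits; each reply is fetched back with smum_get_argument().
+        */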
+       /* AC Max */
+       PP_ASSERT_WITH_CODE(
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+               "[GetClockRanges] Failed to get max ac clock from SMC!",
+               return -EINVAL);
+       clock->ACMax = smum_get_argument(hwmgr);
+
+       /* AC Min */
+       PP_ASSERT_WITH_CODE(
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+               "[GetClockRanges] Failed to get min ac clock from SMC!",
+               return -EINVAL);
+       clock->ACMin = smum_get_argument(hwmgr);
+
+       /* DC Max */
+       PP_ASSERT_WITH_CODE(
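+       /* Bail out on redundant requests; only toggle VCE DPM on a change. */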
+               smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+               "[GetClockRanges] Failed to get max dc clock from SMC!",
+               return -EINVAL);
+       clock->DCMax = smum_get_argument(hwmgr);
+
+       return 0;
+}
+
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
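+       /* Bail out on redundant requests; only toggle UVD DPM on a change. */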
+       struct vega12_hwmgr *data =
+                       (struct vega12_hwmgr *)(hwmgr->backend);
+       uint32_t i;
+
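+       /* Cache every domain's AC/DC limits so later queries stay local. */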
+       for (i = 0; i < PPCLK_COUNT; i++)
+               PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+                                       i, &(data->clk_range[i])),
+                               "Failed to get clk range from SMC!",
+                               return -EINVAL);
+
+       return 0;
+}
+
 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
        int tmp_result, result = 0;
@@ -950,6 +941,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                        "Failed to power control set level!",
                        result = tmp_result);
 
+       result = vega12_get_all_clock_ranges(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "Failed to get all clock ranges!",
+                       return result);
+
        result = vega12_odn_initialize_default_settings(hwmgr);
        PP_ASSERT_WITH_CODE(!result,
                        "Failed to power control set level!",
@@ -978,76 +974,172 @@ static uint32_t vega12_find_lowest_dpm_level(
                        break;
        }
 
+       if (i >= table->count) {
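+               /* No level was enabled; fall back to the lowest entry. */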
+               i = 0;
+               table->dpm_levels[i].enabled = true;
+       }
+
        return i;
 }
 
 static uint32_t vega12_find_highest_dpm_level(
                struct vega12_single_dpm_table *table)
 {
-       uint32_t i = 0;
+       int32_t i = 0;
+
+       PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+                       "[FindHighestDPMLevel] DPM Table has too many entries!",
+                       return MAX_REGULAR_DPM_NUMBER - 1);
 
-       if (table->count <= MAX_REGULAR_DPM_NUMBER) {
-               for (i = table->count; i > 0; i--) {
-                       if (table->dpm_levels[i - 1].enabled)
-                               return i - 1;
-               }
-       } else {
-               pr_info("DPM Table Has Too Many Entries!");
-               return MAX_REGULAR_DPM_NUMBER - 1;
+       for (i = table->count - 1; i >= 0; i--) {
+               if (table->dpm_levels[i].enabled)
+                       break;
        }
 
-       return i;
+       if (i < 0) {
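+               /* No level was enabled; fall back to the lowest entry. */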
+               i = 0;
+               table->dpm_levels[i].enabled = true;
+       }
+
+       return (uint32_t)i;
 }
 
 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
 {
        struct vega12_hwmgr *data = hwmgr->backend;
-       if (data->smc_state_table.gfx_boot_level !=
-                       data->dpm_table.gfx_table.dpm_state.soft_min_level) {
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetSoftMinByFreq,
-                       PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
-               data->dpm_table.gfx_table.dpm_state.soft_min_level =
-                               data->smc_state_table.gfx_boot_level;
+       uint32_t min_freq;
+       int ret = 0;
+
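+       /*
+        * Push the soft minimums (and the UCLK hard minimum) for every
+        * enabled DPM domain; the argument packs (PPCLK_e << 16) | freq.
+        */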
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+               min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+                                       (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set soft min gfxclk!",
+                                       return ret);
        }
 
-       if (data->smc_state_table.mem_boot_level !=
-                       data->dpm_table.mem_table.dpm_state.soft_min_level) {
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetSoftMinByFreq,
-                       PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
-               data->dpm_table.mem_table.dpm_state.soft_min_level =
-                               data->smc_state_table.mem_boot_level;
+       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+               min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set soft min memclk!",
+                                       return ret);
+
+               min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetHardMinByFreq,
+                                       (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set hard min memclk!",
+                                       return ret);
        }
 
-       return 0;
+       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+               min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+                                       (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set soft min vclk!",
+                                       return ret);
+
+               min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+                                       (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set soft min dclk!",
+                                       return ret);
+       }
+
+       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+               min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+                                       (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set soft min eclk!",
+                                       return ret);
+       }
+
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+               min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+                                       (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+                                       "Failed to set soft min socclk!",
+                                       return ret);
+       }
+
+       return ret;
 
 }
 
 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
 {
        struct vega12_hwmgr *data = hwmgr->backend;
-       if (data->smc_state_table.gfx_max_level !=
-               data->dpm_table.gfx_table.dpm_state.soft_max_level) {
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetSoftMaxByFreq,
-                       /* plus the vale by 1 to align the resolution */
-                       PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
-               data->dpm_table.gfx_table.dpm_state.soft_max_level =
-                               data->smc_state_table.gfx_max_level;
+       uint32_t max_freq;
+       int ret = 0;
+
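+       /* Mirror of the min-level upload, using PPSMC_MSG_SetSoftMaxByFreq. */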
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+               max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+                                       (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+                                       "Failed to set soft max gfxclk!",
+                                       return ret);
        }
 
-       if (data->smc_state_table.mem_max_level !=
-               data->dpm_table.mem_table.dpm_state.soft_max_level) {
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetSoftMaxByFreq,
-                       /* plus the vale by 1 to align the resolution */
-                       PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
-               data->dpm_table.mem_table.dpm_state.soft_max_level =
-                               data->smc_state_table.mem_max_level;
+       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+               max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+                                       (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+                                       "Failed to set soft max memclk!",
+                                       return ret);
        }
 
-       return 0;
+       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+               max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+                                       (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+                                       "Failed to set soft max vclk!",
+                                       return ret);
+
+               max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+                                       (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+                                       "Failed to set soft max dclk!",
+                                       return ret);
+       }
+
+       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+               max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+                                       (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+                                       "Failed to set soft max eclk!",
+                                       return ret);
+       }
+
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+               max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+                                       hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+                                       (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+                                       "Failed to set soft max socclk!",
+                                       return ret);
+       }
+
+       return ret;
 }
 
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
@@ -1123,7 +1215,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
                        "Failed to get current package power!",
                        return -EINVAL);
 
-       vega12_read_arg_from_smc(hwmgr, &value);
+       value = smum_get_argument(hwmgr);
        /* power value is an integer */
        *query = value << 8;
 #endif
@@ -1136,14 +1228,11 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
 
        *gfx_freq = 0;
 
-       PP_ASSERT_WITH_CODE(
-                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+       PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
                        "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
-                       return -1);
-       PP_ASSERT_WITH_CODE(
-                       vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
-                       "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
-                       return -1);
+                       return -EINVAL);
+       gfx_clk = smum_get_argument(hwmgr);
 
        *gfx_freq = gfx_clk * 100;
 
@@ -1159,11 +1248,8 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
        PP_ASSERT_WITH_CODE(
                        smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
                        "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
-                       return -1);
-       PP_ASSERT_WITH_CODE(
-                       vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
-                       "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
-                       return -1);
+                       return -EINVAL);
+       mem_clk = smum_get_argument(hwmgr);
 
        *mclk_freq = mem_clk * 100;
 
@@ -1180,16 +1266,12 @@ static int vega12_get_current_activity_percent(
 #if 0
        ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
        if (!ret) {
-               ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
-               if (!ret) {
-                       if (current_activity > 100) {
-                               PP_ASSERT(false,
-                                       "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
-                               current_activity = 100;
-                       }
-               } else
+               current_activity = smum_get_argument(hwmgr);
+               if (current_activity > 100) {
                        PP_ASSERT(false,
-                               "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
+                                 "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
+                       current_activity = 100;
+               }
        } else
                PP_ASSERT(false,
                        "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
@@ -1252,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
        if (data->smu_features[GNLD_DPM_UCLK].enabled)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 0 : 1);
+                       has_disp ? 1 : 0);
 
        return 0;
 }
@@ -1270,7 +1352,6 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
        if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
-                       clk_freq = clock_req->clock_freq_in_khz / 100;
                        clk_select = PPCLK_DCEFCLK;
                        break;
                case amd_pp_disp_clock:
@@ -1306,9 +1387,10 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
                        (struct vega12_hwmgr *)(hwmgr->backend);
        struct PP_Clocks min_clocks = {0};
        struct pp_display_clock_request clock_req;
-       uint32_t clk_request;
 
-       if (hwmgr->display_config->num_display > 1)
+       if ((hwmgr->display_config->num_display > 1) &&
+            !hwmgr->display_config->multi_monitor_in_sync &&
+            !hwmgr->display_config->nb_pstate_switch_disable)
                vega12_notify_smc_display_change(hwmgr, false);
        else
                vega12_notify_smc_display_change(hwmgr, true);
@@ -1319,7 +1401,7 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
 
        if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
                clock_req.clock_type = amd_pp_dcef_clock;
-               clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+               clock_req.clock_freq_in_khz = min_clocks.dcefClock / 10;
                if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
                        if (data->smu_features[GNLD_DS_DCEFCLK].supported)
                                PP_ASSERT_WITH_CODE(
@@ -1333,15 +1415,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
                }
        }
 
-       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
-               clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
-               PP_ASSERT_WITH_CODE(
-                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
-                       "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
-                       return -1);
-               data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
-       }
-
        return 0;
 }
 
@@ -1350,12 +1423,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
        struct vega12_hwmgr *data =
                        (struct vega12_hwmgr *)(hwmgr->backend);
 
-       data->smc_state_table.gfx_boot_level =
-       data->smc_state_table.gfx_max_level =
-                       vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
-       data->smc_state_table.mem_boot_level =
-       data->smc_state_table.mem_max_level =
-                       vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+       uint32_t soft_level;
+
+       soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+       data->dpm_table.gfx_table.dpm_state.soft_min_level =
+               data->dpm_table.gfx_table.dpm_state.soft_max_level =
+               data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+       soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+       data->dpm_table.mem_table.dpm_state.soft_min_level =
+               data->dpm_table.mem_table.dpm_state.soft_max_level =
+               data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
        PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
                        "Failed to upload boot level to highest!",
@@ -1372,13 +1452,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 {
        struct vega12_hwmgr *data =
                        (struct vega12_hwmgr *)(hwmgr->backend);
+       uint32_t soft_level;
+
+       soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+
+       data->dpm_table.gfx_table.dpm_state.soft_min_level =
+               data->dpm_table.gfx_table.dpm_state.soft_max_level =
+               data->dpm_table.gfx_table.dpm_levels[soft_level].value;
 
-       data->smc_state_table.gfx_boot_level =
-       data->smc_state_table.gfx_max_level =
-                       vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
-       data->smc_state_table.mem_boot_level =
-       data->smc_state_table.mem_max_level =
-                       vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+       soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+       data->dpm_table.mem_table.dpm_state.soft_min_level =
+               data->dpm_table.mem_table.dpm_state.soft_max_level =
+               data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
        PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
                        "Failed to upload boot level to highest!",
@@ -1394,17 +1480,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 
 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
-       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
-       data->smc_state_table.gfx_boot_level =
-                       vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
-       data->smc_state_table.gfx_max_level =
-                       vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
-       data->smc_state_table.mem_boot_level =
-                       vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
-       data->smc_state_table.mem_max_level =
-                       vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
        PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
                        "Failed to upload DPM Bootup Levels!",
                        return -1);
@@ -1412,22 +1487,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
                        "Failed to upload DPM Max Levels!",
                        return -1);
+
        return 0;
 }
 
-#if 0
 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
                                uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
 {
-       struct phm_ppt_v2_information *table_info =
-                       (struct phm_ppt_v2_information *)(hwmgr->pptable);
+       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+       struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+       struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+       struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
 
-       if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
-               table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
-               table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+       *sclk_mask = 0;
+       *mclk_mask = 0;
+       *soc_mask  = 0;
+
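+       /* Default to the UMD pstate levels when every table is deep enough. */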
+       if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+           mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+           soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
                *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
-               *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
                *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+               *soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
        }
 
        if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1435,13 +1516,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
                *mclk_mask = 0;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
-               *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
-               *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
-               *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+               *sclk_mask = gfx_dpm_table->count - 1;
+               *mclk_mask = mem_dpm_table->count - 1;
+               *soc_mask  = soc_dpm_table->count - 1;
        }
+
        return 0;
 }
-#endif
 
 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 {
@@ -1465,11 +1546,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                enum amd_dpm_forced_level level)
 {
        int ret = 0;
-#if 0
        uint32_t sclk_mask = 0;
        uint32_t mclk_mask = 0;
        uint32_t soc_mask = 0;
-#endif
 
        switch (level) {
        case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1485,27 +1564,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
                ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
                if (ret)
                        return ret;
-               vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
-               vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+               vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+               vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
        }
-#if 0
-       if (!ret) {
-               if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-                       vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
-               else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
-                       vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
-       }
-#endif
+
        return ret;
 }
 
@@ -1539,24 +1609,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
                PPCLK_e clock_select,
                bool max)
 {
-       int result;
-       *clock = 0;
+       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
-       if (max) {
-                PP_ASSERT_WITH_CODE(
-                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
-                       "[GetClockRanges] Failed to get max clock from SMC!",
-                       return -1);
-               result = vega12_read_arg_from_smc(hwmgr, clock);
-       } else {
-               PP_ASSERT_WITH_CODE(
-                       smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
-                       "[GetClockRanges] Failed to get min clock from SMC!",
-                       return -1);
-               result = vega12_read_arg_from_smc(hwmgr, clock);
-       }
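+       /* Serve the request from the clk_range cache filled at DPM enable. */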
+       if (max)
+               *clock = data->clk_range[clock_select].ACMax;
+       else
+               *clock = data->clk_range[clock_select].ACMin;
 
-       return result;
+       return 0;
 }
 
 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
@@ -1571,12 +1631,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
                return -1;
 
        dpm_table = &(data->dpm_table.gfx_table);
-       ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
-               VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+               MAX_NUM_CLOCKS : dpm_table->count;
 
        for (i = 0; i < ucount; i++) {
                clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
 
                clocks->data[i].latency_in_us = 0;
        }
@@ -1603,13 +1663,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
                return -1;
 
        dpm_table = &(data->dpm_table.mem_table);
-       ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
-               VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+               MAX_NUM_CLOCKS : dpm_table->count;
 
        for (i = 0; i < ucount; i++) {
-               clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
-
+               clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
+               data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
                clocks->data[i].latency_in_us =
                        data->mclk_latency_table.entries[i].latency =
                        vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -1633,12 +1692,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
 
 
        dpm_table = &(data->dpm_table.dcef_table);
-       ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
-               VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+               MAX_NUM_CLOCKS : dpm_table->count;
 
        for (i = 0; i < ucount; i++) {
                clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
 
                clocks->data[i].latency_in_us = 0;
        }
@@ -1661,12 +1720,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
 
 
        dpm_table = &(data->dpm_table.soc_table);
-       ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
-               VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+       ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+               MAX_NUM_CLOCKS : dpm_table->count;
 
        for (i = 0; i < ucount; i++) {
                clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
 
                clocks->data[i].latency_in_us = 0;
        }
@@ -1713,99 +1772,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
 }
 
 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+                                                       void *clock_ranges)
 {
        struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
        Watermarks_t *table = &(data->smc_state_table.water_marks_table);
-       int result = 0;
-       uint32_t i;
+       struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
 
        if (!data->registry_data.disable_water_mark &&
                        data->smu_features[GNLD_DPM_DCEFCLK].supported &&
                        data->smu_features[GNLD_DPM_SOCCLK].supported) {
-               for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
-                       table->WatermarkRow[WM_DCEFCLK][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
-                                       wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
-               }
-
-               for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
-                       table->WatermarkRow[WM_SOCCLK][i].MinClock =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_SOCCLK][i].MaxClock =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_SOCCLK][i].MinUclk =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
-                               cpu_to_le16((uint16_t)
-                               (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
-                               100);
-                       table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
-                                       wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
-               }
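+               /* The per-row WatermarkRow packing moved into the shared smu helper. */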
+               smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
                data->water_marks_bitmap |= WaterMarksExist;
                data->water_marks_bitmap &= ~WaterMarksLoaded;
        }
 
-       return result;
+       return 0;
 }
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
        struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
-       if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-                               AMD_DPM_FORCED_LEVEL_LOW |
-                               AMD_DPM_FORCED_LEVEL_HIGH))
-               return -EINVAL;
+       uint32_t soft_min_level, soft_max_level;
+       int ret = 0;
 
        switch (type) {
        case PP_SCLK:
-               data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
-               data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+               soft_min_level = mask ? (ffs(mask) - 1) : 0;
+               soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+               data->dpm_table.gfx_table.dpm_state.soft_min_level =
+                       data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+               data->dpm_table.gfx_table.dpm_state.soft_max_level =
+                       data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
 
-               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+               ret = vega12_upload_dpm_min_level(hwmgr);
+               PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to lowest!",
-                       return -EINVAL);
+                       return ret);
 
-               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+               ret = vega12_upload_dpm_max_level(hwmgr);
+               PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
-                       return -EINVAL);
+                       return ret);
                break;
 
        case PP_MCLK:
-               data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
-               data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+               soft_min_level = mask ? (ffs(mask) - 1) : 0;
+               soft_max_level = mask ? (fls(mask) - 1) : 0;
 
-               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+               data->dpm_table.mem_table.dpm_state.soft_min_level =
+                       data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+               data->dpm_table.mem_table.dpm_state.soft_max_level =
+                       data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+               ret = vega12_upload_dpm_min_level(hwmgr);
+               PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to lowest!",
-                       return -EINVAL);
+                       return ret);
 
-               PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+               ret = vega12_upload_dpm_max_level(hwmgr);
+               PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
-                       return -EINVAL);
+                       return ret);
 
                break;
 
@@ -1838,8 +1867,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
-                               i, clocks.data[i].clocks_in_khz / 100,
-                               (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+                               i, clocks.data[i].clocks_in_khz / 1000,
+                               (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
                break;
 
        case PP_MCLK:
@@ -1854,8 +1883,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
                                return -1);
                for (i = 0; i < clocks.num_levels; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
-                               i, clocks.data[i].clocks_in_khz / 100,
-                               (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+                               i, clocks.data[i].clocks_in_khz / 1000,
+                               (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
                break;
 
        case PP_PCIE:
@@ -1867,6 +1896,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
        return size;
 }
 
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+       struct vega12_single_dpm_table *dpm_table;
+       bool vblank_too_short = false;
+       bool disable_mclk_switching;
+       uint32_t i, latency;
+
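+       /*
+        * Reset every table's soft/hard window to its full range, then
+        * narrow it for UMD pstates and the current display demands.
+        */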
+       disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+                                 !hwmgr->display_config->multi_monitor_in_sync) ||
+                                 vblank_too_short;
+       latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+       /* gfxclk */
+       dpm_table = &(data->dpm_table.gfx_table);
+       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+               if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               }
+       }
+
+       /* memclk */
+       dpm_table = &(data->dpm_table.mem_table);
+       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+               if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               }
+       }
+
+       /* honour DAL's UCLK Hardmin */
+       if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+               dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+       /* Hardmin is dependent on displayconfig */
+       if (disable_mclk_switching) {
+               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+                       if (data->mclk_latency_table.entries[i].latency <= latency) {
+                               if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
+                                       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       if (hwmgr->display_config->nb_pstate_switch_disable)
+               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       /* vclk */
+       dpm_table = &(data->dpm_table.vclk_table);
+       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               }
+       }
+
+       /* dclk */
+       dpm_table = &(data->dpm_table.dclk_table);
+       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+               if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               }
+       }
+
+       /* socclk */
+       dpm_table = &(data->dpm_table.soc_table);
+       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+               if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               }
+       }
+
+       /* eclk */
+       dpm_table = &(data->dpm_table.eclk_table);
+       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+       dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+       dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+       if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+               if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+               }
+
+               if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+                       dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+                       dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               }
+       }
+
+       return 0;
+}
+
+static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
+               struct vega12_single_dpm_table *dpm_table)
+{
+       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+       int ret = 0;
+
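+       /* Pin the UCLK hard floor to the top level while displays reconfigure. */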
+       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+               PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+                               "[SetUclkToHighestDpmLevel] Dpm table has no entry!",
+                               return -EINVAL);
+               PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
+                               "[SetUclkToHighestDpmLevel] Dpm table has too many entries!",
+                               return -EINVAL);
+
+               dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SetHardMinByFreq,
+                               (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
+                               "[SetUclkToHighestDpmLevel] Set hard min uclk failed!",
+                               return ret);
+       }
+
+       return ret;
+}
+
+static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+       struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+       int ret = 0;
+
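+       /* Tell the SMU no displays are active, then raise the UCLK floor. */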
+       smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_NumOfDisplays, 0);
+
+       ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
+                       &data->dpm_table.mem_table);
+
+       return ret;
+}
+
 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
 {
        struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -1911,6 +2139,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 {
        struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
+       if (data->vce_power_gated == bgate)
+               return;
+
        data->vce_power_gated = bgate;
        vega12_enable_disable_vce_dpm(hwmgr, !bgate);
 }
@@ -1919,6 +2150,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
        struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
 
+       if (data->uvd_power_gated == bgate)
+               return;
+
        data->uvd_power_gated = bgate;
        vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
 }
@@ -2086,6 +2320,38 @@ static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+       struct vega12_hwmgr *data =
+                       (struct vega12_hwmgr *)(hwmgr->backend);
+       int ret = 0;
+
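+       /* Only send the request when the driver controls the gfxoff policy. */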
+       if (data->gfxoff_controlled_by_driver)
+               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+
+       return ret;
+}
+
+static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
+{
+       struct vega12_hwmgr *data =
+                       (struct vega12_hwmgr *)(hwmgr->backend);
+       int ret = 0;
+
+       if (data->gfxoff_controlled_by_driver)
+               ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+
+       return ret;
+}
+
+static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
+{
+       if (enable)
+               return vega12_enable_gfx_off(hwmgr);
+       else
+               return vega12_disable_gfx_off(hwmgr);
+}
+
 static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
        .backend_init = vega12_hwmgr_backend_init,
        .backend_fini = vega12_hwmgr_backend_fini,
@@ -2113,6 +2379,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
        .display_clock_voltage_request = vega12_display_clock_voltage_request,
        .force_clock_level = vega12_force_clock_level,
        .print_clock_levels = vega12_print_clock_levels,
+       .apply_clocks_adjust_rules =
+               vega12_apply_clocks_adjust_rules,
+       .pre_display_config_changed =
+               vega12_pre_display_configuration_changed_task,
        .display_config_changed = vega12_display_configuration_changed_task,
        .powergate_uvd = vega12_power_gate_uvd,
        .powergate_vce = vega12_power_gate_vce,
@@ -2131,6 +2401,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
        .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
        .register_irq_handlers = smu9_register_irq_handlers,
        .start_thermal_controller = vega12_start_thermal_controller,
+       .powergate_gfx = vega12_gfx_off_control,
 };
 
 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
index e81ded1ec1982d55f2b1a5657ee0fc163b3d7c96..b3e424d289941aa85d741cc87d09aa541eb524ee 100644 (file)
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
        uint32_t    mem_clock;
        uint32_t    soc_clock;
        uint32_t    dcef_clock;
+       uint32_t    eclock;
+       uint32_t    dclock;
+       uint32_t    vclock;
 };
 
 #define DPMTABLE_OD_UPDATE_SCLK     0x00000001
@@ -301,6 +304,12 @@ struct vega12_odn_fan_table {
        bool            force_fan_pwm;
 };
 
+struct vega12_clock_range {
+       uint32_t        ACMax;
+       uint32_t        ACMin;
+       uint32_t        DCMax;
+};
+
 struct vega12_hwmgr {
        struct vega12_dpm_table          dpm_table;
        struct vega12_dpm_table          golden_dpm_table;
@@ -382,6 +391,11 @@ struct vega12_hwmgr {
        uint32_t                       smu_version;
        struct smu_features            smu_features[GNLD_FEATURES_MAX];
        struct vega12_smc_state_table  smc_state_table;
+
+       struct vega12_clock_range      clk_range[PPCLK_COUNT];
+
+       /* ---- Gfxoff ---- */
+       bool                           gfxoff_controlled_by_driver;
 };
 
 #define VEGA12_DPM2_NEAR_TDP_DEC                      10
@@ -432,6 +446,8 @@ struct vega12_hwmgr {
 #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL         0x3
 #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL         0x3
 #define VEGA12_UMD_PSTATE_MCLK_LEVEL           0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL         0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL        0x3
 
 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
 
index 888ddca902d894216acee566879f239a47009468..cb3a5b1737c888fc040825dff14021f916447493 100644 (file)
@@ -224,11 +224,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
        ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
        ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
 
-       /* 0xFFFF will disable the ACG feature */
-       if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
-               ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
-               ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
-       }
+       ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
 
        return 0;
 }
index cfd9e6ccb790126a372ff95ca186af4ec16f3301..904eb2c9155b4b45ff21daee35eed31146cdd79f 100644 (file)
@@ -34,11 +34,9 @@ static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
        PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_GetCurrentRpm),
                        "Attempt to get current RPM from SMC Failed!",
-                       return -1);
-       PP_ASSERT_WITH_CODE(!vega12_read_arg_from_smc(hwmgr,
-                       current_rpm),
-                       "Attempt to read current RPM from SMC Failed!",
-                       return -1);
+                       return -EINVAL);
+       *current_rpm = smum_get_argument(hwmgr);
+
        return 0;
 }
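
With vega12_read_arg_from_smc() replaced above, reading a value back from the SMC is a plain request/response pair: send the message, then fetch the reply out of the argument register via smum_get_argument(). A small mock of that pairing (the helper names, the 0x2A message ID, and the 1500 rpm value are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t smc_argument_reg;   /* models the SMC argument register */

    static int send_msg_to_smc(uint16_t msg)
    {
        if (msg == 0x2A)                /* pretend 0x2A is "GetCurrentRpm" */
            smc_argument_reg = 1500;
        return 0;                       /* 0 == message accepted */
    }

    static uint32_t get_argument(void)
    {
        return smc_argument_reg;        /* the reply is fetched separately */
    }

    int main(void)
    {
        if (send_msg_to_smc(0x2A))      /* request... */
            return 1;
        printf("current fan speed: %u rpm\n", get_argument());  /* ...reply */
        return 0;
    }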
 
index a202247c989444bfaee5e7686ddce367b76dd61d..429c9c4322daaeb818d375eaa2cc79dfdf2f8011 100644 (file)
@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
                enum amd_pp_clock_type type,
                struct pp_clock_levels_with_voltage *clocks);
 extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
-               struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+                                               void *clock_ranges);
 extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                struct pp_display_clock_request *clock);
 
index b99fb8ac822c8db7e06b91b6773015b6001a1e56..d3d96260f440673ed39960d999c4094577904282 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/seq_file.h>
 #include "amd_powerplay.h"
 #include "hardwaremanager.h"
-#include "pp_power_source.h"
 #include "hwmgr_ppt.h"
 #include "ppatomctrl.h"
 #include "hwmgr_ppt.h"
@@ -195,7 +194,7 @@ struct pp_smumgr_func {
        int (*request_smu_load_fw)(struct pp_hwmgr  *hwmgr);
        int (*request_smu_load_specific_fw)(struct pp_hwmgr  *hwmgr,
                                            uint32_t firmware);
-       int (*get_argument)(struct pp_hwmgr  *hwmgr);
+       uint32_t (*get_argument)(struct pp_hwmgr  *hwmgr);
        int (*send_msg_to_smc)(struct pp_hwmgr  *hwmgr, uint16_t msg);
        int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr  *hwmgr,
                                          uint16_t msg, uint32_t parameter);
@@ -294,8 +293,7 @@ struct pp_hwmgr_func {
        int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
                        enum amd_pp_clock_type type,
                        struct pp_clock_levels_with_voltage *clocks);
-       int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
-                       struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+       int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
        int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
                        struct pp_display_clock_request *clock);
        int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
@@ -303,7 +301,7 @@ struct pp_hwmgr_func {
        int (*power_off_asic)(struct pp_hwmgr *hwmgr);
        int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
-       int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+       int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
        int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
        int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
        int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
@@ -328,7 +326,7 @@ struct pp_hwmgr_func {
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size);
        int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
-       int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+       int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
        int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
 };
 
@@ -741,7 +739,6 @@ struct pp_hwmgr {
        const struct pp_table_func *pptable_func;
 
        struct pp_power_state    *ps;
-       enum pp_power_source  power_source;
        uint32_t num_ps;
        struct pp_thermal_controller_info thermal_controller;
        bool fan_ctrl_is_in_default_mode;
index 6c22ed9249bfa440210e211683f7b18ba1580687..82550a8a3a3fc28916db07e603c8cbce3bdc33ff 100644 (file)
@@ -29,7 +29,6 @@
 enum SMU_TABLE {
        SMU_UVD_TABLE = 0,
        SMU_VCE_TABLE,
-       SMU_SAMU_TABLE,
        SMU_BIF_TABLE,
 };
 
@@ -47,7 +46,6 @@ enum SMU_MEMBER {
        UcodeLoadStatus,
        UvdBootLevel,
        VceBootLevel,
-       SamuBootLevel,
        LowSclkInterruptThreshold,
        DRAM_LOG_ADDR_H,
        DRAM_LOG_ADDR_L,
@@ -82,7 +80,7 @@ enum SMU10_TABLE_ID {
        SMU10_CLOCKTABLE,
 };
 
-extern int smum_get_argument(struct pp_hwmgr *hwmgr);
+extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
 
 extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
 
index 2f8a3b983cce0c9444cac949ed7ce79a420b06ba..b6ffd08784e7ffeff0e89068bf3a6c0009c2145a 100644 (file)
@@ -412,10 +412,10 @@ typedef struct {
   QuadraticInt_t    ReservedEquation2;
   QuadraticInt_t    ReservedEquation3;
 
-       uint16_t     MinVoltageUlvGfx;
-       uint16_t     MinVoltageUlvSoc;
+  uint16_t     MinVoltageUlvGfx;
+  uint16_t     MinVoltageUlvSoc;
 
-       uint32_t     Reserved[14];
+  uint32_t     Reserved[14];
 
 
 
@@ -483,9 +483,9 @@ typedef struct {
   uint8_t      padding8_4;
 
 
-       uint8_t      PllGfxclkSpreadEnabled;
-       uint8_t      PllGfxclkSpreadPercent;
-       uint16_t     PllGfxclkSpreadFreq;
+  uint8_t      PllGfxclkSpreadEnabled;
+  uint8_t      PllGfxclkSpreadPercent;
+  uint16_t     PllGfxclkSpreadFreq;
 
   uint8_t      UclkSpreadEnabled;
   uint8_t      UclkSpreadPercent;
@@ -495,11 +495,14 @@ typedef struct {
   uint8_t      SocclkSpreadPercent;
   uint16_t     SocclkSpreadFreq;
 
-       uint8_t      AcgGfxclkSpreadEnabled;
-       uint8_t      AcgGfxclkSpreadPercent;
-       uint16_t     AcgGfxclkSpreadFreq;
+  uint8_t      AcgGfxclkSpreadEnabled;
+  uint8_t      AcgGfxclkSpreadPercent;
+  uint16_t     AcgGfxclkSpreadFreq;
 
-       uint32_t     BoardReserved[10];
+  uint8_t      Vr2_I2C_address;
+  uint8_t      padding_vr2[3];
+
+  uint32_t     BoardReserved[9];
 
 
   uint32_t     MmHubPadding[7];
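
The board-parameter hunk above carves the new Vr2_I2C_address byte plus three padding bytes out of the reserved area, shrinking BoardReserved from 10 to 9 dwords, so the overall table layout keeps its size. A compile-time sketch of that invariant, using hypothetical stand-in structs (only the sizes matter here):

    #include <stdint.h>

    struct board_tail_old {
        uint32_t BoardReserved[10];
    };

    struct board_tail_new {
        uint8_t  Vr2_I2C_address;
        uint8_t  padding_vr2[3];
        uint32_t BoardReserved[9];
    };

    /* 1 + 3 + 9*4 == 10*4: the carve-out is size-preserving. */
    _Static_assert(sizeof(struct board_tail_old) == sizeof(struct board_tail_new),
                   "Vr2_I2C_address carve-out must not change the table size");

    int main(void) { return 0; }
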
index 0a200406a1ec243312367a48a37062ef930297b4..8d557accaef2a5ac38d464cdeca3948af23545c4 100644 (file)
@@ -26,7 +26,7 @@
 SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \
          polaris10_smumgr.o iceland_smumgr.o \
          smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \
-         vega12_smumgr.o vegam_smumgr.o
+         vega12_smumgr.o vegam_smumgr.o smu9_smumgr.o
 
 AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
 
index 2d4ec8ac3a088c3ff7f1340030e5764854245a2c..fbe3ef4ee45c66b01e9e1f0ea0e3021854c6be21 100644 (file)
@@ -1614,37 +1614,6 @@ static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-                                       SMU7_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_samu_clock_voltage_dependency_table *samu_table =
-                               hwmgr->dyn_state.samu_clock_voltage_dependency_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(samu_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
-               table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
-               table->SamuLevel[count].MinPhases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int ci_populate_memory_timing_parameters(
                struct pp_hwmgr *hwmgr,
                uint32_t engine_clock,
@@ -2026,10 +1995,6 @@ static int ci_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                "Failed to initialize ACP Level!", return result);
 
-       result = ci_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
        /* need to populate the  ARB settings for the initial state. */
        result = ci_program_memory_timing_parameters(hwmgr);
@@ -2881,6 +2846,89 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       struct smu7_hwmgr *data = hwmgr->backend;
+       struct ci_smumgr *smu_data = hwmgr->smu_backend;
+       struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+                       hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+       uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+       uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+                                               hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+       int32_t i;
+
+       if (PP_CAP(PHM_PlatformCaps_UVDDPM) || uvd_table->count <= 0)
+               smu_data->smc_state_table.UvdBootLevel = 0;
+       else
+               smu_data->smc_state_table.UvdBootLevel = uvd_table->count - 1;
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+                               UvdBootLevel, smu_data->smc_state_table.UvdBootLevel);
+
+       data->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+       for (i = uvd_table->count - 1; i >= 0; i--) {
+               if (uvd_table->entries[i].v <= max_vddc)
+                       data->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+               if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
+                       break;
+       }
+       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+       return 0;
+}
+
+static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       struct smu7_hwmgr *data = hwmgr->backend;
+       struct phm_vce_clock_voltage_dependency_table *vce_table =
+                       hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+       uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+       uint32_t max_vddc = adev->pm.ac_power ? hwmgr->dyn_state.max_clock_voltage_on_ac.vddc :
+                                               hwmgr->dyn_state.max_clock_voltage_on_dc.vddc;
+       int32_t i;
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, DPM_TABLE_475,
+                               VceBootLevel, 0); /* temp hard code to level 0, vce can set min evclk*/
+
+       data->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+
+       for (i = vce_table->count - 1; i >= 0; i--) {
+               if (vce_table->entries[i].v <= max_vddc)
+                       data->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+               if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
+                       break;
+       }
+       ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+                               data->dpm_level_enable_mask.vce_dpm_enable_mask);
+
+       return 0;
+}
+
+static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
+{
+       switch (type) {
+       case SMU_UVD_TABLE:
+               ci_update_uvd_smc_table(hwmgr);
+               break;
+       case SMU_VCE_TABLE:
+               ci_update_vce_smc_table(hwmgr);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
 const struct pp_smumgr_func ci_smu_funcs = {
        .smu_init = ci_smu_init,
        .smu_fini = ci_smu_fini,
@@ -2903,4 +2951,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
        .initialize_mc_reg_table = ci_initialize_mc_reg_table,
        .is_dpm_running = ci_is_dpm_running,
        .update_dpm_settings = ci_update_dpm_settings,
+       .update_smc_table = ci_update_smc_table,
 };
index 53df9405f43a364558f68126a5459b79496a8a39..18048f8e2f130ec27bf993a68373636c0518c171 100644 (file)
@@ -1503,44 +1503,6 @@ static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU73_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               VDDC_VDDCI_DELTA) * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
                int32_t eng_clock, int32_t mem_clock,
                struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2028,10 +1990,6 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                        "Failed to initialize ACP Level!", return result);
 
-       result = fiji_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point
         * (the other states are just copies of the boot state) we only
         * need to populate the  ARB settings for the initial state.
@@ -2378,8 +2336,6 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
@@ -2478,33 +2434,6 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
 static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
 {
        switch (type) {
@@ -2514,9 +2443,6 @@ static int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                fiji_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               fiji_update_samu_smc_table(hwmgr);
-               break;
        default:
                break;
        }
index 415f691c3fa906ee047937f166e0dc61be1f3a6e..9299b93aa09af87e28d9fd3e4e08b27916d4be62 100644 (file)
@@ -1578,12 +1578,6 @@ static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-       SMU71_Discrete_DpmTable *table)
-{
-       return 0;
-}
-
 static int iceland_populate_memory_timing_parameters(
                struct pp_hwmgr *hwmgr,
                uint32_t engine_clock,
@@ -1992,10 +1986,6 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                "Failed to initialize ACP Level!", return result;);
 
-       result = iceland_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-               "Failed to initialize SAMU Level!", return result;);
-
        /* Since only the initial state is completely set up at this point (the other states are just copies of the boot state) we only */
        /* need to populate the  ARB settings for the initial state. */
        result = iceland_program_memory_timing_parameters(hwmgr);
index a8c6524f07e4098153e48c30d104be98298dd5b6..1276f168ff68d75a742d4101d7edb3fcf7a31b1d 100644 (file)
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        SMIO_Pattern vol_level;
        uint32_t mvdd;
-       uint16_t us_mvdd;
 
        table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
                        "in Clock Dependency Table",
                        );
 
-       us_mvdd = 0;
-       if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
-                       (data->mclk_dpm_key_disabled))
-               us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
-       else {
-               if (!polaris10_populate_mvdd_value(hwmgr,
+       if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+                       (data->mclk_dpm_key_disabled)))
+               polaris10_populate_mvdd_value(hwmgr,
                                data->dpm_table.mclk_table.dpm_levels[0].value,
-                               &vol_level))
-                       us_mvdd = vol_level.Voltage;
-       }
+                               &vol_level);
 
        if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
                table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1337,55 +1331,6 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-
-static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU74_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
                int32_t eng_clock, int32_t mem_clock,
                SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -1566,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
        uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
 
-       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+       uint8_t i, stretch_amount, volt_offset = 0;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1617,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
        smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
        /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
+       if (stretch_amount == 0 || stretch_amount > 5) {
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_ClockStretcher);
                PP_ASSERT_WITH_CODE(false,
@@ -1865,10 +1806,6 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(0 == result,
                        "Failed to initialize VCE Level!", return result);
 
-       result = polaris10_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(0 == result,
-                       "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point
         * (the other states are just copies of the boot state) we only
         * need to populate the  ARB settings for the initial state.
@@ -2222,34 +2159,6 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-
 static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
@@ -2276,9 +2185,6 @@ static int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                polaris10_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               polaris10_update_samu_smc_table(hwmgr);
-               break;
        case SMU_BIF_TABLE:
                polaris10_update_bif_smc_table(hwmgr);
        default:
@@ -2357,8 +2263,6 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
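
In the clock-stretcher hunk above, stretch_amount2 was dead code (its declaration is dropped as well), and the old branch ladder only ever rejected fuse values outside 1..5, which the single range check now states directly. A quick exhaustive check that the two predicates agree (helper names are ours, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    static int old_is_invalid(uint8_t s)
    {
        if (s == 1 || s == 2 || s == 5)
            return 0;                   /* stretch_amount2 would be 0 */
        else if (s == 3 || s == 4)
            return 0;                   /* stretch_amount2 would be 1 */
        else
            return 1;                   /* clock stretcher disabled */
    }

    static int new_is_invalid(uint8_t s)
    {
        return s == 0 || s > 5;
    }

    int main(void)
    {
        for (unsigned s = 0; s < 256; s++)
            assert(old_is_invalid((uint8_t)s) == new_is_invalid((uint8_t)s));
        return 0;
    }
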
index 0a563f6fe9ea6446b10f2b268acb75c631297512..bb07d43f3874454a9ba42d83dae54422b5154098 100644 (file)
@@ -68,7 +68,7 @@ static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
-static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
+static uint32_t smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
 
index d644a9bb9078d081639aa09a72af9f92e3612fc2..a029e47c2319c5cf4804b9486cae02992d26e055 100644 (file)
@@ -379,8 +379,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 {
        struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
        uint32_t fw_to_load;
-       int result = 0;
-       struct SMU_DRAMData_TOC *toc;
+       int r = 0;
 
        if (!hwmgr->reload_fw) {
                pr_info("skip reloading...\n");
@@ -421,49 +420,62 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
                           + UCODE_ID_CP_MEC_JT2_MASK;
        }
 
-       toc = (struct SMU_DRAMData_TOC *)smu_data->header;
-       toc->num_entries = 0;
-       toc->structure_version = 1;
+       if (!smu_data->toc) {
+               struct SMU_DRAMData_TOC *toc;
+
+               smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
+               if (!smu_data->toc)
+                       return -ENOMEM;
+               toc = smu_data->toc;
+               toc->num_entries = 0;
+               toc->structure_version = 1;
 
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-       if (!hwmgr->not_vf)
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
                PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
+                               UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+               if (!hwmgr->not_vf)
+                       PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
                                UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
-                               "Failed to Get Firmware Entry.", return -EINVAL);
-
+                               "Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
+       }
+       memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
+                   sizeof(struct SMU_DRAMData_TOC));
        smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
        smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
 
        if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
                pr_err("Fail to Request SMU Load uCode");
 
-       return result;
+       return r;
+
+failed:
+       kfree(smu_data->toc);
+       smu_data->toc = NULL;
+       return r;
 }
 
 /* Check if the FW has been loaded, SMU will not return if loading has not finished. */
@@ -570,7 +582,6 @@ int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
 int smu7_init(struct pp_hwmgr *hwmgr)
 {
        struct smu7_smumgr *smu_data;
-       uint64_t mc_addr = 0;
        int r;
        /* Allocate memory for backend private data */
        smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
@@ -584,15 +595,12 @@ int smu7_init(struct pp_hwmgr *hwmgr)
                PAGE_SIZE,
                AMDGPU_GEM_DOMAIN_VRAM,
                &smu_data->header_buffer.handle,
-               &mc_addr,
+               &smu_data->header_buffer.mc_addr,
                &smu_data->header_buffer.kaddr);
 
        if (r)
                return -EINVAL;
 
-       smu_data->header = smu_data->header_buffer.kaddr;
-       smu_data->header_buffer.mc_addr = mc_addr;
-
        if (!hwmgr->not_vf)
                return 0;
 
@@ -602,7 +610,7 @@ int smu7_init(struct pp_hwmgr *hwmgr)
                PAGE_SIZE,
                AMDGPU_GEM_DOMAIN_VRAM,
                &smu_data->smu_buffer.handle,
-               &mc_addr,
+               &smu_data->smu_buffer.mc_addr,
                &smu_data->smu_buffer.kaddr);
 
        if (r) {
@@ -611,7 +619,6 @@ int smu7_init(struct pp_hwmgr *hwmgr)
                                        &smu_data->header_buffer.kaddr);
                return -EINVAL;
        }
-       smu_data->smu_buffer.mc_addr = mc_addr;
 
        if (smum_is_hw_avfs_present(hwmgr))
                hwmgr->avfs_supported = true;
@@ -633,6 +640,9 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr)
                                        &smu_data->smu_buffer.mc_addr,
                                        &smu_data->smu_buffer.kaddr);
 
+
+       kfree(smu_data->toc);
+       smu_data->toc = NULL;
        kfree(hwmgr->smu_backend);
        hwmgr->smu_backend = NULL;
        return 0;
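
The smu7 firmware-load rework above builds the SMU_DRAMData_TOC once, caches it on the backend, and thereafter only memcpy_toio()s it into the VRAM-mapped header buffer per request, freeing it on fini or on a failed build. A simplified user-space model of that build-once/copy-per-request shape (plain memcpy stands in for memcpy_toio; the types are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toc {
        unsigned num_entries;
        unsigned structure_version;
    };

    struct backend {
        struct toc *toc;        /* built lazily, freed on fini or failure */
        struct toc io_buffer;   /* stands in for the VRAM-mapped header */
    };

    static int request_fw_load(struct backend *b)
    {
        if (!b->toc) {                          /* first call: build and cache */
            b->toc = calloc(1, sizeof(*b->toc));
            if (!b->toc)
                return -1;                      /* -ENOMEM in the driver */
            b->toc->structure_version = 1;
            b->toc->num_entries = 10;           /* firmware entries go here */
        }
        memcpy(&b->io_buffer, b->toc, sizeof(*b->toc));
        return 0;
    }

    int main(void)
    {
        struct backend b = { 0 };

        request_fw_load(&b);    /* builds and caches the TOC */
        request_fw_load(&b);    /* reuses the cached TOC, copy only */
        printf("version %u, %u entries\n",
               b.io_buffer.structure_version, b.io_buffer.num_entries);
        free(b.toc);
        return 0;
    }
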
index 39c9bfda0ab416665864c47b66f29c25c0c6f5fe..01f0538fba6b9e8edc20febbda4178dd4aa545d4 100644 (file)
@@ -37,10 +37,9 @@ struct smu7_buffer_entry {
 };
 
 struct smu7_smumgr {
-       uint8_t *header;
-       uint8_t *mec_image;
        struct smu7_buffer_entry smu_buffer;
        struct smu7_buffer_entry header_buffer;
+       struct SMU_DRAMData_TOC *toc;
 
        uint32_t                             soft_regs_start;
        uint32_t                             dpm_table_start;
index c861d3023474c50fd68e42cb6292db6cfb896fe6..f7e3bc22bb93f8a44d071d1c796f7e5a6b718b80 100644 (file)
@@ -52,10 +52,10 @@ static const enum smu8_scratch_entry firmware_list[] = {
        SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G,
 };
 
-static int smu8_get_argument(struct pp_hwmgr *hwmgr)
+static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
 {
        if (hwmgr == NULL || hwmgr->device == NULL)
-               return -EINVAL;
+               return 0;
 
        return cgs_read_register(hwmgr->device,
                                        mmSMU_MP1_SRBM2P_ARG_0);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
new file mode 100644 (file)
index 0000000..079fc8e
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "vega10_inc.h"
+#include "soc15_common.h"
+#include "pp_debug.h"
+
+
+/* MP Apertures */
+#define MP0_Public                  0x03800000
+#define MP0_SRAM                    0x03900000
+#define MP1_Public                  0x03b00000
+#define MP1_SRAM                    0x03c00004
+
+#define smnMP1_FIRMWARE_FLAGS                                                                           0x3010028
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       uint32_t mp1_fw_flags;
+
+       WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
+                       (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
+
+       mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+
+       if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
+               return true;
+
+       return false;
+}
+
+/*
+ * Check if the SMC has responded to the previous message.
+ *
+ * @param    hwmgr   the address of the powerplay hardware manager.
+ * @return   the response register contents (nonzero once the SMC responds).
+ */
+static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       uint32_t reg;
+       uint32_t ret;
+
+       reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+
+       ret = phm_wait_for_register_unequal(hwmgr, reg,
+                       0, MP1_C2PMSG_90__CONTENT_MASK);
+
+       if (ret)
+               pr_err("No response from smu\n");
+
+       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+/*
+ * Send a message to the SMC, and do not wait for its response.
+ * @param    hwmgr   the address of the powerplay hardware manager.
+ * @param    msg the message to send.
+ * @return   Always returns 0.
+ */
+static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
+                                               uint16_t msg)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+
+       return 0;
+}
+
+/*
+ * Send a message to the SMC, and wait for its response.
+ * @param    hwmgr  the address of the powerplay hardware manager.
+ * @param    msg the message to send.
+ * @return   Always returns 0.
+ */
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       uint32_t ret;
+
+       smu9_wait_for_response(hwmgr);
+
+       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+       smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+       ret = smu9_wait_for_response(hwmgr);
+       if (ret != 1)
+               pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+
+       return 0;
+}
+
+/*
+ * Send a message to the SMC with a parameter.
+ * @param    hwmgr:  the address of the powerplay hardware manager.
+ * @param    msg: the message to send.
+ * @param    parameter: the parameter to send.
+ * @return   Always returns 0.
+ */
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+                                       uint16_t msg, uint32_t parameter)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+       uint32_t ret;
+
+       smu9_wait_for_response(hwmgr);
+
+       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
+
+       smu9_send_msg_to_smc_without_waiting(hwmgr, msg);
+
+       ret = smu9_wait_for_response(hwmgr);
+       if (ret != 1)
+               pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
+
+       return 0;
+}
+
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+}
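
The new smu9_smumgr.c above factors the Vega-series mailbox handshake into shared helpers: poll C2PMSG_90 until it goes nonzero, clear it to arm the mailbox, stage any parameter in C2PMSG_82, write the message ID to C2PMSG_66, then poll again and expect 1 on success; reply values are read back out of C2PMSG_82. A user-space mock of that sequence with the firmware side simulated inline (the register variables and the doubling "firmware" reply are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* The three "registers" model C2PMSG_90 (response), C2PMSG_66 (message)
     * and C2PMSG_82 (argument). */
    static uint32_t resp_reg = 1, msg_reg, arg_reg;

    static void smc_simulate(void)      /* pretend firmware: double the argument */
    {
        arg_reg *= 2;
        resp_reg = 1;                   /* 1 == success */
    }

    static uint32_t wait_for_response(void)
    {
        while (resp_reg == 0)           /* the driver polls until nonzero */
            smc_simulate();
        return resp_reg;
    }

    static int send_msg_to_smc_with_parameter(uint16_t msg, uint32_t parameter)
    {
        wait_for_response();            /* drain any previous message */
        resp_reg = 0;                   /* clear to arm the mailbox */
        arg_reg = parameter;            /* stage the parameter */
        msg_reg = msg;                  /* "ring the doorbell" */
        if (wait_for_response() != 1)
            fprintf(stderr, "failed message: 0x%x\n", msg);
        return 0;
    }

    int main(void)
    {
        send_msg_to_smc_with_parameter(0x10, 21);
        printf("argument register now holds %u\n", arg_reg);   /* prints 42 */
        return 0;
    }
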
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.h
new file mode 100644 (file)
index 0000000..1462279
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _SMU9_SMUMANAGER_H_
+#define _SMU9_SMUMANAGER_H_
+
+bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+                                       uint16_t msg, uint32_t parameter);
+uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr);
+
+#endif
index c9837935f0f5e2303b634bc55efd283e9cd3baa0..99d5e4f98f49cd7ec103a70eee060b8c9e0241e4 100644 (file)
@@ -96,7 +96,7 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-int smum_get_argument(struct pp_hwmgr *hwmgr)
+uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
 {
        if (NULL != hwmgr->smumgr_funcs->get_argument)
                return hwmgr->smumgr_funcs->get_argument(hwmgr);
index 782b19fc2e7012d2ddf95b2bb76fa7e4988ffba7..7dabc6c456e120b9e9e16dac8cd6ceb2c5b59100 100644 (file)
@@ -1443,51 +1443,6 @@ static int tonga_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int tonga_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU72_Discrete_DpmTable *table)
-{
-       int result = 0;
-       uint8_t count;
-       pp_atomctrl_clock_dividers_vi dividers;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       struct phm_ppt_v1_information *pptable_info =
-                            (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                                                   pptable_info->mm_dep_table;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t) (mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].Frequency =
-                       pptable_info->mm_dep_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage.Vddc =
-                       phm_get_voltage_index(pptable_info->vddc_lookup_table,
-                               mm_table->entries[count].vddc);
-               table->SamuLevel[count].MinVoltage.VddGfx =
-                       (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) ?
-                       phm_get_voltage_index(pptable_info->vddgfx_lookup_table,
-                               mm_table->entries[count].vddgfx) : 0;
-               table->SamuLevel[count].MinVoltage.Vddci =
-                       phm_get_voltage_id(&data->vddci_voltage_table,
-                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               table->SamuLevel[count].MinVoltage.Phases = 1;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                                       table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((!result),
-                       "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-       }
-
-       return result;
-}
-
 static int tonga_populate_memory_timing_parameters(
                struct pp_hwmgr *hwmgr,
                uint32_t engine_clock,
@@ -2323,10 +2278,6 @@ static int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(!result,
                "Failed to initialize ACP Level !", return result);
 
-       result = tonga_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-               "Failed to initialize SAMU Level !", return result);
-
        /* Since only the initial state is completely set up at this
        * point (the other states are just copies of the boot state) we only
        * need to populate the  ARB settings for the initial state.
@@ -2673,8 +2624,6 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU72_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU72_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
@@ -2773,32 +2722,6 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU72_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
 static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
 {
        switch (type) {
@@ -2808,9 +2731,6 @@ static int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                tonga_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               tonga_update_samu_smc_table(hwmgr);
-               break;
        default:
                break;
        }
index e84669c448a30a0ea6900f2aa2a245e03df8b654..5d19115f410c93b8a465011f15cdaa8808e886e8 100644 (file)
 #include "vega10_hwmgr.h"
 #include "vega10_ppsmc.h"
 #include "smu9_driver_if.h"
+#include "smu9_smumgr.h"
 #include "ppatomctrl.h"
 #include "pp_debug.h"
 
 
-#define AVFS_EN_MSB            1568
-#define AVFS_EN_LSB            1568
-
-/* Microcode file is stored in this buffer */
-#define BUFFER_SIZE                 80000
-#define MAX_STRING_SIZE             15
-#define BUFFER_SIZETWO              131072 /* 128 *1024 */
-
-/* MP Apertures */
-#define MP0_Public                  0x03800000
-#define MP0_SRAM                    0x03900000
-#define MP1_Public                  0x03b00000
-#define MP1_SRAM                    0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS                                                                           0x3010028
-#define smnMP0_FW_INTF                                                                                  0x3010104
-#define smnMP1_PUB_CTRL                                                                                 0x3010b14
-
-static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t mp1_fw_flags;
-
-       WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
-                       (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
-       mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
-       if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
-               return true;
-
-       return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param    smumgr  the address of the powerplay hardware manager.
- * @return   TRUE    SMC has responded, FALSE otherwise.
- */
-static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t reg;
-       uint32_t ret;
-
-       reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
-       ret = phm_wait_for_register_unequal(hwmgr, reg,
-                       0, MP1_C2PMSG_90__CONTENT_MASK);
-
-       if (ret)
-               pr_err("No response from smu\n");
-
-       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    msg the message to send.
- * @return   Always return 0.
- */
-static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
-               uint16_t msg)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
-       return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    msg the message to send.
- * @return   Always return 0.
- */
-static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t ret;
-
-       vega10_wait_for_response(hwmgr);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-       vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-       ret = vega10_wait_for_response(hwmgr);
-       if (ret != 1)
-               pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
-
-       return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: the parameter to send
- * @return   Always return 0.
- */
-static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-               uint16_t msg, uint32_t parameter)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t ret;
-
-       vega10_wait_for_response(hwmgr);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
-       vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-       ret = vega10_wait_for_response(hwmgr);
-       if (ret != 1)
-               pr_err("Failed message: 0x%x, input parameter: 0x%x, error code: 0x%x\n", msg, parameter, ret);
-
-       return 0;
-}
-
-static int vega10_get_argument(struct pp_hwmgr *hwmgr)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-}
-
 static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                uint8_t *table, int16_t table_id)
 {
@@ -175,13 +44,13 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
-       vega10_send_msg_to_smc_with_parameter(hwmgr,
+       smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       vega10_send_msg_to_smc_with_parameter(hwmgr,
+       smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       vega10_send_msg_to_smc_with_parameter(hwmgr,
+       smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
                        priv->smu_tables.entry[table_id].table_id);
 
@@ -206,13 +75,13 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
        memcpy(priv->smu_tables.entry[table_id].table, table,
                        priv->smu_tables.entry[table_id].size);
 
-       vega10_send_msg_to_smc_with_parameter(hwmgr,
+       smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       vega10_send_msg_to_smc_with_parameter(hwmgr,
+       smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
-       vega10_send_msg_to_smc_with_parameter(hwmgr,
+       smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
                        priv->smu_tables.entry[table_id].table_id);
 
@@ -225,8 +94,8 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
        if (features_enabled == NULL)
                return -EINVAL;
 
-       vega10_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
-       *features_enabled = vega10_get_argument(hwmgr);
+       smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
+       *features_enabled = smu9_get_argument(hwmgr);
 
        return 0;
 }
@@ -248,10 +117,10 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
        struct vega10_smumgr *priv = hwmgr->smu_backend;
 
        if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
-               vega10_send_msg_to_smc_with_parameter(hwmgr,
+               smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
                                upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
-               vega10_send_msg_to_smc_with_parameter(hwmgr,
+               smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrLow,
                                lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
        }
@@ -265,11 +134,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
        uint32_t dev_id;
        uint32_t rev_id;
 
-       PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
+       PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
                        PPSMC_MSG_GetDriverIfVersion),
                        "Attempt to get SMC IF Version Number Failed!",
                        return -EINVAL);
-       smc_driver_if_version = vega10_get_argument(hwmgr);
+       smc_driver_if_version = smu9_get_argument(hwmgr);
 
        dev_id = adev->pdev->device;
        rev_id = adev->pdev->revision;
@@ -441,7 +310,7 @@ static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
 
 static int vega10_start_smu(struct pp_hwmgr *hwmgr)
 {
-       if (!vega10_is_smc_ram_running(hwmgr))
+       if (!smu9_is_smc_ram_running(hwmgr))
                return -EINVAL;
 
        PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
@@ -453,7 +322,8 @@ static int vega10_start_smu(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
+static int vega10_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+                                   uint16_t table_id, bool rw)
 {
        int ret;
 
@@ -470,11 +340,11 @@ const struct pp_smumgr_func vega10_smu_funcs = {
        .smu_fini = &vega10_smu_fini,
        .start_smu = &vega10_start_smu,
        .request_smu_load_specific_fw = NULL,
-       .send_msg_to_smc = &vega10_send_msg_to_smc,
-       .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter,
+       .send_msg_to_smc = &smu9_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .is_dpm_running = vega10_is_dpm_running,
-       .get_argument = vega10_get_argument,
+       .get_argument = smu9_get_argument,
        .smc_table_manager = vega10_smc_table_manager,
 };
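
The duplicated per-ASIC message plumbing removed above is consolidated into shared smu9_* helpers, pulled in via the new "smu9_smumgr.h" include visible in the vega12 hunk below. A minimal sketch of those helpers, reconstructed from the removed vega10 code — the exact prototypes and their placement in a common smu9 file are assumptions, not shown in this diff:

        static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
        {
                struct amdgpu_device *adev = hwmgr->adev;
                uint32_t reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

                /* block until the SMC posts a non-zero response code */
                phm_wait_for_register_unequal(hwmgr, reg, 0,
                                MP1_C2PMSG_90__CONTENT_MASK);
                return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
        }

        int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        {
                struct amdgpu_device *adev = hwmgr->adev;

                smu9_wait_for_response(hwmgr);                  /* drain any stale reply */
                WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);   /* clear the response reg */
                WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); /* post the message */
                if (smu9_wait_for_response(hwmgr) != 1)
                        pr_err("Failed to send message: 0x%x\n", msg);
                return 0;
        }

        uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr)
        {
                struct amdgpu_device *adev = hwmgr->adev;

                /* the SMC returns its argument in C2PMSG_82 */
                return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
        }

The _with_parameter variant presumably just writes the parameter to C2PMSG_82 before posting the message, exactly as the removed vega10/vega12 copies did.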
index 7d9b40e8b1bf053b3cd3e8920b8118a53d804d04..7f0e2109f40d7f46641dd81f2440e9dff2932b9a 100644 (file)
 #include "smumgr.h"
 #include "vega12_inc.h"
 #include "soc15_common.h"
+#include "smu9_smumgr.h"
 #include "vega12_smumgr.h"
 #include "vega12_ppsmc.h"
 #include "vega12/smu9_driver_if.h"
-
 #include "ppatomctrl.h"
 #include "pp_debug.h"
 
 
-/* MP Apertures */
-#define MP0_Public                  0x03800000
-#define MP0_SRAM                    0x03900000
-#define MP1_Public                  0x03b00000
-#define MP1_SRAM                    0x03c00004
-
-#define smnMP1_FIRMWARE_FLAGS                                                                           0x3010028
-#define smnMP0_FW_INTF                                                                                  0x3010104
-#define smnMP1_PUB_CTRL                                                                                 0x3010b14
-
-static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t mp1_fw_flags;
-
-       WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
-                       (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
-       mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
-
-       if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
-                               MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
-               return true;
-
-       return false;
-}
-
-/*
- * Check if SMC has responded to previous message.
- *
- * @param    smumgr  the address of the powerplay hardware manager.
- * @return   TRUE    SMC has responded, FALSE otherwise.
- */
-static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-       uint32_t reg;
-
-       reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
-
-       phm_wait_for_register_unequal(hwmgr, reg,
-                       0, MP1_C2PMSG_90__CONTENT_MASK);
-
-       return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
-}
-
-/*
- * Send a message to the SMC, and do not wait for its response.
- * @param    smumgr  the address of the powerplay hardware manager.
- * @param    msg the message to send.
- * @return   Always return 0.
- */
-int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
-               uint16_t msg)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
-
-       return 0;
-}
-
-/*
- * Send a message to the SMC, and wait for its response.
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    msg the message to send.
- * @return   Always return 0.
- */
-int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       vega12_wait_for_response(hwmgr);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-       vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-       if (vega12_wait_for_response(hwmgr) != 1)
-               pr_err("Failed to send message: 0x%x\n", msg);
-
-       return 0;
-}
-
-/*
- * Send a message to the SMC with parameter
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: the parameter to send
- * @return   Always return 0.
- */
-int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
-               uint16_t msg, uint32_t parameter)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       vega12_wait_for_response(hwmgr);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);
-
-       vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-
-       if (vega12_wait_for_response(hwmgr) != 1)
-               pr_err("Failed to send message: 0x%x\n", msg);
-
-       return 0;
-}
-
-
-/*
- * Send a message to the SMC with parameter, do not wait for response
- * @param    hwmgr:  the address of the powerplay hardware manager.
- * @param    msg: the message to send.
- * @param    parameter: the parameter to send
- * @return   The response that came from the SMC.
- */
-int vega12_send_msg_to_smc_with_parameter_without_waiting(
-               struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter);
-
-       return vega12_send_msg_to_smc_without_waiting(hwmgr, msg);
-}
-
-/*
- * Retrieve an argument from SMC.
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    arg     pointer to store the argument from SMC.
- * @return   Always return 0.
- */
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
-{
-       struct amdgpu_device *adev = hwmgr->adev;
-
-       *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
-       return 0;
-}
-
 /*
  * Copy table from SMC into driver FB
  * @param   hwmgr    the address of the HW manager
@@ -192,16 +49,16 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                        "Invalid SMU Table version!", return -EINVAL);
        PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
                        "Invalid SMU Table Length!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
                        "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableSmu2Dram,
                        table_id) == 0,
                        "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -234,17 +91,17 @@ int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
        memcpy(priv->smu_tables.entry[table_id].table, table,
                        priv->smu_tables.entry[table_id].size);
 
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrHigh,
                        upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_SetDriverDramAddrLow,
                        lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
                        "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                        PPSMC_MSG_TransferTableDram2Smu,
                        table_id) == 0,
                        "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -262,20 +119,20 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
        smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
 
        if (enable) {
-               PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
                                "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!",
                                return -EINVAL);
-               PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
                                "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!",
                                return -EINVAL);
        } else {
-               PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
                                "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!",
                                return -EINVAL);
-               PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc_with_parameter(hwmgr,
+               PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
                                "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!",
                                return -EINVAL);
@@ -292,22 +149,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
        if (features_enabled == NULL)
                return -EINVAL;
 
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
                        PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
                        "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
-                       &smc_features_low) == 0,
-                       "[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
-                       return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_send_msg_to_smc(hwmgr,
+       smc_features_low = smu9_get_argument(hwmgr);
+
+       PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
                        PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
                        "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
                        return -EINVAL);
-       PP_ASSERT_WITH_CODE(vega12_read_arg_from_smc(hwmgr,
-                       &smc_features_high) == 0,
-                       "[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
-                       return -EINVAL);
+       smc_features_high = smu9_get_argument(hwmgr);
 
        *features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
                        (((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -333,39 +185,16 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
                        (struct vega12_smumgr *)(hwmgr->smu_backend);
 
        if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
-               if (!vega12_send_msg_to_smc_with_parameter(hwmgr,
+               if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetToolsDramAddrHigh,
                                upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
-                       vega12_send_msg_to_smc_with_parameter(hwmgr,
+                       smu9_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetToolsDramAddrLow,
                                        lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
        }
        return 0;
 }
 
-#if 0 /* tentatively remove */
-static int vega12_verify_smc_interface(struct pp_hwmgr *hwmgr)
-{
-       uint32_t smc_driver_if_version;
-
-       PP_ASSERT_WITH_CODE(!vega12_send_msg_to_smc(hwmgr,
-                       PPSMC_MSG_GetDriverIfVersion),
-                       "Attempt to get SMC IF Version Number Failed!",
-                       return -EINVAL);
-       vega12_read_arg_from_smc(hwmgr, &smc_driver_if_version);
-
-       if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION) {
-               pr_err("Your firmware(0x%x) doesn't match \
-                       SMU9_DRIVER_IF_VERSION(0x%x). \
-                       Please update your firmware!\n",
-                       smc_driver_if_version, SMU9_DRIVER_IF_VERSION);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-#endif
-
 static int vega12_smu_init(struct pp_hwmgr *hwmgr)
 {
        struct vega12_smumgr *priv;
@@ -513,16 +342,10 @@ static int vega12_smu_fini(struct pp_hwmgr *hwmgr)
 
 static int vega12_start_smu(struct pp_hwmgr *hwmgr)
 {
-       PP_ASSERT_WITH_CODE(vega12_is_smc_ram_running(hwmgr),
+       PP_ASSERT_WITH_CODE(smu9_is_smc_ram_running(hwmgr),
                        "SMC is not running!",
                        return -EINVAL);
 
-#if 0 /* tentatively remove */
-       PP_ASSERT_WITH_CODE(!vega12_verify_smc_interface(hwmgr),
-                       "Failed to verify SMC interface!",
-                       return -EINVAL);
-#endif
-
        vega12_set_tools_address(hwmgr);
 
        return 0;
@@ -533,9 +356,10 @@ const struct pp_smumgr_func vega12_smu_funcs = {
        .smu_fini = &vega12_smu_fini,
        .start_smu = &vega12_start_smu,
        .request_smu_load_specific_fw = NULL,
-       .send_msg_to_smc = &vega12_send_msg_to_smc,
-       .send_msg_to_smc_with_parameter = &vega12_send_msg_to_smc_with_parameter,
+       .send_msg_to_smc = &smu9_send_msg_to_smc,
+       .send_msg_to_smc_with_parameter = &smu9_send_msg_to_smc_with_parameter,
        .download_pptable_settings = NULL,
        .upload_pptable_settings = NULL,
        .is_dpm_running = vega12_is_dpm_running,
+       .get_argument = smu9_get_argument,
 };
index 2810d387b611cab83d0472498cdf75ef7c82b8cb..b285cbc04019cd78464657cd000d206d73ac50e3 100644 (file)
@@ -48,7 +48,6 @@ struct vega12_smumgr {
 #define SMU_FEATURES_HIGH_MASK       0xFFFFFFFF00000000
 #define SMU_FEATURES_HIGH_SHIFT      32
 
-int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
 int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
                uint8_t *table, int16_t table_id);
 int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
index 2de48959ac935ad553f93473196525a20f2700e8..57420d7caa4e968181b93cdf69e7eded94f7571a 100644 (file)
@@ -393,34 +393,6 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
-static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr)
-{
-       struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
-       uint32_t mm_boot_level_offset, mm_boot_level_value;
-
-
-       smu_data->smc_state_table.SamuBootLevel = 0;
-       mm_boot_level_offset = smu_data->smu7_data.dpm_table_start +
-                               offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
-
-       mm_boot_level_offset /= 4;
-       mm_boot_level_offset *= 4;
-       mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset);
-       mm_boot_level_value &= 0xFFFFFF00;
-       mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0;
-       cgs_write_ind_register(hwmgr->device,
-                       CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
-       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
-                       PHM_PlatformCaps_StablePState))
-               smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_SAMUDPM_SetEnabledMask,
-                               (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
-       return 0;
-}
-
-
 static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr)
 {
        struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
@@ -447,9 +419,6 @@ static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
        case SMU_VCE_TABLE:
                vegam_update_vce_smc_table(hwmgr);
                break;
-       case SMU_SAMU_TABLE:
-               vegam_update_samu_smc_table(hwmgr);
-               break;
        case SMU_BIF_TABLE:
                vegam_update_bif_smc_table(hwmgr);
                break;
@@ -1281,54 +1250,6 @@ static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
        return result;
 }
 
-static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
-               SMU75_Discrete_DpmTable *table)
-{
-       int result = -EINVAL;
-       uint8_t count;
-       struct pp_atomctrl_clock_dividers_vi dividers;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
-       struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
-                       table_info->mm_dep_table;
-       struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
-       uint32_t vddci;
-
-       table->SamuBootLevel = 0;
-       table->SamuLevelCount = (uint8_t)(mm_table->count);
-
-       for (count = 0; count < table->SamuLevelCount; count++) {
-               /* not sure whether we need evclk or not */
-               table->SamuLevel[count].MinVoltage = 0;
-               table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
-               table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
-                               VOLTAGE_SCALE) << VDDC_SHIFT;
-
-               if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
-                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
-                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
-               else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
-                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
-               else
-                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
-
-               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
-               table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-               /* retrieve divider value for VBIOS */
-               result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
-                               table->SamuLevel[count].Frequency, &dividers);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "can not find divide id for samu clock", return result);
-
-               table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
-               CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
-       }
-       return result;
-}
-
 static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
                int32_t eng_clock, int32_t mem_clock,
                SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs)
@@ -2062,10 +1983,6 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE(!result,
                        "Failed to initialize VCE Level!", return result);
 
-       result = vegam_populate_smc_samu_level(hwmgr, table);
-       PP_ASSERT_WITH_CODE(!result,
-                       "Failed to initialize SAMU Level!", return result);
-
        /* Since only the initial state is completely set up at this point
         * (the other states are just copies of the boot state) we only
         * need to populate the ARB settings for the initial state.
@@ -2273,8 +2190,6 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
                        return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel);
                case VceBootLevel:
                        return offsetof(SMU75_Discrete_DpmTable, VceBootLevel);
-               case SamuBootLevel:
-                       return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel);
                case LowSclkInterruptThreshold:
                        return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
                }
index 16903dc7fe0dc465d7d6f93f138efeb337ca4c42..965cda48dc13ceeca19ff677558c08ef537871dc 100644 (file)
@@ -136,9 +136,6 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
 {
        struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
 
-       if (!crtc->primary->fb)
-               return;
-
        clk_disable_unprepare(arcpgu->clk);
        arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
                              arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
@@ -189,7 +186,7 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
 
 static void arc_pgu_plane_destroy(struct drm_plane *plane)
 {
-       drm_plane_helper_disable(plane);
+       drm_plane_helper_disable(plane, NULL);
        drm_plane_cleanup(plane);
 }
 
index b8f6f9a5dfbe32059f3e4568640e4034fa108d8d..68629e6149909fd08d52f22e314296971a3d4618 100644 (file)
@@ -99,7 +99,7 @@ int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
                goto error_encoder_cleanup;
        }
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret < 0) {
                dev_err(drm->dev, "could not attach connector to encoder\n");
                drm_connector_unregister(connector);
index bb8b158ff90d03dcc1d295f07b2d76ac3899304a..3bf31d1a4722cafa2bb2108e8bad2f616d7aa895 100644 (file)
@@ -1,4 +1,5 @@
 hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
 obj-$(CONFIG_DRM_HDLCD)        += hdlcd.o
 mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+mali-dp-y += malidp_mw.o
 obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
index cf5cbd63ecdffcc1143ddcb48ecfb4d80c1b4328..e4d67b70244d5716764a6afa1ef2ae991e2e51c6 100644 (file)
@@ -229,6 +229,8 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
+       int i;
+       struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        u32 src_h = state->src_h >> 16;
 
@@ -238,20 +240,17 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
-       if (!state->fb || !state->crtc)
-               return 0;
-
-       crtc_state = drm_atomic_get_existing_crtc_state(state->state,
-                                                       state->crtc);
-       if (!crtc_state) {
-               DRM_DEBUG_KMS("Invalid crtc state\n");
-               return -EINVAL;
+       for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+               /* we cannot disable the plane while the CRTC is active */
+               if (!state->fb && crtc_state->active)
+                       return -EINVAL;
+               return drm_atomic_helper_check_plane_state(state, crtc_state,
+                                               DRM_PLANE_HELPER_NO_SCALING,
+                                               DRM_PLANE_HELPER_NO_SCALING,
+                                               false, true);
        }
 
-       return drm_atomic_helper_check_plane_state(state, crtc_state,
-                                                  DRM_PLANE_HELPER_NO_SCALING,
-                                                  DRM_PLANE_HELPER_NO_SCALING,
-                                                  false, true);
+       return 0;
 }
 
 static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -280,16 +279,10 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
        .atomic_update = hdlcd_plane_atomic_update,
 };
 
-static void hdlcd_plane_destroy(struct drm_plane *plane)
-{
-       drm_plane_helper_disable(plane);
-       drm_plane_cleanup(plane);
-}
-
 static const struct drm_plane_funcs hdlcd_plane_funcs = {
        .update_plane           = drm_atomic_helper_update_plane,
        .disable_plane          = drm_atomic_helper_disable_plane,
-       .destroy                = hdlcd_plane_destroy,
+       .destroy                = drm_plane_cleanup,
        .reset                  = drm_atomic_helper_plane_reset,
        .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
@@ -334,10 +327,8 @@ int hdlcd_setup_crtc(struct drm_device *drm)
 
        ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
                                        &hdlcd_crtc_funcs, NULL);
-       if (ret) {
-               hdlcd_plane_destroy(primary);
+       if (ret)
                return ret;
-       }
 
        drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
        return 0;
index feaa8bc3d7b760a58e2b4e32011ef252a22d85cd..0ed1cde98cf8c8cdce08836e4c4b28bda3792761 100644 (file)
@@ -27,6 +27,7 @@
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
 #include <drm/drm_of.h>
 
 #include "hdlcd_drv.h"
@@ -100,16 +101,9 @@ setup_fail:
        return ret;
 }
 
-static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
-{
-       struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
-       drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
 static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
        .fb_create = drm_gem_fb_create,
-       .output_poll_changed = hdlcd_fb_output_poll_changed,
+       .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
@@ -124,13 +118,6 @@ static void hdlcd_setup_mode_config(struct drm_device *drm)
        drm->mode_config.funcs = &hdlcd_mode_config_funcs;
 }
 
-static void hdlcd_lastclose(struct drm_device *drm)
-{
-       struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
-       drm_fbdev_cma_restore_mode(hdlcd->fbdev);
-}
-
 static irqreturn_t hdlcd_irq(int irq, void *arg)
 {
        struct drm_device *drm = arg;
@@ -246,7 +233,7 @@ static struct drm_driver hdlcd_driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
                           DRIVER_MODESET | DRIVER_PRIME |
                           DRIVER_ATOMIC,
-       .lastclose = hdlcd_lastclose,
+       .lastclose = drm_fb_helper_lastclose,
        .irq_handler = hdlcd_irq,
        .irq_preinstall = hdlcd_irq_preinstall,
        .irq_postinstall = hdlcd_irq_postinstall,
@@ -321,14 +308,9 @@ static int hdlcd_drm_bind(struct device *dev)
        drm_mode_config_reset(drm);
        drm_kms_helper_poll_init(drm);
 
-       hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
-                                         drm->mode_config.num_connector);
-
-       if (IS_ERR(hdlcd->fbdev)) {
-               ret = PTR_ERR(hdlcd->fbdev);
-               hdlcd->fbdev = NULL;
+       ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+       if (ret)
                goto err_fbdev;
-       }
 
        ret = drm_dev_register(drm, 0);
        if (ret)
@@ -337,15 +319,13 @@ static int hdlcd_drm_bind(struct device *dev)
        return 0;
 
 err_register:
-       if (hdlcd->fbdev) {
-               drm_fbdev_cma_fini(hdlcd->fbdev);
-               hdlcd->fbdev = NULL;
-       }
+       drm_fb_cma_fbdev_fini(drm);
 err_fbdev:
        drm_kms_helper_poll_fini(drm);
 err_vblank:
        pm_runtime_disable(drm->dev);
 err_pm_active:
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
 err_unload:
        of_node_put(hdlcd->crtc.port);
@@ -366,23 +346,23 @@ static void hdlcd_drm_unbind(struct device *dev)
        struct hdlcd_drm_private *hdlcd = drm->dev_private;
 
        drm_dev_unregister(drm);
-       if (hdlcd->fbdev) {
-               drm_fbdev_cma_fini(hdlcd->fbdev);
-               hdlcd->fbdev = NULL;
-       }
+       drm_fb_cma_fbdev_fini(drm);
        drm_kms_helper_poll_fini(drm);
        component_unbind_all(dev, drm);
        of_node_put(hdlcd->crtc.port);
        hdlcd->crtc.port = NULL;
-       pm_runtime_get_sync(drm->dev);
+       pm_runtime_get_sync(dev);
+       drm_crtc_vblank_off(&hdlcd->crtc);
        drm_irq_uninstall(drm);
-       pm_runtime_put_sync(drm->dev);
-       pm_runtime_disable(drm->dev);
-       of_reserved_mem_device_release(drm->dev);
+       drm_atomic_helper_shutdown(drm);
+       pm_runtime_put(dev);
+       if (pm_runtime_enabled(dev))
+               pm_runtime_disable(dev);
+       of_reserved_mem_device_release(dev);
        drm_mode_config_cleanup(drm);
-       drm_dev_put(drm);
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops hdlcd_master_ops = {
@@ -427,35 +407,15 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
 static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
-       if (!hdlcd)
-               return 0;
 
-       drm_kms_helper_poll_disable(drm);
-       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
-
-       hdlcd->state = drm_atomic_helper_suspend(drm);
-       if (IS_ERR(hdlcd->state)) {
-               drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
-               drm_kms_helper_poll_enable(drm);
-               return PTR_ERR(hdlcd->state);
-       }
-
-       return 0;
+       return drm_mode_config_helper_suspend(drm);
 }
 
 static int __maybe_unused hdlcd_pm_resume(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
-       if (!hdlcd)
-               return 0;
 
-       drm_atomic_helper_resume(drm, hdlcd->state);
-       drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
-       drm_kms_helper_poll_enable(drm);
+       drm_mode_config_helper_resume(drm);
 
        return 0;
 }
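
The two helpers used above replace the open-coded suspend/resume sequence deleted from this file; roughly, and paraphrasing from the removed driver code they subsume rather than the helpers' actual source:

        /* drm_mode_config_helper_suspend(), approximately */
        drm_kms_helper_poll_disable(drm);
        state = drm_atomic_helper_suspend(drm); /* stashed so resume can find it */

        /* drm_mode_config_helper_resume(), approximately */
        drm_atomic_helper_resume(drm, state);
        drm_kms_helper_poll_enable(drm);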
index 56f34dfff64065640284e5eee100e628f4a59204..fd438d177b644d6f7d0fde99a32802dfa2bd6dbc 100644 (file)
@@ -9,10 +9,8 @@
 struct hdlcd_drm_private {
        void __iomem                    *mmio;
        struct clk                      *clk;
-       struct drm_fbdev_cma            *fbdev;
        struct drm_crtc                 crtc;
        struct drm_plane                *plane;
-       struct drm_atomic_state         *state;
 #ifdef CONFIG_DEBUG_FS
        atomic_t buffer_underrun_count;
        atomic_t bus_error_count;
index fcc62bc60f6a7fbd7d47554ac2ebf2b8d2e4be41..ef44202fb43f8135dbb0386560b2f466ae87d400 100644 (file)
@@ -411,6 +411,16 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
                }
        }
 
+       /* If only the writeback routing has changed, we don't need a modeset */
+       if (state->connectors_changed) {
+               u32 old_mask = crtc->state->connector_mask;
+               u32 new_mask = state->connector_mask;
+
+               if ((old_mask ^ new_mask) ==
+                   (1 << drm_connector_index(&malidp->mw_connector.base)))
+                       state->connectors_changed = false;
+       }
+
        ret = malidp_crtc_atomic_check_gamma(crtc, state);
        ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
        ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
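
A worked instance of the mask test above (connector indices illustrative):

        /* display connector at index 0, writeback connector at index 1 */
        u32 old_mask = 0x1;        /* only the display was bound */
        u32 new_mask = 0x1 | 0x2;  /* display plus writeback now bound */
        /* old_mask ^ new_mask == 0x2 == 1 << writeback_index, so the change
         * is writeback-only and connectors_changed can safely be cleared */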
index 8d20faa198cf199ff65d604ca9f4f1effd633c86..08b5bb219816ad38f929a23ccb69b378cba2101f 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/pm_runtime.h>
+#include <linux/debugfs.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
 #include <drm/drm_of.h>
 
 #include "malidp_drv.h"
+#include "malidp_mw.h"
 #include "malidp_regs.h"
 #include "malidp_hw.h"
 
@@ -170,14 +172,15 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
        struct malidp_hw_device *hwdev = malidp->dev;
        int ret;
 
-       atomic_set(&malidp->config_valid, 0);
-       hwdev->hw->set_config_valid(hwdev);
+       hwdev->hw->set_config_valid(hwdev, 1);
        /* don't wait for config_valid flag if we are in config mode */
-       if (hwdev->hw->in_config_mode(hwdev))
+       if (hwdev->hw->in_config_mode(hwdev)) {
+               atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
                return 0;
+       }
 
        ret = wait_event_interruptible_timeout(malidp->wq,
-                       atomic_read(&malidp->config_valid) == 1,
+                       atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
                        msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
 
        return (ret > 0) ? 0 : -ETIMEDOUT;
@@ -216,12 +219,20 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
 static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *drm = state->dev;
+       struct malidp_drm *malidp = drm->dev_private;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
        int i;
 
        pm_runtime_get_sync(drm->dev);
 
+       /*
+        * set config_valid to a special value to let IRQ handlers
+        * know that we are updating registers
+        */
+       atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
+       malidp->dev->hw->set_config_valid(malidp->dev, 0);
+
        drm_atomic_helper_commit_modeset_disables(drm, state);
 
        for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
@@ -230,7 +241,9 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
                malidp_atomic_commit_se_config(crtc, old_crtc_state);
        }
 
-       drm_atomic_helper_commit_planes(drm, state, 0);
+       drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);
+
+       malidp_mw_atomic_commit(drm, state);
 
        drm_atomic_helper_commit_modeset_enables(drm, state);
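
The MALIDP_CONFIG_START sentinel only pays off if the interrupt side honours it; a plausible sketch of the corresponding check in the CONFIG_VALID IRQ path (that handler lives in malidp_hw.c and is not part of this hunk, so treat the exact form as an assumption):

        /* sketch: only report completion if the CPU is not mid-update */
        if (atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START)
                atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);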
 
@@ -268,17 +281,22 @@ static int malidp_init(struct drm_device *drm)
        drm->mode_config.helper_private = &malidp_mode_config_helpers;
 
        ret = malidp_crtc_init(drm);
-       if (ret) {
-               drm_mode_config_cleanup(drm);
-               return ret;
-       }
+       if (ret)
+               goto crtc_fail;
+
+       ret = malidp_mw_connector_init(drm);
+       if (ret)
+               goto crtc_fail;
 
        return 0;
+
+crtc_fail:
+       drm_mode_config_cleanup(drm);
+       return ret;
 }
 
 static void malidp_fini(struct drm_device *drm)
 {
-       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
 }
 
@@ -286,6 +304,8 @@ static int malidp_irq_init(struct platform_device *pdev)
 {
        int irq_de, irq_se, ret = 0;
        struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
 
        /* fetch the interrupts from DT */
        irq_de = platform_get_irq_byname(pdev, "DE");
@@ -305,7 +325,7 @@ static int malidp_irq_init(struct platform_device *pdev)
 
        ret = malidp_se_irq_init(drm, irq_se);
        if (ret) {
-               malidp_de_irq_fini(drm);
+               malidp_de_irq_fini(hwdev);
                return ret;
        }
 
@@ -327,6 +347,106 @@ static int malidp_dumb_create(struct drm_file *file_priv,
        return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
 }
 
+#ifdef CONFIG_DEBUG_FS
+
+static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
+{
+       error_stats->num_errors = 0;
+       error_stats->last_error_status = 0;
+       error_stats->last_error_vblank = -1;
+}
+
+void malidp_error(struct malidp_drm *malidp,
+                 struct malidp_error_stats *error_stats, u32 status,
+                 u64 vblank)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&malidp->errors_lock, irqflags);
+       error_stats->last_error_status = status;
+       error_stats->last_error_vblank = vblank;
+       error_stats->num_errors++;
+       spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+}
+
+void malidp_error_stats_dump(const char *prefix,
+                            struct malidp_error_stats error_stats,
+                            struct seq_file *m)
+{
+       seq_printf(m, "[%s] num_errors : %d\n", prefix,
+                  error_stats.num_errors);
+       seq_printf(m, "[%s] last_error_status  : 0x%08x\n", prefix,
+                  error_stats.last_error_status);
+       seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
+                  error_stats.last_error_vblank);
+}
+
+static int malidp_show_stats(struct seq_file *m, void *arg)
+{
+       struct drm_device *drm = m->private;
+       struct malidp_drm *malidp = drm->dev_private;
+       unsigned long irqflags;
+       struct malidp_error_stats de_errors, se_errors;
+
+       spin_lock_irqsave(&malidp->errors_lock, irqflags);
+       de_errors = malidp->de_errors;
+       se_errors = malidp->se_errors;
+       spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+       malidp_error_stats_dump("DE", de_errors, m);
+       malidp_error_stats_dump("SE", se_errors, m);
+       return 0;
+}
+
+static int malidp_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, malidp_show_stats, inode->i_private);
+}
+
+static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
+                                   size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct drm_device *drm = m->private;
+       struct malidp_drm *malidp = drm->dev_private;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&malidp->errors_lock, irqflags);
+       malidp_error_stats_init(&malidp->de_errors);
+       malidp_error_stats_init(&malidp->se_errors);
+       spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
+       return len;
+}
+
+static const struct file_operations malidp_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = malidp_debugfs_open,
+       .read = seq_read,
+       .write = malidp_debugfs_write,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int malidp_debugfs_init(struct drm_minor *minor)
+{
+       struct malidp_drm *malidp = minor->dev->dev_private;
+       struct dentry *dentry = NULL;
+
+       malidp_error_stats_init(&malidp->de_errors);
+       malidp_error_stats_init(&malidp->se_errors);
+       spin_lock_init(&malidp->errors_lock);
+       dentry = debugfs_create_file("debug",
+                                    S_IRUGO | S_IWUSR,
+                                    minor->debugfs_root, minor->dev,
+                                    &malidp_debugfs_fops);
+       if (!dentry) {
+               DRM_ERROR("Cannot create debug file\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
 static struct drm_driver malidp_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
                           DRIVER_PRIME,
@@ -343,6 +463,9 @@ static struct drm_driver malidp_driver = {
        .gem_prime_vmap = drm_gem_cma_prime_vmap,
        .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
        .gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_init = malidp_debugfs_init,
+#endif
        .fops = &fops,
        .name = "mali-dp",
        .desc = "ARM Mali Display Processor driver",
@@ -459,6 +582,8 @@ static int malidp_runtime_pm_suspend(struct device *dev)
        /* we can only suspend if the hardware is in config mode */
        WARN_ON(!hwdev->hw->in_config_mode(hwdev));
 
+       malidp_se_irq_fini(hwdev);
+       malidp_de_irq_fini(hwdev);
        hwdev->pm_suspended = true;
        clk_disable_unprepare(hwdev->mclk);
        clk_disable_unprepare(hwdev->aclk);
@@ -477,6 +602,8 @@ static int malidp_runtime_pm_resume(struct device *dev)
        clk_prepare_enable(hwdev->aclk);
        clk_prepare_enable(hwdev->mclk);
        hwdev->pm_suspended = false;
+       malidp_de_irq_hw_init(hwdev);
+       malidp_se_irq_hw_init(hwdev);
 
        return 0;
 }
@@ -489,6 +616,7 @@ static int malidp_bind(struct device *dev)
        struct malidp_hw_device *hwdev;
        struct platform_device *pdev = to_platform_device(dev);
        struct of_device_id const *dev_id;
+       struct drm_encoder *encoder;
        /* number of lines for the R, G and B output */
        u8 output_width[MAX_OUTPUT_CHANNELS];
        int ret = 0, i;
@@ -588,8 +716,9 @@ static int malidp_bind(struct device *dev)
        for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
                out_depth = (out_depth << 8) | (output_width[i] & 0xf);
        malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
+       hwdev->output_color_depth = out_depth;
 
-       atomic_set(&malidp->config_valid, 0);
+       atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
        init_waitqueue_head(&malidp->wq);
 
        ret = malidp_init(drm);
@@ -609,6 +738,15 @@ static int malidp_bind(struct device *dev)
                goto bind_fail;
        }
 
+       /* We expect to have a maximum of two encoders: one for the actual
+        * display and a virtual one for the writeback connector
+        */
+       WARN_ON(drm->mode_config.num_encoder > 2);
+       list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
+               encoder->possible_clones =
+                               (1 << drm->mode_config.num_encoder) - 1;
+       }
+
        ret = malidp_irq_init(pdev);
        if (ret < 0)
                goto irq_init_fail;
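
As a concrete instance of the possible_clones loop above: with num_encoder == 2 (one display encoder plus the writeback encoder), every encoder gets possible_clones = (1 << 2) - 1 = 0x3, so each encoder advertises both itself and its peer as clone candidates, which is what allows the writeback connector to be active at the same time as the display.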
@@ -642,10 +780,11 @@ register_fail:
 fbdev_fail:
        pm_runtime_get_sync(dev);
 vblank_fail:
-       malidp_se_irq_fini(drm);
-       malidp_de_irq_fini(drm);
+       malidp_se_irq_fini(hwdev);
+       malidp_de_irq_fini(hwdev);
        drm->irq_enabled = false;
 irq_init_fail:
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
 bind_fail:
        of_node_put(malidp->crtc.port);
@@ -672,15 +811,17 @@ static void malidp_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
        struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
 
        drm_dev_unregister(drm);
        drm_fb_cma_fbdev_fini(drm);
        drm_kms_helper_poll_fini(drm);
        pm_runtime_get_sync(dev);
        drm_crtc_vblank_off(&malidp->crtc);
-       malidp_se_irq_fini(drm);
-       malidp_de_irq_fini(drm);
+       malidp_se_irq_fini(hwdev);
+       malidp_de_irq_fini(hwdev);
        drm->irq_enabled = false;
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
        of_node_put(malidp->crtc.port);
        malidp->crtc.port = NULL;
@@ -751,8 +892,25 @@ static int __maybe_unused malidp_pm_resume(struct device *dev)
        return 0;
 }
 
+static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
+{
+       if (!pm_runtime_status_suspended(dev)) {
+               malidp_runtime_pm_suspend(dev);
+               pm_runtime_set_suspended(dev);
+       }
+       return 0;
+}
+
+static int __maybe_unused malidp_pm_resume_early(struct device *dev)
+{
+       malidp_runtime_pm_resume(dev);
+       pm_runtime_set_active(dev);
+       return 0;
+}
+
 static const struct dev_pm_ops malidp_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
+       SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
        SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
 };
 
index c70989b933874e37f4ee125c50e8b1c20abb7112..e3eb0cb1f385d887c18023087ef0c6c94dc4f643 100644 (file)
 #ifndef __MALIDP_DRV_H__
 #define __MALIDP_DRV_H__
 
+#include <drm/drm_writeback.h>
+#include <drm/drm_encoder.h>
 #include <linux/mutex.h>
 #include <linux/wait.h>
+#include <linux/spinlock.h>
 #include <drm/drmP.h>
 #include "malidp_hw.h"
 
+#define MALIDP_CONFIG_VALID_INIT       0
+#define MALIDP_CONFIG_VALID_DONE       1
+#define MALIDP_CONFIG_START            0xd0
+
+struct malidp_error_stats {
+       s32 num_errors;
+       u32 last_error_status;
+       s64 last_error_vblank;
+};
+
 struct malidp_drm {
        struct malidp_hw_device *dev;
        struct drm_crtc crtc;
+       struct drm_writeback_connector mw_connector;
        wait_queue_head_t wq;
        struct drm_pending_vblank_event *event;
        atomic_t config_valid;
        u32 core_id;
+#ifdef CONFIG_DEBUG_FS
+       struct malidp_error_stats de_errors;
+       struct malidp_error_stats se_errors;
+       /* Protects error stats */
+       spinlock_t errors_lock;
+#endif
 };
 
 #define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
@@ -62,6 +82,12 @@ struct malidp_crtc_state {
 int malidp_de_planes_init(struct drm_device *drm);
 int malidp_crtc_init(struct drm_device *drm);
 
+#ifdef CONFIG_DEBUG_FS
+void malidp_error(struct malidp_drm *malidp,
+                 struct malidp_error_stats *error_stats, u32 status,
+                 u64 vblank);
+#endif
+
 /* often used combination of rotational bits */
 #define MALIDP_ROTATED_MASK    (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_270)
 
index d789b46dc817335dd2d509ee456d850897762b88..c94a4422e0e9100a607a8b817878c25942ce77bd 100644 (file)
 
 #include "malidp_drv.h"
 #include "malidp_hw.h"
+#include "malidp_mw.h"
+
+enum {
+       MW_NOT_ENABLED = 0,     /* SE writeback not enabled */
+       MW_ONESHOT,             /* SE in one-shot mode for writeback */
+       MW_START,               /* SE started writeback */
+       MW_RESTART,             /* SE will start another writeback after this one */
+       MW_STOP,                /* SE needs to stop after this writeback */
+};
 
 static const struct malidp_format_id malidp500_de_formats[] = {
        /*    fourcc,   layers supporting the format,     internal id  */
-       { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  0 },
-       { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  1 },
+       { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE,  0 },
+       { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE,  1 },
        { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  2 },
        { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  3 },
-       { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  4 },
-       { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  5 },
+       { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE,  4 },
+       { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2 | SE_MEMWRITE,  5 },
        { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  6 },
        { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  7 },
        { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  8 },
@@ -38,7 +47,7 @@ static const struct malidp_format_id malidp500_de_formats[] = {
        { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
        { DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
        { DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
-       { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+       { DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
        { DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
 };
 
@@ -47,27 +56,27 @@ static const struct malidp_format_id malidp500_de_formats[] = {
 
 #define MALIDP_COMMON_FORMATS \
        /*    fourcc,   layers supporting the format,      internal id   */ \
-       { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
-       { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
-       { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
-       { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+       { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
+       { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 1) }, \
+       { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 2) }, \
+       { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 3) }, \
        { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
        { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
        { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
        { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
-       { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
-       { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
-       { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
-       { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
-       { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
-       { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+       { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 0) }, \
+       { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 1) }, \
+       { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 2) }, \
+       { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART | SE_MEMWRITE, MALIDP_ID(2, 3) }, \
+       { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 0) }, \
+       { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(3, 1) }, \
        { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
        { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
        { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
        { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
        { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) },    \
        { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) },    \
-       { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) },    \
+       { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) },      \
        { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
 
 static const struct malidp_format_id malidp550_de_formats[] = {
@@ -223,15 +232,20 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
        return false;
 }
 
-static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
 {
-       malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+       if (value)
+               malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+       else
+               malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
 }
 
 static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
 {
        u32 val = 0;
 
+       malidp_hw_write(hwdev, hwdev->output_color_depth,
+               hwdev->hw->map.out_depth_base);
        malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
        if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
                val |= MALIDP500_HSYNCPOL;
@@ -368,6 +382,55 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
        return ret;
 }
 
+static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
+                                    dma_addr_t *addrs, s32 *pitches,
+                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+       u32 base = MALIDP500_SE_MEMWRITE_BASE;
+       u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+       /* enable the scaling engine block */
+       malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+       /* restart the writeback if already enabled */
+       if (hwdev->mw_state != MW_NOT_ENABLED)
+               hwdev->mw_state = MW_RESTART;
+       else
+               hwdev->mw_state = MW_START;
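+       /*
+        * The DP500 scaling engine has no hardware one-shot writeback, so
+        * mw_state drives a software emulation of it in the SE IRQ handler.
+        */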
+
+       malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+       switch (num_planes) {
+       case 2:
+               malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+               malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+               malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+               /* fall through */
+       case 1:
+               malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+               malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+               malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+               break;
+       default:
+               WARN(1, "Invalid number of planes");
+       }
+
+       malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+                       MALIDP500_SE_MEMWRITE_OUT_SIZE);
+       malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+
+       return 0;
+}
+
+static void malidp500_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+       u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+       if (hwdev->mw_state == MW_START || hwdev->mw_state == MW_RESTART)
+               hwdev->mw_state = MW_STOP;
+       malidp_hw_clearbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
+       malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
 static int malidp550_query_hw(struct malidp_hw_device *hwdev)
 {
        u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -447,15 +510,20 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
        return false;
 }
 
-static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev, u8 value)
 {
-       malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+       if (value)
+               malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+       else
+               malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
 }
 
 static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
 {
        u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
 
+       malidp_hw_write(hwdev, hwdev->output_color_depth,
+               hwdev->hw->map.out_depth_base);
        malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
        /*
         * Mali-DP550 and Mali-DP650 encode the background color like this:
@@ -588,6 +656,51 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
        return ret;
 }
 
+static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
+                                    dma_addr_t *addrs, s32 *pitches,
+                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+{
+       u32 base = MALIDP550_SE_MEMWRITE_BASE;
+       u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+       /* enable the scaling engine block */
+       malidp_hw_setbits(hwdev, MALIDP_SCALE_ENGINE_EN, de_base + MALIDP_DE_DISPLAY_FUNC);
+
+       hwdev->mw_state = MW_ONESHOT;
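+       /*
+        * DP550/DP650 have a hardware one-shot mode, enabled below via
+        * MALIDP550_SE_MEMWRITE_ONESHOT, so no software emulation is needed.
+        */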
+
+       malidp_hw_write(hwdev, fmt_id, base + MALIDP_MW_FORMAT);
+       switch (num_planes) {
+       case 2:
+               malidp_hw_write(hwdev, lower_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_LOW);
+               malidp_hw_write(hwdev, upper_32_bits(addrs[1]), base + MALIDP_MW_P2_PTR_HIGH);
+               malidp_hw_write(hwdev, pitches[1], base + MALIDP_MW_P2_STRIDE);
+               /* fall through */
+       case 1:
+               malidp_hw_write(hwdev, lower_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_LOW);
+               malidp_hw_write(hwdev, upper_32_bits(addrs[0]), base + MALIDP_MW_P1_PTR_HIGH);
+               malidp_hw_write(hwdev, pitches[0], base + MALIDP_MW_P1_STRIDE);
+               break;
+       default:
+               WARN(1, "Invalid number of planes");
+       }
+
+       malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
+                       MALIDP550_SE_MEMWRITE_OUT_SIZE);
+       malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+                         MALIDP550_SE_CONTROL);
+
+       return 0;
+}
+
+static void malidp550_disable_memwrite(struct malidp_hw_device *hwdev)
+{
+       u32 base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
+
+       malidp_hw_clearbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
+                           MALIDP550_SE_CONTROL);
+       malidp_hw_clearbits(hwdev, MALIDP_SCALE_ENGINE_EN, base + MALIDP_DE_DISPLAY_FUNC);
+}
+
 static int malidp650_query_hw(struct malidp_hw_device *hwdev)
 {
        u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -632,10 +745,18 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                                            MALIDP500_DE_IRQ_VSYNC |
                                            MALIDP500_DE_IRQ_GLOBAL,
                                .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+                               .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+                                           MALIDP500_DE_IRQ_AXI_ERR |
+                                           MALIDP500_DE_IRQ_SATURATION,
                        },
                        .se_irq_map = {
-                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
-                               .vsync_irq = 0,
+                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+                                           MALIDP500_SE_IRQ_CONF_VALID |
+                                           MALIDP500_SE_IRQ_GLOBAL,
+                               .vsync_irq = MALIDP500_SE_IRQ_CONF_VALID,
+                               .err_mask = MALIDP500_SE_IRQ_INIT_BUSY |
+                                           MALIDP500_SE_IRQ_AXI_ERROR |
+                                           MALIDP500_SE_IRQ_OVERRUN,
                        },
                        .dc_irq_map = {
                                .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
@@ -654,6 +775,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                .rotmem_required = malidp500_rotmem_required,
                .se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
                .se_calc_mclk = malidp500_se_calc_mclk,
+               .enable_memwrite = malidp500_enable_memwrite,
+               .disable_memwrite = malidp500_disable_memwrite,
                .features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
        },
        [MALIDP_550] = {
@@ -669,13 +792,20 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                                .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
                                            MALIDP550_DE_IRQ_VSYNC,
                                .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+                               .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+                                           MALIDP550_DE_IRQ_SATURATION |
+                                           MALIDP550_DE_IRQ_AXI_ERR,
                        },
                        .se_irq_map = {
-                               .irq_mask = MALIDP550_SE_IRQ_EOW |
-                                           MALIDP550_SE_IRQ_AXI_ERR,
+                               .irq_mask = MALIDP550_SE_IRQ_EOW,
+                               .vsync_irq = MALIDP550_SE_IRQ_EOW,
+                               .err_mask  = MALIDP550_SE_IRQ_AXI_ERR |
+                                            MALIDP550_SE_IRQ_OVR |
+                                            MALIDP550_SE_IRQ_IBSY,
                        },
                        .dc_irq_map = {
-                               .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+                               .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+                                           MALIDP550_DC_IRQ_SE,
                                .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
                        },
                        .pixel_formats = malidp550_de_formats,
@@ -691,6 +821,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                .rotmem_required = malidp550_rotmem_required,
                .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
                .se_calc_mclk = malidp550_se_calc_mclk,
+               .enable_memwrite = malidp550_enable_memwrite,
+               .disable_memwrite = malidp550_disable_memwrite,
                .features = 0,
        },
        [MALIDP_650] = {
@@ -707,13 +839,25 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                                            MALIDP650_DE_IRQ_DRIFT |
                                            MALIDP550_DE_IRQ_VSYNC,
                                .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+                               .err_mask = MALIDP_DE_IRQ_UNDERRUN |
+                                           MALIDP650_DE_IRQ_DRIFT |
+                                           MALIDP550_DE_IRQ_SATURATION |
+                                           MALIDP550_DE_IRQ_AXI_ERR |
+                                           MALIDP650_DE_IRQ_ACEV1 |
+                                           MALIDP650_DE_IRQ_ACEV2 |
+                                           MALIDP650_DE_IRQ_ACEG |
+                                           MALIDP650_DE_IRQ_AXIEP,
                        },
                        .se_irq_map = {
-                               .irq_mask = MALIDP550_SE_IRQ_EOW |
-                                           MALIDP550_SE_IRQ_AXI_ERR,
+                               .irq_mask = MALIDP550_SE_IRQ_EOW,
+                               .vsync_irq = MALIDP550_SE_IRQ_EOW,
+                               .err_mask = MALIDP550_SE_IRQ_AXI_ERR |
+                                           MALIDP550_SE_IRQ_OVR |
+                                           MALIDP550_SE_IRQ_IBSY,
                        },
                        .dc_irq_map = {
-                               .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+                               .irq_mask = MALIDP550_DC_IRQ_CONF_VALID |
+                                           MALIDP550_DC_IRQ_SE,
                                .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
                        },
                        .pixel_formats = malidp550_de_formats,
@@ -729,6 +873,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                .rotmem_required = malidp550_rotmem_required,
                .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
                .se_calc_mclk = malidp550_se_calc_mclk,
+               .enable_memwrite = malidp550_enable_memwrite,
+               .disable_memwrite = malidp550_disable_memwrite,
                .features = 0,
        },
 };
@@ -790,7 +936,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
                        malidp->event = NULL;
                        spin_unlock(&drm->event_lock);
                }
-               atomic_set(&malidp->config_valid, 1);
+               atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
                ret = IRQ_WAKE_THREAD;
        }
 
@@ -799,10 +945,17 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
                return ret;
 
        mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
-       status &= mask;
+       /* keep the status of the enabled interrupts, plus the error bits */
+       status &= (mask | de->err_mask);
        if ((status & de->vsync_irq) && malidp->crtc.enabled)
                drm_crtc_handle_vblank(&malidp->crtc);
 
+#ifdef CONFIG_DEBUG_FS
+       if (status & de->err_mask) {
+               malidp_error(malidp, &malidp->de_errors, status,
+                            drm_crtc_vblank_count(&malidp->crtc));
+       }
+#endif
        malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
 
        return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
@@ -818,6 +971,23 @@ static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+       /* ensure interrupts are disabled */
+       malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+       malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+       malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+       malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+       /* first enable the DC block IRQs */
+       malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+                            hwdev->hw->map.dc_irq_map.irq_mask);
+
+       /* now enable the DE block IRQs */
+       malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+                            hwdev->hw->map.de_irq_map.irq_mask);
+}
+
 int malidp_de_irq_init(struct drm_device *drm, int irq)
 {
        struct malidp_drm *malidp = drm->dev_private;
@@ -838,22 +1008,13 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
                return ret;
        }
 
-       /* first enable the DC block IRQs */
-       malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
-                            hwdev->hw->map.dc_irq_map.irq_mask);
-
-       /* now enable the DE block IRQs */
-       malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
-                            hwdev->hw->map.de_irq_map.irq_mask);
+       malidp_de_irq_hw_init(hwdev);
 
        return 0;
 }
 
-void malidp_de_irq_fini(struct drm_device *drm)
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev)
 {
-       struct malidp_drm *malidp = drm->dev_private;
-       struct malidp_hw_device *hwdev = malidp->dev;
-
        malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
                              hwdev->hw->map.de_irq_map.irq_mask);
        malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
@@ -878,19 +1039,61 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
                return IRQ_NONE;
 
        status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
-       if (!(status & se->irq_mask))
+       if (!(status & (se->irq_mask | se->err_mask)))
                return IRQ_NONE;
 
+#ifdef CONFIG_DEBUG_FS
+       if (status & se->err_mask)
+               malidp_error(malidp, &malidp->se_errors, status,
+                            drm_crtc_vblank_count(&malidp->crtc));
+#endif
        mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
-       status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
        status &= mask;
-       /* ToDo: status decoding and firing up of VSYNC and page flip events */
+
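+       /*
+        * For the SE block, vsync_irq is the writeback completion event:
+        * EOW on DP550/DP650, CONF_VALID on DP500.
+        */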
+       if (status & se->vsync_irq) {
+               switch (hwdev->mw_state) {
+               case MW_ONESHOT:
+                       drm_writeback_signal_completion(&malidp->mw_connector, 0);
+                       break;
+               case MW_STOP:
+                       drm_writeback_signal_completion(&malidp->mw_connector, 0);
+                       /* disable writeback after stop */
+                       hwdev->mw_state = MW_NOT_ENABLED;
+                       break;
+               case MW_RESTART:
+                       drm_writeback_signal_completion(&malidp->mw_connector, 0);
+                       /* fall through to a new start */
+               case MW_START:
+                       /* writeback started, need to emulate one-shot mode */
+                       hw->disable_memwrite(hwdev);
+                       /*
+                        * Only set the config_valid HW bit if there is no
+                        * other update in progress, or if we raced ahead of
+                        * the DE IRQ handler and the config_valid flag will
+                        * not be updated until later.
+                        */
+                       status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+                       if ((atomic_read(&malidp->config_valid) != MALIDP_CONFIG_START) ||
+                           (status & hw->map.dc_irq_map.vsync_irq))
+                               hw->set_config_valid(hwdev, 1);
+                       break;
+               }
+       }
 
        malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
 
        return IRQ_HANDLED;
 }
 
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev)
+{
+       /* ensure interrupts are disabled */
+       malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+       malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+       malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+                            hwdev->hw->map.se_irq_map.irq_mask);
+}
+
 static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
 {
        return IRQ_HANDLED;
@@ -914,17 +1117,14 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
                return ret;
        }
 
-       malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
-                            hwdev->hw->map.se_irq_map.irq_mask);
+       hwdev->mw_state = MW_NOT_ENABLED;
+       malidp_se_irq_hw_init(hwdev);
 
        return 0;
 }
 
-void malidp_se_irq_fini(struct drm_device *drm)
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev)
 {
-       struct malidp_drm *malidp = drm->dev_private;
-       struct malidp_hw_device *hwdev = malidp->dev;
-
        malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
                              hwdev->hw->map.se_irq_map.irq_mask);
 }
index b5dd6c73ec9f233362983c718240c3d2ac85baa9..ad2e96915d44a253c8d77dc2e5d98c22c758fc5c 100644 (file)
@@ -33,6 +33,7 @@ enum {
        DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
        DE_VIDEO2 = BIT(3),
        DE_SMART = BIT(4),
+       SE_MEMWRITE = BIT(5),
 };
 
 struct malidp_format_id {
@@ -52,6 +53,7 @@ struct malidp_format_id {
 struct malidp_irq_map {
        u32 irq_mask;           /* mask of IRQs that can be enabled in the block */
        u32 vsync_irq;          /* IRQ bit used for signaling during VSYNC */
+       u32 err_mask;           /* mask of bits that represent errors */
 };
 
 struct malidp_layer {
@@ -151,12 +153,13 @@ struct malidp_hw {
        bool (*in_config_mode)(struct malidp_hw_device *hwdev);
 
        /*
-        * Set configuration valid flag for hardware parameters that can
-        * be changed outside the configuration mode. Hardware will use
-        * the new settings when config valid is set after the end of the
-        * current buffer scanout
+        * Set or clear the configuration valid flag, according to the given
+        * value, for hardware parameters that can be changed outside the
+        * configuration mode. Hardware will use the new settings when config
+        * valid is set, after the end of the current buffer scanout, and will
+        * ignore any new values for those parameters while the flag is cleared.
         */
-       void (*set_config_valid)(struct malidp_hw_device *hwdev);
+       void (*set_config_valid)(struct malidp_hw_device *hwdev, u8 value);
 
        /*
         * Set a new mode in hardware. Requires the hardware to be in
@@ -177,6 +180,23 @@ struct malidp_hw {
        long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
                             struct malidp_se_config *se_config,
                             struct videomode *vm);
+       /*
+        * Enable writing the content of the next frame to memory.
+        * @param hwdev - malidp_hw_device structure containing the HW description
+        * @param addrs - array of addresses for each plane
+        * @param pitches - array of pitches for each plane
+        * @param num_planes - number of planes to be written
+        * @param w - width of the output frame
+        * @param h - height of the output frame
+        * @param fmt_id - internal format ID of output buffer
+        */
+       int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
+                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+
+       /*
+        * Disable writing the next frame's content to memory.
+        */
+       void (*disable_memwrite)(struct malidp_hw_device *hwdev);
 
        u8 features;
 };
@@ -210,10 +230,14 @@ struct malidp_hw_device {
 
        u8 min_line_size;
        u16 max_line_size;
+       u32 output_color_depth;
 
        /* track the device PM state */
        bool pm_suspended;
 
+       /* track the SE memory writeback state */
+       u8 mw_state;
+
        /* size of memory used for rotating layers, up to two banks available */
        u32 rotation_memory[2];
 };
@@ -279,9 +303,11 @@ static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
 }
 
 int malidp_de_irq_init(struct drm_device *drm, int irq);
-void malidp_de_irq_fini(struct drm_device *drm);
+void malidp_se_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev);
+void malidp_de_irq_fini(struct malidp_hw_device *hwdev);
 int malidp_se_irq_init(struct drm_device *drm, int irq);
-void malidp_se_irq_fini(struct drm_device *drm);
+void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
 
 u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
                           u8 layer_id, u32 format);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
new file mode 100644 (file)
index 0000000..ba6ae66
--- /dev/null
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * ARM Mali DP Writeback connector implementation
+ */
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drmP.h>
+#include <drm/drm_writeback.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+#include "malidp_mw.h"
+
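+/*
+ * The cast below is valid because @base is the first member of
+ * struct malidp_mw_connector_state.
+ */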
+#define to_mw_state(_state) (struct malidp_mw_connector_state *)(_state)
+
+struct malidp_mw_connector_state {
+       struct drm_connector_state base;
+       dma_addr_t addrs[2];
+       s32 pitches[2];
+       u8 format;
+       u8 n_planes;
+};
+
+static int malidp_mw_connector_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+
+       return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+                                   dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+malidp_mw_connector_mode_valid(struct drm_connector *connector,
+                              struct drm_display_mode *mode)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       int w = mode->hdisplay, h = mode->vdisplay;
+
+       if ((w < mode_config->min_width) || (w > mode_config->max_width))
+               return MODE_BAD_HVALUE;
+
+       if ((h < mode_config->min_height) || (h > mode_config->max_height))
+               return MODE_BAD_VVALUE;
+
+       return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+       .get_modes = malidp_mw_connector_get_modes,
+       .mode_valid = malidp_mw_connector_mode_valid,
+};
+
+static void malidp_mw_connector_reset(struct drm_connector *connector)
+{
+       struct malidp_mw_connector_state *mw_state =
+               kzalloc(sizeof(*mw_state), GFP_KERNEL);
+
+       if (connector->state)
+               __drm_atomic_helper_connector_destroy_state(connector->state);
+
+       kfree(connector->state);
+       __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+}
+
+static enum drm_connector_status
+malidp_mw_connector_detect(struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+static void malidp_mw_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_state *
+malidp_mw_connector_duplicate_state(struct drm_connector *connector)
+{
+       struct malidp_mw_connector_state *mw_state;
+
+       if (WARN_ON(!connector->state))
+               return NULL;
+
+       mw_state = kzalloc(sizeof(*mw_state), GFP_KERNEL);
+       if (!mw_state)
+               return NULL;
+
+       /* No need to preserve any of our driver-local data */
+       __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
+
+       return &mw_state->base;
+}
+
+static const struct drm_connector_funcs malidp_mw_connector_funcs = {
+       .reset = malidp_mw_connector_reset,
+       .detect = malidp_mw_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = malidp_mw_connector_destroy,
+       .atomic_duplicate_state = malidp_mw_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int
+malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
+                              struct drm_crtc_state *crtc_state,
+                              struct drm_connector_state *conn_state)
+{
+       struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state);
+       struct malidp_drm *malidp = encoder->dev->dev_private;
+       struct drm_framebuffer *fb;
+       int i, n_planes;
+
+       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+               return 0;
+
+       fb = conn_state->writeback_job->fb;
+       if ((fb->width != crtc_state->mode.hdisplay) ||
+           (fb->height != crtc_state->mode.vdisplay)) {
+               DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+                               fb->width, fb->height);
+               return -EINVAL;
+       }
+
+       mw_state->format =
+               malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
+                                       fb->format->format);
+       if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
+               struct drm_format_name_buf format_name;
+
+               DRM_DEBUG_KMS("Invalid pixel format %s\n",
+                             drm_get_format_name(fb->format->format,
+                                                 &format_name));
+               return -EINVAL;
+       }
+
+       n_planes = drm_format_num_planes(fb->format->format);
+       for (i = 0; i < n_planes; i++) {
+               struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, i);
+               /* memory write buffers are never rotated */
+               u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 0);
+
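+               /*
+                * This assumes the pitch alignment is a power of two, so
+                * masking with (alignment - 1) tests whether fb->pitches[i]
+                * is aligned.
+                */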
+               if (fb->pitches[i] & (alignment - 1)) {
+                       DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
+                                     fb->pitches[i], i);
+                       return -EINVAL;
+               }
+               mw_state->pitches[i] = fb->pitches[i];
+               mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+       }
+       mw_state->n_planes = n_planes;
+
+       return 0;
+}
+
+static const struct drm_encoder_helper_funcs malidp_mw_encoder_helper_funcs = {
+       .atomic_check = malidp_mw_encoder_atomic_check,
+};
+
+static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats)
+{
+       const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+       u32 *formats;
+       int n, i;
+
+       formats = kcalloc(map->n_pixel_formats, sizeof(*formats),
+                         GFP_KERNEL);
+       if (!formats)
+               return NULL;
+
+       for (n = 0, i = 0; i < map->n_pixel_formats; i++) {
+               if (map->pixel_formats[i].layer & SE_MEMWRITE)
+                       formats[n++] = map->pixel_formats[i].format;
+       }
+
+       *n_formats = n;
+
+       return formats;
+}
+
+int malidp_mw_connector_init(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       u32 *formats;
+       int ret, n_formats;
+
+       if (!malidp->dev->hw->enable_memwrite)
+               return 0;
+
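+       /* the writeback encoder can only be driven by the device's single CRTC */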
+       malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc);
+       drm_connector_helper_add(&malidp->mw_connector.base,
+                                &malidp_mw_connector_helper_funcs);
+
+       formats = get_writeback_formats(malidp, &n_formats);
+       if (!formats)
+               return -ENOMEM;
+
+       ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
+                                          &malidp_mw_connector_funcs,
+                                          &malidp_mw_encoder_helper_funcs,
+                                          formats, n_formats);
+       kfree(formats);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+void malidp_mw_atomic_commit(struct drm_device *drm,
+                            struct drm_atomic_state *old_state)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct drm_writeback_connector *mw_conn = &malidp->mw_connector;
+       struct drm_connector_state *conn_state = mw_conn->base.state;
+       struct malidp_hw_device *hwdev = malidp->dev;
+       struct malidp_mw_connector_state *mw_state;
+
+       if (!conn_state)
+               return;
+
+       mw_state = to_mw_state(conn_state);
+
+       if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+               struct drm_framebuffer *fb = conn_state->writeback_job->fb;
+
+               DRM_DEV_DEBUG_DRIVER(drm->dev,
+                                    "Enable memwrite %ux%u:%d %pad fmt: %u\n",
+                                    fb->width, fb->height,
+                                    mw_state->pitches[0],
+                                    &mw_state->addrs[0],
+                                    mw_state->format);
+
+               drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
+               conn_state->writeback_job = NULL;
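+               /*
+                * The queued job is now owned by the writeback connector and
+                * will be completed from the SE IRQ handler once the frame
+                * has been written out.
+                */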
+
+               hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
+                                          mw_state->pitches, mw_state->n_planes,
+                                          fb->width, fb->height, mw_state->format);
+       } else {
+               DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
+               hwdev->hw->disable_memwrite(hwdev);
+       }
+}
diff --git a/drivers/gpu/drm/arm/malidp_mw.h b/drivers/gpu/drm/arm/malidp_mw.h
new file mode 100644 (file)
index 0000000..19a0076
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ */
+
+#ifndef __MALIDP_MW_H__
+#define __MALIDP_MW_H__
+
+int malidp_mw_connector_init(struct drm_device *drm);
+void malidp_mw_atomic_commit(struct drm_device *drm,
+                            struct drm_atomic_state *old_state);
+#endif
index 7a44897c50fea784bf516db7f17866ee31f413fa..29409a65d864760e674f787cb5279cdbff5b91a7 100644 (file)
@@ -23,6 +23,7 @@
 
 /* Layer specific register offsets */
 #define MALIDP_LAYER_FORMAT            0x000
+#define   LAYER_FORMAT_MASK            0x3f
 #define MALIDP_LAYER_CONTROL           0x004
 #define   LAYER_ENABLE                 (1 << 0)
 #define   LAYER_FLOWCFG_MASK           7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
-                                                    state->crtc_w,
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
+                                                    state->crtc_h,
                                                     fb->format->format);
                if (val < 0)
                        return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
        dest_w = plane->state->crtc_w;
        dest_h = plane->state->crtc_h;
 
-       malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
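+       /*
+        * Read-modify-write so that only the format field changes and any
+        * other bits in the layer FORMAT register are preserved.
+        */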
+       val = malidp_hw_read(mp->hwdev, mp->layer->base);
+       val = (val & ~LAYER_FORMAT_MASK) | ms->format;
+       malidp_hw_write(mp->hwdev, val, mp->layer->base);
 
        for (i = 0; i < ms->n_planes; i++) {
                /* calculate the offset for the layer's plane registers */
index 149024fb44327c3f16351cc0c2a2f7a359668a21..3579d36b2a717aedc436cd574b8639f9ea1d7d9b 100644 (file)
@@ -53,6 +53,8 @@
 #define MALIDP550_DE_IRQ_AXI_ERR               (1 << 16)
 #define MALIDP550_SE_IRQ_EOW                   (1 << 0)
 #define MALIDP550_SE_IRQ_AXI_ERR               (1 << 16)
+#define MALIDP550_SE_IRQ_OVR                   (1 << 17)
+#define MALIDP550_SE_IRQ_IBSY                  (1 << 18)
 #define MALIDP550_DC_IRQ_CONF_VALID            (1 << 0)
 #define MALIDP550_DC_IRQ_CONF_MODE             (1 << 4)
 #define MALIDP550_DC_IRQ_CONF_ACTIVE           (1 << 16)
 #define MALIDP550_DC_IRQ_SE                    (1 << 24)
 
 #define MALIDP650_DE_IRQ_DRIFT                 (1 << 4)
+#define MALIDP650_DE_IRQ_ACEV1                 (1 << 17)
+#define MALIDP650_DE_IRQ_ACEV2                 (1 << 18)
+#define MALIDP650_DE_IRQ_ACEG                  (1 << 19)
+#define MALIDP650_DE_IRQ_AXIEP                 (1 << 28)
 
 /* bit masks that are common between products */
 #define   MALIDP_CFG_VALID             (1 << 0)
 #define   MALIDP_DISP_FUNC_GAMMA       (1 << 0)
 #define   MALIDP_DISP_FUNC_CADJ                (1 << 4)
 #define   MALIDP_DISP_FUNC_ILACED      (1 << 8)
+#define   MALIDP_SCALE_ENGINE_EN       (1 << 16)
+#define   MALIDP_SE_MEMWRITE_EN                (2 << 5)
 
 /* register offsets for IRQ management */
 #define MALIDP_REG_STATUS              0x00000
                (((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
 #define   MALIDP_SE_ENH_COEFF0                 0x04
 
+
+/* register offsets relative to MALIDP5x0_SE_MEMWRITE_BASE */
+#define MALIDP_MW_FORMAT               0x00000
+#define MALIDP_MW_P1_STRIDE            0x00004
+#define MALIDP_MW_P2_STRIDE            0x00008
+#define MALIDP_MW_P1_PTR_LOW           0x0000c
+#define MALIDP_MW_P1_PTR_HIGH          0x00010
+#define MALIDP_MW_P2_PTR_LOW           0x0002c
+#define MALIDP_MW_P2_PTR_HIGH          0x00030
+
 /* register offsets and bits specific to DP500 */
 #define MALIDP500_ADDR_SPACE_SIZE      0x01000
 #define MALIDP500_DC_BASE              0x00000
 #define MALIDP500_DE_LG2_PTR_BASE      0x0031c
 #define MALIDP500_SE_BASE              0x00c00
 #define MALIDP500_SE_CONTROL           0x00c0c
-#define MALIDP500_SE_PTR_BASE          0x00e0c
+#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_MEMWRITE_BASE     0x00e00
 #define MALIDP500_DC_IRQ_BASE          0x00f00
 #define MALIDP500_CONFIG_VALID         0x00f00
 #define MALIDP500_CONFIG_ID            0x00fd4
 #define MALIDP550_DE_PERF_BASE         0x00500
 #define MALIDP550_SE_BASE              0x08000
 #define MALIDP550_SE_CONTROL           0x08010
+#define   MALIDP550_SE_MEMWRITE_ONESHOT        (1 << 7)
+#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_MEMWRITE_BASE     0x08100
 #define MALIDP550_DC_BASE              0x0c000
 #define MALIDP550_DC_CONTROL           0x0c010
 #define   MALIDP550_DC_CONFIG_REQ      (1 << 16)
index ecf25cf9f9f59a222700ab9cd72ca2776cc66bed..9bc3c321372480c3615e320aff5bc47ff2b3fb78 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 armada-y       := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
-                  armada_gem.o armada_overlay.o armada_trace.o
+                  armada_gem.o armada_overlay.o armada_plane.o armada_trace.o
 armada-y       += armada_510.o
 armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
 
index 41a784f5a5e64df3feabcc0ad3e1053f64a29aaa..2f7c048c53613b9086beaa1a5ecaa26c508df884 100644 (file)
@@ -27,6 +27,10 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
        /* Lower the watermark so to eliminate jitter at higher bandwidths */
        armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
 
+       /* Initialise SPU register */
+       writel_relaxed(ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+                      dcrtc->base + LCD_SPU_ADV_REG);
+
        return 0;
 }
 
@@ -75,9 +79,27 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
        return 0;
 }
 
+static void armada510_crtc_disable(struct armada_crtc *dcrtc)
+{
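+       /*
+        * An IS_ERR() value marks the clock as released;
+        * armada510_crtc_enable() re-claims extclk[0].
+        */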
+       if (!IS_ERR(dcrtc->clk)) {
+               clk_disable_unprepare(dcrtc->clk);
+               dcrtc->clk = ERR_PTR(-EINVAL);
+       }
+}
+
+static void armada510_crtc_enable(struct armada_crtc *dcrtc,
+       const struct drm_display_mode *mode)
+{
+       if (IS_ERR(dcrtc->clk)) {
+               dcrtc->clk = dcrtc->extclk[0];
+               WARN_ON(clk_prepare_enable(dcrtc->clk));
+       }
+}
+
 const struct armada_variant armada510_ops = {
        .has_spu_adv_reg = true,
-       .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
        .init = armada510_crtc_init,
        .compute_clock = armada510_crtc_compute_clock,
+       .disable = armada510_crtc_disable,
+       .enable = armada510_crtc_enable,
 };
index 03eeee11dd5bd2f4ed000b2f86ca5eda6253f9ca..da9360688b5546664deb1f41de3e75e12137fa85 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_atomic_helper.h>
 #include "armada_fb.h"
 #include "armada_gem.h"
 #include "armada_hw.h"
+#include "armada_plane.h"
 #include "armada_trace.h"
 
-enum csc_mode {
-       CSC_AUTO = 0,
-       CSC_YUV_CCIR601 = 1,
-       CSC_YUV_CCIR709 = 2,
-       CSC_RGB_COMPUTER = 1,
-       CSC_RGB_STUDIO = 2,
-};
-
-static const uint32_t armada_primary_formats[] = {
-       DRM_FORMAT_UYVY,
-       DRM_FORMAT_YUYV,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_YVYU,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_ABGR8888,
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_XBGR8888,
-       DRM_FORMAT_RGB888,
-       DRM_FORMAT_BGR888,
-       DRM_FORMAT_ARGB1555,
-       DRM_FORMAT_ABGR1555,
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_BGR565,
-};
-
 /*
  * A note about interlacing.  Let's consider HDMI 1920x1080i.
  * The timing parameters we have from X are:
@@ -115,15 +92,13 @@ armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
        }
 }
 
-#define dpms_blanked(dpms)     ((dpms) != DRM_MODE_DPMS_ON)
-
-static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
 {
        uint32_t dumb_ctrl;
 
        dumb_ctrl = dcrtc->cfg_dumb_ctrl;
 
-       if (!dpms_blanked(dcrtc->dpms))
+       if (enable)
                dumb_ctrl |= CFG_DUMB_ENA;
 
        /*
@@ -132,295 +107,26 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
         * force LCD_D[23:0] to output blank color, overriding the GPIO or
         * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
         */
-       if (dpms_blanked(dcrtc->dpms) &&
-           (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+       if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
                dumb_ctrl &= ~DUMB_MASK;
                dumb_ctrl |= DUMB_BLANK;
        }
 
-       /*
-        * The documentation doesn't indicate what the normal state of
-        * the sync signals are.  Sebastian Hesselbart kindly probed
-        * these signals on his board to determine their state.
-        *
-        * The non-inverted state of the sync signals is active high.
-        * Setting these bits makes the appropriate signal active low.
-        */
-       if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
-               dumb_ctrl |= CFG_INV_CSYNC;
-       if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
-               dumb_ctrl |= CFG_INV_HSYNC;
-       if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
-               dumb_ctrl |= CFG_INV_VSYNC;
-
-       if (dcrtc->dumb_ctrl != dumb_ctrl) {
-               dcrtc->dumb_ctrl = dumb_ctrl;
-               writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
-       }
-}
-
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
-       int x, int y)
-{
-       const struct drm_format_info *format = fb->format;
-       unsigned int num_planes = format->num_planes;
-       u32 addr = drm_fb_obj(fb)->dev_addr;
-       int i;
-
-       if (num_planes > 3)
-               num_planes = 3;
-
-       addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
-                  x * format->cpp[0];
-
-       y /= format->vsub;
-       x /= format->hsub;
-
-       for (i = 1; i < num_planes; i++)
-               addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
-                            x * format->cpp[i];
-       for (; i < 3; i++)
-               addrs[i] = 0;
-}
-
-static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
-       int x, int y, struct armada_regs *regs, bool interlaced)
-{
-       unsigned pitch = fb->pitches[0];
-       u32 addrs[3], addr_odd, addr_even;
-       unsigned i = 0;
-
-       DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
-               pitch, x, y, fb->format->cpp[0] * 8);
-
-       armada_drm_plane_calc_addrs(addrs, fb, x, y);
-
-       addr_odd = addr_even = addrs[0];
-
-       if (interlaced) {
-               addr_even += pitch;
-               pitch *= 2;
-       }
-
-       /* write offset, base, and pitch */
-       armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
-       armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
-       armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
-
-       return i;
-}
-
-static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
-       struct armada_plane_work *work,
-       void (*fn)(struct armada_crtc *, struct armada_plane_work *))
-{
-       struct armada_plane *dplane = drm_to_armada_plane(work->plane);
-       struct drm_pending_vblank_event *event;
-       struct drm_framebuffer *fb;
-
-       if (fn)
-               fn(dcrtc, work);
-       drm_crtc_vblank_put(&dcrtc->crtc);
-
-       event = work->event;
-       fb = work->old_fb;
-       if (event || fb) {
-               struct drm_device *dev = dcrtc->crtc.dev;
-               unsigned long flags;
-
-               spin_lock_irqsave(&dev->event_lock, flags);
-               if (event)
-                       drm_crtc_send_vblank_event(&dcrtc->crtc, event);
-               if (fb)
-                       __armada_drm_queue_unref_work(dev, fb);
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
-
-       if (work->need_kfree)
-               kfree(work);
-
-       wake_up(&dplane->frame_wait);
+       armada_updatel(dumb_ctrl,
+                      ~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
+                      dcrtc->base + LCD_SPU_DUMB_CTRL);
 }
 
-static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
-       struct drm_plane *plane)
-{
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
-       struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
-       /* Handle any pending frame work. */
-       if (work)
-               armada_drm_plane_work_call(dcrtc, work, work->fn);
-}
-
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
-       struct armada_plane_work *work)
-{
-       struct armada_plane *plane = drm_to_armada_plane(work->plane);
-       int ret;
-
-       ret = drm_crtc_vblank_get(&dcrtc->crtc);
-       if (ret)
-               return ret;
-
-       ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
-       if (ret)
-               drm_crtc_vblank_put(&dcrtc->crtc);
-
-       return ret;
-}
-
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
-{
-       return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
-}
-
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
-       struct armada_plane *dplane)
-{
-       struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
-       if (work)
-               armada_drm_plane_work_call(dcrtc, work, work->cancel);
-}
-
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
-       struct armada_plane_work *work)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dcrtc->irq_lock, flags);
-       armada_drm_crtc_update_regs(dcrtc, work->regs);
-       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
-       struct armada_plane_work *work)
-{
-       unsigned long flags;
-
-       if (dcrtc->plane == work->plane)
-               dcrtc->plane = NULL;
-
-       spin_lock_irqsave(&dcrtc->irq_lock, flags);
-       armada_drm_crtc_update_regs(dcrtc, work->regs);
-       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static struct armada_plane_work *
-armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
-{
-       struct armada_plane_work *work;
-       int i = 0;
-
-       work = kzalloc(sizeof(*work), GFP_KERNEL);
-       if (!work)
-               return NULL;
-
-       work->plane = plane;
-       work->fn = armada_drm_crtc_complete_frame_work;
-       work->need_kfree = true;
-       armada_reg_queue_end(work->regs, i);
-
-       return work;
-}
-
-static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
-       struct drm_framebuffer *fb, bool force)
-{
-       struct armada_plane_work *work;
-
-       if (!fb)
-               return;
-
-       if (force) {
-               /* Display is disabled, so just drop the old fb */
-               drm_framebuffer_put(fb);
-               return;
-       }
-
-       work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
-       if (work) {
-               work->old_fb = fb;
-
-               if (armada_drm_plane_work_queue(dcrtc, work) == 0)
-                       return;
-
-               kfree(work);
-       }
-
-       /*
-        * Oops - just drop the reference immediately and hope for
-        * the best.  The worst that will happen is the buffer gets
-        * reused before it has finished being displayed.
-        */
-       drm_framebuffer_put(fb);
-}
-
-static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
-{
-       /*
-        * Tell the DRM core that vblank IRQs aren't going to happen for
-        * a while.  This cleans up any pending vblank events for us.
-        */
-       drm_crtc_vblank_off(&dcrtc->crtc);
-       armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
-{
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
-       if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
-               if (dpms_blanked(dpms))
-                       armada_drm_vblank_off(dcrtc);
-               else if (!IS_ERR(dcrtc->clk))
-                       WARN_ON(clk_prepare_enable(dcrtc->clk));
-               dcrtc->dpms = dpms;
-               armada_drm_crtc_update(dcrtc);
-               if (!dpms_blanked(dpms))
-                       drm_crtc_vblank_on(&dcrtc->crtc);
-               else if (!IS_ERR(dcrtc->clk))
-                       clk_disable_unprepare(dcrtc->clk);
-       } else if (dcrtc->dpms != dpms) {
-               dcrtc->dpms = dpms;
-       }
-}
-
-/*
- * Prepare for a mode set.  Turn off overlay to ensure that we don't end
- * up with the overlay size being bigger than the active screen size.
- * We rely upon X refreshing this state after the mode set has completed.
- *
- * The mode_config.mutex will be held for this call
- */
-static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct drm_plane *plane;
-
-       /*
-        * If we have an overlay plane associated with this CRTC, disable
-        * it before the modeset to avoid its coordinates being outside
-        * the new mode parameters.
-        */
-       plane = dcrtc->plane;
-       if (plane) {
-               drm_plane_force_disable(plane);
-               WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
-                                                   HZ));
-       }
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct drm_pending_vblank_event *event;
 
-       if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
-               dcrtc->dpms = DRM_MODE_DPMS_ON;
-               armada_drm_crtc_update(dcrtc);
+       /* If we have an event, we need vblank events enabled */
+       event = xchg(&crtc->state->event, NULL);
+       if (event) {
+               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+               dcrtc->event = event;
        }
 }
 
@@ -465,8 +171,8 @@ static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
 
 static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
 {
+       struct drm_pending_vblank_event *event;
        void __iomem *base = dcrtc->base;
-       struct drm_plane *ovl_plane;
 
        if (stat & DMA_FF_UNDERFLOW)
                DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -476,10 +182,6 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
        if (stat & VSYNC_IRQ)
                drm_crtc_handle_vblank(&dcrtc->crtc);
 
-       ovl_plane = dcrtc->plane;
-       if (ovl_plane)
-               armada_drm_plane_work_run(dcrtc, ovl_plane);
-
        spin_lock(&dcrtc->irq_lock);
        if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
                int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -495,22 +197,35 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
                writel_relaxed(val, base + LCD_SPU_ADV_REG);
        }
 
-       if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
-               writel_relaxed(dcrtc->cursor_hw_pos,
-                              base + LCD_SPU_HWC_OVSA_HPXL_VLN);
-               writel_relaxed(dcrtc->cursor_hw_sz,
-                              base + LCD_SPU_HWC_HPXL_VLN);
-               armada_updatel(CFG_HWC_ENA,
-                              CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
-                              base + LCD_SPU_DMA_CTRL0);
-               dcrtc->cursor_update = false;
+       if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
+               if (dcrtc->update_pending) {
+                       armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+                       dcrtc->update_pending = false;
+               }
+               if (dcrtc->cursor_update) {
+                       writel_relaxed(dcrtc->cursor_hw_pos,
+                                      base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+                       writel_relaxed(dcrtc->cursor_hw_sz,
+                                      base + LCD_SPU_HWC_HPXL_VLN);
+                       armada_updatel(CFG_HWC_ENA,
+                                      CFG_HWC_ENA | CFG_HWC_1BITMOD |
+                                      CFG_HWC_1BITENA,
+                                      base + LCD_SPU_DMA_CTRL0);
+                       dcrtc->cursor_update = false;
+               }
                armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
        }
-
        spin_unlock(&dcrtc->irq_lock);
 
-       if (stat & GRA_FRAME_IRQ)
-               armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
+       if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
+               event = xchg(&dcrtc->event, NULL);
+               if (event) {
+                       spin_lock(&dcrtc->crtc.dev->event_lock);
+                       drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+                       spin_unlock(&dcrtc->crtc.dev->event_lock);
+                       drm_crtc_vblank_put(&dcrtc->crtc);
+               }
+       }
 }
 
 static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -519,8 +234,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
        u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
 
        /*
-        * This is rediculous - rather than writing bits to clear, we
-        * have to set the actual status register value.  This is racy.
+        * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+        * is set.  Writing has some other effect to acknowledge the IRQ -
+        * without this, we only get a single IRQ.
         */
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
@@ -536,107 +252,16 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
        return IRQ_NONE;
 }
 
-static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
-{
-       struct drm_display_mode *adj = &dcrtc->crtc.mode;
-       uint32_t val = 0;
-
-       if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
-               val |= CFG_CSC_YUV_CCIR709;
-       if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
-               val |= CFG_CSC_RGB_STUDIO;
-
-       /*
-        * In auto mode, set the colorimetry, based upon the HDMI spec.
-        * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
-        * ITU601.  It may be more appropriate to set this depending on
-        * the source - but what if the graphic frame is YUV and the
-        * video frame is RGB?
-        */
-       if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
-            !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
-           (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
-               if (dcrtc->csc_yuv_mode == CSC_AUTO)
-                       val |= CFG_CSC_YUV_CCIR709;
-       }
-
-       /*
-        * We assume we're connected to a TV-like device, so the YUV->RGB
-        * conversion should produce a limited range.  We should set this
-        * depending on the connectors attached to this CRTC, and what
-        * kind of device they report being connected.
-        */
-       if (dcrtc->csc_rgb_mode == CSC_AUTO)
-               val |= CFG_CSC_RGB_STUDIO;
-
-       return val;
-}
-
-static void armada_drm_gra_plane_regs(struct armada_regs *regs,
-       struct drm_framebuffer *fb, struct armada_plane_state *state,
-       int x, int y, bool interlaced)
-{
-       unsigned int i;
-       u32 ctrl0;
-
-       i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
-       armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
-       armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
-       armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
-
-       ctrl0 = state->ctrl0;
-       if (interlaced)
-               ctrl0 |= CFG_GRA_FTOGGLE;
-
-       armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
-                            CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
-                                        CFG_SWAPYU | CFG_YUV2RGB) |
-                            CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
-                            CFG_GRA_HSMOOTH | CFG_GRA_ENA,
-                            LCD_SPU_DMA_CTRL0);
-       armada_reg_queue_end(regs, i);
-}
-
-static void armada_drm_primary_set(struct drm_crtc *crtc,
-       struct drm_plane *plane, int x, int y)
-{
-       struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_regs regs[8];
-       bool interlaced = dcrtc->interlaced;
-
-       armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
-       armada_drm_crtc_update_regs(dcrtc, regs);
-}
-
 /* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
-       struct drm_display_mode *mode, struct drm_display_mode *adj,
-       int x, int y, struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
+       struct drm_display_mode *adj = &crtc->state->adjusted_mode;
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct armada_regs regs[17];
        uint32_t lm, rm, tm, bm, val, sclk;
        unsigned long flags;
        unsigned i;
-       bool interlaced;
-
-       drm_framebuffer_get(crtc->primary->fb);
-
-       interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
-
-       val = CFG_GRA_ENA;
-       val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
-       val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
-       if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
-               val |= CFG_PALETTE_ENA;
-
-       drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
-       drm_to_armada_plane(crtc->primary)->state.src_hw =
-       drm_to_armada_plane(crtc->primary)->state.dst_hw =
-               adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
-       drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+       bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
 
        i = 0;
        rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
@@ -644,35 +269,15 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
        bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
        tm = adj->crtc_vtotal - adj->crtc_vsync_end;
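+       /*
+        * Example: CEA 1920x1080p60 (hsync_start 2008, hsync_end 2052,
+        * htotal 2200; vsync_start 1084, vsync_end 1089, vtotal 1125)
+        * yields rm = 88, bm = 4, tm = 36, and lm = 148 from the
+        * corresponding htotal - hsync_end calculation above.
+        */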
 
-       DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
-               adj->crtc_hdisplay,
-               adj->crtc_hsync_start,
-               adj->crtc_hsync_end,
-               adj->crtc_htotal, lm, rm);
-       DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
-               adj->crtc_vdisplay,
-               adj->crtc_vsync_start,
-               adj->crtc_vsync_end,
-               adj->crtc_vtotal, tm, bm);
-
-       /* Wait for pending flips to complete */
-       armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
-                                  MAX_SCHEDULE_TIMEOUT);
-
-       drm_crtc_vblank_off(crtc);
-
-       val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
-       if (val != dcrtc->dumb_ctrl) {
-               dcrtc->dumb_ctrl = val;
-               writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
-       }
-
-       /*
-        * If we are blanked, we would have disabled the clock.  Re-enable
-        * it so that compute_clock() does the right thing.
-        */
-       if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
-               WARN_ON(clk_prepare_enable(dcrtc->clk));
+       DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
+                     crtc->base.id, crtc->name,
+                     adj->base.id, adj->name, adj->vrefresh, adj->clock,
+                     adj->crtc_hdisplay, adj->crtc_hsync_start,
+                     adj->crtc_hsync_end, adj->crtc_htotal,
+                     adj->crtc_vdisplay, adj->crtc_vsync_start,
+                     adj->crtc_vsync_end, adj->crtc_vtotal,
+                     adj->type, adj->flags);
+       DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
 
        /* Now compute the divider for real */
        dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
@@ -689,25 +294,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 
        spin_lock_irqsave(&dcrtc->irq_lock, flags);
 
-       /* Ensure graphic fifo is enabled */
-       armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
        /* Even interlaced/progressive frame */
        dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
                                    adj->crtc_htotal;
        dcrtc->v[1].spu_v_porch = tm << 16 | bm;
        val = adj->crtc_hsync_start;
-       dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-               dcrtc->variant->spu_adv_reg;
+       dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
 
        if (interlaced) {
                /* Odd interlaced frame */
+               val -= adj->crtc_htotal / 2;
+               dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
                dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
                                                (1 << 16);
                dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
-               val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
-               dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-                       dcrtc->variant->spu_adv_reg;
        } else {
                dcrtc->v[0] = dcrtc->v[1];
        }
@@ -720,77 +320,136 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
        armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
                           LCD_SPUT_V_H_TOTAL);
 
-       if (dcrtc->variant->has_spu_adv_reg) {
+       if (dcrtc->variant->has_spu_adv_reg)
                armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
                                     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
                                     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
-       }
 
        val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
        armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
 
-       val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
-       armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+       /*
+        * The documentation doesn't indicate what the normal state of
+        * the sync signals is.  Sebastian Hesselbarth kindly probed
+        * these signals on his board to determine their state.
+        *
+        * The non-inverted state of the sync signals is active high.
+        * Setting these bits makes the appropriate signal active low.
+        */
+       val = 0;
+       if (adj->flags & DRM_MODE_FLAG_NCSYNC)
+               val |= CFG_INV_CSYNC;
+       if (adj->flags & DRM_MODE_FLAG_NHSYNC)
+               val |= CFG_INV_HSYNC;
+       if (adj->flags & DRM_MODE_FLAG_NVSYNC)
+               val |= CFG_INV_VSYNC;
+       armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
+                            CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);
        armada_reg_queue_end(regs, i);
 
        armada_drm_crtc_update_regs(dcrtc, regs);
-
-       armada_drm_primary_set(crtc, crtc->primary, x, y);
        spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
 
-       armada_drm_crtc_update(dcrtc);
+static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+                                        struct drm_crtc_state *old_crtc_state)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
-       drm_crtc_vblank_on(crtc);
-       armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
-       return 0;
+       dcrtc->regs_idx = 0;
+       dcrtc->regs = dcrtc->atomic_regs;
 }
 
-/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-       struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+                                        struct drm_crtc_state *old_crtc_state)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_regs regs[4];
-       unsigned i;
 
-       i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
-                                   dcrtc->interlaced);
-       armada_reg_queue_end(regs, i);
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+       armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);
+
+       /*
+        * If we aren't doing a full modeset, then we need to queue
+        * the event here.
+        */
+       if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
+               dcrtc->update_pending = true;
+               armada_drm_crtc_queue_state_event(crtc);
+               spin_lock_irq(&dcrtc->irq_lock);
+               armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+               spin_unlock_irq(&dcrtc->irq_lock);
+       } else {
+               spin_lock_irq(&dcrtc->irq_lock);
+               armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+               spin_unlock_irq(&dcrtc->irq_lock);
+       }
+}
 
-       /* Wait for pending flips to complete */
-       armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
-                                  MAX_SCHEDULE_TIMEOUT);
+static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+                                          struct drm_crtc_state *old_state)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct drm_pending_vblank_event *event;
 
-       /* Take a reference to the new fb as we're using it */
-       drm_framebuffer_get(crtc->primary->fb);
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
-       /* Update the base in the CRTC */
-       armada_drm_crtc_update_regs(dcrtc, regs);
+       drm_crtc_vblank_off(crtc);
+       armada_drm_crtc_update(dcrtc, false);
 
-       /* Drop our previously held reference */
-       armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+       if (!crtc->state->active) {
+               /*
+                * This modeset will be leaving the CRTC disabled, so
+                * call the backend to disable upstream clocks etc.
+                */
+               if (dcrtc->variant->disable)
+                       dcrtc->variant->disable(dcrtc);
 
-       return 0;
+               /*
+                * We will not receive any further vblank events.
+                * Send the flip_done event manually.
+                */
+               event = crtc->state->event;
+               crtc->state->event = NULL;
+               if (event) {
+                       spin_lock_irq(&crtc->dev->event_lock);
+                       drm_crtc_send_vblank_event(crtc, event);
+                       spin_unlock_irq(&crtc->dev->event_lock);
+               }
+       }
 }
 
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+                                         struct drm_crtc_state *old_state)
 {
-       armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+       if (!old_state->active) {
+               /*
+                * This modeset is enabling the CRTC after it has been
+                * disabled.  Reverse the call to ->disable made in
+                * atomic_disable().
+                */
+               if (dcrtc->variant->enable)
+                       dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
+       }
+       armada_drm_crtc_update(dcrtc, true);
+       drm_crtc_vblank_on(crtc);
 
-       /* Disable our primary plane when we disable the CRTC. */
-       crtc->primary->funcs->disable_plane(crtc->primary, NULL);
+       armada_drm_crtc_queue_state_event(crtc);
 }
 
 static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
-       .dpms           = armada_drm_crtc_dpms,
-       .prepare        = armada_drm_crtc_prepare,
-       .commit         = armada_drm_crtc_commit,
        .mode_fixup     = armada_drm_crtc_mode_fixup,
-       .mode_set       = armada_drm_crtc_mode_set,
-       .mode_set_base  = armada_drm_crtc_mode_set_base,
-       .disable        = armada_drm_crtc_disable,
+       .mode_set_nofb  = armada_drm_crtc_mode_set_nofb,
+       .atomic_begin   = armada_drm_crtc_atomic_begin,
+       .atomic_flush   = armada_drm_crtc_atomic_flush,
+       .atomic_disable = armada_drm_crtc_atomic_disable,
+       .atomic_enable  = armada_drm_crtc_atomic_enable,
 };
 
 static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
@@ -883,7 +542,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
 
        if (!dcrtc->cursor_obj || !h || !w) {
                spin_lock_irq(&dcrtc->irq_lock);
-               armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
                dcrtc->cursor_update = false;
                armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
                spin_unlock_irq(&dcrtc->irq_lock);
@@ -907,7 +565,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
 
        if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
                spin_lock_irq(&dcrtc->irq_lock);
-               armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
                dcrtc->cursor_update = false;
                armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
                spin_unlock_irq(&dcrtc->irq_lock);
@@ -1015,8 +672,8 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
        priv->dcrtc[dcrtc->num] = NULL;
        drm_crtc_cleanup(&dcrtc->crtc);
 
-       if (!IS_ERR(dcrtc->clk))
-               clk_disable_unprepare(dcrtc->clk);
+       if (dcrtc->variant->disable)
+               dcrtc->variant->disable(dcrtc);
 
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
 
@@ -1025,361 +682,51 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
        kfree(dcrtc);
 }
 
-/*
- * The mode_config lock is held here, to prevent races between this
- * and a mode_set.
- */
-static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
-       struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags,
-       struct drm_modeset_acquire_ctx *ctx)
-{
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_plane_work *work;
-       unsigned i;
-       int ret;
-
-       /* We don't support changing the pixel format */
-       if (fb->format != crtc->primary->fb->format)
-               return -EINVAL;
-
-       work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
-       if (!work)
-               return -ENOMEM;
-
-       work->event = event;
-       work->old_fb = dcrtc->crtc.primary->fb;
-
-       i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
-                                   dcrtc->interlaced);
-       armada_reg_queue_end(work->regs, i);
-
-       /*
-        * Ensure that we hold a reference on the new framebuffer.
-        * This has to match the behaviour in mode_set.
-        */
-       drm_framebuffer_get(fb);
-
-       ret = armada_drm_plane_work_queue(dcrtc, work);
-       if (ret) {
-               /* Undo our reference above */
-               drm_framebuffer_put(fb);
-               kfree(work);
-               return ret;
-       }
-
-       /*
-        * Don't take a reference on the new framebuffer;
-        * drm_mode_page_flip_ioctl() has already grabbed a reference and
-        * will _not_ drop that reference on successful return from this
-        * function.  Simply mark this new framebuffer as the current one.
-        */
-       dcrtc->crtc.primary->fb = fb;
-
-       /*
-        * Finally, if the display is blanked, we won't receive an
-        * interrupt, so complete it now.
-        */
-       if (dpms_blanked(dcrtc->dpms))
-               armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-
-       return 0;
-}
-
-static int
-armada_drm_crtc_set_property(struct drm_crtc *crtc,
-       struct drm_property *property, uint64_t val)
-{
-       struct armada_private *priv = crtc->dev->dev_private;
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       bool update_csc = false;
-
-       if (property == priv->csc_yuv_prop) {
-               dcrtc->csc_yuv_mode = val;
-               update_csc = true;
-       } else if (property == priv->csc_rgb_prop) {
-               dcrtc->csc_rgb_mode = val;
-               update_csc = true;
-       }
-
-       if (update_csc) {
-               uint32_t val;
-
-               val = dcrtc->spu_iopad_ctrl |
-                     armada_drm_crtc_calculate_csc(dcrtc);
-               writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
-       }
-
-       return 0;
-}
-
 /* These are called under the vbl_lock. */
 static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
        return 0;
 }
 
 static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
 }
 
 static const struct drm_crtc_funcs armada_crtc_funcs = {
+       .reset          = drm_atomic_helper_crtc_reset,
        .cursor_set     = armada_drm_crtc_cursor_set,
        .cursor_move    = armada_drm_crtc_cursor_move,
        .destroy        = armada_drm_crtc_destroy,
-       .set_config     = drm_crtc_helper_set_config,
-       .page_flip      = armada_drm_crtc_page_flip,
-       .set_property   = armada_drm_crtc_set_property,
+       .set_config     = drm_atomic_helper_set_config,
+       .page_flip      = drm_atomic_helper_page_flip,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank  = armada_drm_crtc_enable_vblank,
        .disable_vblank = armada_drm_crtc_disable_vblank,
 };
 
-static void armada_drm_primary_update_state(struct drm_plane_state *state,
-       struct armada_regs *regs)
-{
-       struct armada_plane *dplane = drm_to_armada_plane(state->plane);
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
-       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
-       bool was_disabled;
-       unsigned int idx = 0;
-       u32 val;
-
-       val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
-       if (dfb->fmt > CFG_420)
-               val |= CFG_PALETTE_ENA;
-       if (state->visible)
-               val |= CFG_GRA_ENA;
-       if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
-               val |= CFG_GRA_HSMOOTH;
-
-       was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
-       if (was_disabled)
-               armada_reg_queue_mod(regs, idx,
-                                    0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
-       dplane->state.ctrl0 = val;
-       dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
-                               drm_rect_width(&state->src) >> 16;
-       dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
-                              drm_rect_width(&state->dst);
-       dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
-
-       armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
-                                 state->src.x1 >> 16, state->src.y1 >> 16,
-                                 dcrtc->interlaced);
-
-       dplane->state.vsync_update = !was_disabled;
-       dplane->state.changed = true;
-}
-
-static int armada_drm_primary_update(struct drm_plane *plane,
-       struct drm_crtc *crtc, struct drm_framebuffer *fb,
-       int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
-       uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
-       struct drm_modeset_acquire_ctx *ctx)
-{
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_plane_work *work;
-       struct drm_plane_state state = {
-               .plane = plane,
-               .crtc = crtc,
-               .fb = fb,
-               .src_x = src_x,
-               .src_y = src_y,
-               .src_w = src_w,
-               .src_h = src_h,
-               .crtc_x = crtc_x,
-               .crtc_y = crtc_y,
-               .crtc_w = crtc_w,
-               .crtc_h = crtc_h,
-               .rotation = DRM_MODE_ROTATE_0,
-       };
-       struct drm_crtc_state crtc_state = {
-               .crtc = crtc,
-               .enable = crtc->enabled,
-               .mode = crtc->mode,
-       };
-       int ret;
-
-       ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
-                                                 INT_MAX, true, false);
-       if (ret)
-               return ret;
-
-       work = &dplane->works[dplane->next_work];
-       work->fn = armada_drm_crtc_complete_frame_work;
-
-       if (plane->fb != fb) {
-               /*
-                * Take a reference on the new framebuffer - we want to
-                * hold on to it while the hardware is displaying it.
-                */
-               drm_framebuffer_reference(fb);
-
-               work->old_fb = plane->fb;
-       } else {
-               work->old_fb = NULL;
-       }
-
-       armada_drm_primary_update_state(&state, work->regs);
-
-       if (!dplane->state.changed)
-               return 0;
-
-       /* Wait for pending work to complete */
-       if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
-               armada_drm_plane_work_cancel(dcrtc, dplane);
-
-       if (!dplane->state.vsync_update) {
-               work->fn(dcrtc, work);
-               if (work->old_fb)
-                       drm_framebuffer_unreference(work->old_fb);
-               return 0;
-       }
-
-       /* Queue it for update on the next interrupt if we are enabled */
-       ret = armada_drm_plane_work_queue(dcrtc, work);
-       if (ret) {
-               work->fn(dcrtc, work);
-               if (work->old_fb)
-                       drm_framebuffer_unreference(work->old_fb);
-       }
-
-       dplane->next_work = !dplane->next_work;
-
-       return 0;
-}
-
-int armada_drm_plane_disable(struct drm_plane *plane,
-                            struct drm_modeset_acquire_ctx *ctx)
-{
-       struct armada_plane *dplane = drm_to_armada_plane(plane);
-       struct armada_crtc *dcrtc;
-       struct armada_plane_work *work;
-       unsigned int idx = 0;
-       u32 sram_para1, enable_mask;
-
-       if (!plane->crtc)
-               return 0;
-
-       /*
-        * Arrange to power down most RAMs and FIFOs if this is the primary
-        * plane, otherwise just the YUV FIFOs for the overlay plane.
-        */
-       if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
-               sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
-                            CFG_PDWN32x32 | CFG_PDWN64x66;
-               enable_mask = CFG_GRA_ENA;
-       } else {
-               sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-               enable_mask = CFG_DMA_ENA;
-       }
-
-       dplane->state.ctrl0 &= ~enable_mask;
-
-       dcrtc = drm_to_armada_crtc(plane->crtc);
-
-       /*
-        * Try to disable the plane and drop our ref on the framebuffer
-        * at the next frame update. If we fail for any reason, disable
-        * the plane immediately.
-        */
-       work = &dplane->works[dplane->next_work];
-       work->fn = armada_drm_crtc_complete_disable_work;
-       work->cancel = armada_drm_crtc_complete_disable_work;
-       work->old_fb = plane->fb;
-
-       armada_reg_queue_mod(work->regs, idx,
-                            0, enable_mask, LCD_SPU_DMA_CTRL0);
-       armada_reg_queue_mod(work->regs, idx,
-                            sram_para1, 0, LCD_SPU_SRAM_PARA1);
-       armada_reg_queue_end(work->regs, idx);
-
-       /* Wait for any preceding work to complete, but don't wedge */
-       if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
-               armada_drm_plane_work_cancel(dcrtc, dplane);
-
-       if (armada_drm_plane_work_queue(dcrtc, work)) {
-               work->fn(dcrtc, work);
-               if (work->old_fb)
-                       drm_framebuffer_unreference(work->old_fb);
-       }
-
-       dplane->next_work = !dplane->next_work;
-
-       return 0;
-}
-
-static const struct drm_plane_funcs armada_primary_plane_funcs = {
-       .update_plane   = armada_drm_primary_update,
-       .disable_plane  = armada_drm_plane_disable,
-       .destroy        = drm_primary_helper_destroy,
-};
-
-int armada_drm_plane_init(struct armada_plane *plane)
-{
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(plane->works); i++)
-               plane->works[i].plane = &plane->base;
-
-       init_waitqueue_head(&plane->frame_wait);
-
-       return 0;
-}
-
-static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
-       { CSC_AUTO,        "Auto" },
-       { CSC_YUV_CCIR601, "CCIR601" },
-       { CSC_YUV_CCIR709, "CCIR709" },
-};
-
-static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
-       { CSC_AUTO,         "Auto" },
-       { CSC_RGB_COMPUTER, "Computer system" },
-       { CSC_RGB_STUDIO,   "Studio" },
-};
-
-static int armada_drm_crtc_create_properties(struct drm_device *dev)
-{
-       struct armada_private *priv = dev->dev_private;
-
-       if (priv->csc_yuv_prop)
-               return 0;
-
-       priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
-                               "CSC_YUV", armada_drm_csc_yuv_enum_list,
-                               ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
-       priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
-                               "CSC_RGB", armada_drm_csc_rgb_enum_list,
-                               ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
-
-       if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
-               return -ENOMEM;
-
-       return 0;
-}
-
 static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        struct resource *res, int irq, const struct armada_variant *variant,
        struct device_node *port)
 {
        struct armada_private *priv = drm->dev_private;
        struct armada_crtc *dcrtc;
-       struct armada_plane *primary;
+       struct drm_plane *primary;
        void __iomem *base;
        int ret;
 
-       ret = armada_drm_crtc_create_properties(drm);
-       if (ret)
-               return ret;
-
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
@@ -1397,8 +744,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
        dcrtc->base = base;
        dcrtc->num = drm->mode_config.num_crtc;
        dcrtc->clk = ERR_PTR(-EINVAL);
-       dcrtc->csc_yuv_mode = CSC_AUTO;
-       dcrtc->csc_rgb_mode = CSC_AUTO;
        dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
        dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
        spin_lock_init(&dcrtc->irq_lock);
@@ -1415,6 +760,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
        writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
        writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+       readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
        ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
@@ -1441,39 +787,23 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                goto err_crtc;
        }
 
-       ret = armada_drm_plane_init(primary);
-       if (ret) {
-               kfree(primary);
-               goto err_crtc;
-       }
-
-       ret = drm_universal_plane_init(drm, &primary->base, 0,
-                                      &armada_primary_plane_funcs,
-                                      armada_primary_formats,
-                                      ARRAY_SIZE(armada_primary_formats),
-                                      NULL,
-                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+       ret = armada_drm_primary_plane_init(drm, primary);
        if (ret) {
                kfree(primary);
                goto err_crtc;
        }
 
-       ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
+       ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
                                        &armada_crtc_funcs, NULL);
        if (ret)
                goto err_crtc_init;
 
        drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
 
-       drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
-                                  dcrtc->csc_yuv_mode);
-       drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
-                                  dcrtc->csc_rgb_mode);
-
        return armada_overlay_plane_create(drm, 1 << dcrtc->num);
 
 err_crtc_init:
-       primary->base.funcs->destroy(&primary->base);
+       primary->funcs->destroy(primary);
 err_crtc:
        kfree(dcrtc);
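
A note on the register-update idiom used throughout this conversion: atomic_begin() resets dcrtc->regs_idx and points dcrtc->regs at the per-commit array, each plane's atomic_update appends to it, and atomic_flush() terminates and commits the list. A minimal sketch using only helpers visible in this patch (illustrative, not part of the change):

	struct armada_regs regs[4];
	unsigned int i = 0;
	u32 val = 1080 << 16 | 1920;	/* example payload */

	/* Queue a full write and a read-modify-write, then terminate. */
	armada_reg_queue_set(regs, i, val, LCD_SPU_DZM_HPXL_VLN);
	armada_reg_queue_mod(regs, i, CFG_DMA_ENA, CFG_DMA_ENA,
			     LCD_SPU_DMA_CTRL0);
	armada_reg_queue_end(regs, i);

	spin_lock_irq(&dcrtc->irq_lock);
	armada_drm_crtc_update_regs(dcrtc, regs);
	spin_unlock_irq(&dcrtc->irq_lock);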
 
index 445829b8877af4b328c2fb8d1402a69700612c7b..7ebd337b60af3d40b9b0fb920cc23126e2a7e330 100644 (file)
@@ -32,49 +32,8 @@ struct armada_regs {
        armada_reg_queue_mod(_r, _i, 0, 0, ~0)
 
 struct armada_crtc;
-struct armada_plane;
 struct armada_variant;
 
-struct armada_plane_work {
-       void (*fn)(struct armada_crtc *, struct armada_plane_work *);
-       void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
-       bool need_kfree;
-       struct drm_plane *plane;
-       struct drm_framebuffer *old_fb;
-       struct drm_pending_vblank_event *event;
-       struct armada_regs regs[14];
-};
-
-struct armada_plane_state {
-       u16 src_x;
-       u16 src_y;
-       u32 src_hw;
-       u32 dst_hw;
-       u32 dst_yx;
-       u32 ctrl0;
-       bool changed;
-       bool vsync_update;
-};
-
-struct armada_plane {
-       struct drm_plane        base;
-       wait_queue_head_t       frame_wait;
-       bool                    next_work;
-       struct armada_plane_work works[2];
-       struct armada_plane_work *work;
-       struct armada_plane_state state;
-};
-#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
-
-int armada_drm_plane_init(struct armada_plane *plane);
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
-       struct armada_plane_work *work);
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
-       struct armada_plane *plane);
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
-       int x, int y);
-
 struct armada_crtc {
        struct drm_crtc         crtc;
        const struct armada_variant *variant;
@@ -89,10 +48,6 @@ struct armada_crtc {
        } v[2];
        bool                    interlaced;
        bool                    cursor_update;
-       uint8_t                 csc_yuv_mode;
-       uint8_t                 csc_rgb_mode;
-
-       struct drm_plane        *plane;
 
        struct armada_gem_object        *cursor_obj;
        int                     cursor_x;
@@ -102,21 +57,22 @@ struct armada_crtc {
        uint32_t                cursor_w;
        uint32_t                cursor_h;
 
-       int                     dpms;
        uint32_t                cfg_dumb_ctrl;
-       uint32_t                dumb_ctrl;
        uint32_t                spu_iopad_ctrl;
 
        spinlock_t              irq_lock;
        uint32_t                irq_ena;
+
+       bool                    update_pending;
+       struct drm_pending_vblank_event *event;
+       struct armada_regs      atomic_regs[32];
+       struct armada_regs      *regs;
+       unsigned int            regs_idx;
 };
 #define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
 
 void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
 
-int armada_drm_plane_disable(struct drm_plane *plane,
-                            struct drm_modeset_acquire_ctx *ctx);
-
 extern struct platform_driver armada_lcd_platform_driver;
 
 #endif
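
The fields added above (update_pending, event, atomic_regs[], regs, regs_idx) replace the deleted per-plane work structures: a whole atomic commit now accumulates its register writes in a single array. A condensed sketch of the append pattern, mirroring the overlay plane code later in this patch (state here is a plane's drm_plane_state):

	struct armada_regs *regs = dcrtc->regs + dcrtc->regs_idx;
	unsigned int idx = 0;
	u32 val = armada_rect_yx(&state->dst);

	armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
	dcrtc->regs_idx += idx;	/* atomic_flush() adds the terminator */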
index cc4c557c9f66421737c77bd4dd92e977ab4fc75a..f09083ff15d3b678ab41d811f2813034411793cd 100644 (file)
@@ -42,11 +42,12 @@ struct armada_private;
 
 struct armada_variant {
        bool has_spu_adv_reg;
-       uint32_t spu_adv_reg;
        int (*init)(struct armada_crtc *, struct device *);
        int (*compute_clock)(struct armada_crtc *,
                             const struct drm_display_mode *,
                             uint32_t *);
+       void (*disable)(struct armada_crtc *);
+       void (*enable)(struct armada_crtc *, const struct drm_display_mode *);
 };
 
 /* Variant ops */
@@ -54,14 +55,10 @@ extern const struct armada_variant armada510_ops;
 
 struct armada_private {
        struct drm_device       drm;
-       struct work_struct      fb_unref_work;
-       DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
        struct drm_fb_helper    *fbdev;
        struct armada_crtc      *dcrtc[2];
        struct drm_mm           linear; /* protected by linear_lock */
        struct mutex            linear_lock;
-       struct drm_property     *csc_yuv_prop;
-       struct drm_property     *csc_rgb_prop;
        struct drm_property     *colorkey_prop;
        struct drm_property     *colorkey_min_prop;
        struct drm_property     *colorkey_max_prop;
@@ -76,13 +73,6 @@ struct armada_private {
 #endif
 };
 
-void __armada_drm_queue_unref_work(struct drm_device *,
-       struct drm_framebuffer *);
-void armada_drm_queue_unref_work(struct drm_device *,
-       struct drm_framebuffer *);
-
-extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
-
 int armada_fbdev_init(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);
 
index 4b11b6b52f1debe27a974e85d67e1826ba6eeb2b..fa31589b4fc0914229eae00cc2d868b43b429eba 100644 (file)
@@ -9,46 +9,18 @@
 #include <linux/component.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_of.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_gem.h"
+#include "armada_fb.h"
 #include "armada_hw.h"
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
-static void armada_drm_unref_work(struct work_struct *work)
-{
-       struct armada_private *priv =
-               container_of(work, struct armada_private, fb_unref_work);
-       struct drm_framebuffer *fb;
-
-       while (kfifo_get(&priv->fb_unref, &fb))
-               drm_framebuffer_put(fb);
-}
-
-/* Must be called with dev->event_lock held */
-void __armada_drm_queue_unref_work(struct drm_device *dev,
-       struct drm_framebuffer *fb)
-{
-       struct armada_private *priv = dev->dev_private;
-
-       WARN_ON(!kfifo_put(&priv->fb_unref, fb));
-       schedule_work(&priv->fb_unref_work);
-}
-
-void armada_drm_queue_unref_work(struct drm_device *dev,
-       struct drm_framebuffer *fb)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       __armada_drm_queue_unref_work(dev, fb);
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static struct drm_ioctl_desc armada_ioctls[] = {
        DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
        DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
@@ -72,11 +44,18 @@ static struct drm_driver armada_drm_driver = {
        .desc                   = "Armada SoC DRM",
        .date                   = "20120730",
        .driver_features        = DRIVER_GEM | DRIVER_MODESET |
-                                 DRIVER_PRIME,
+                                 DRIVER_PRIME | DRIVER_ATOMIC,
        .ioctls                 = armada_ioctls,
        .fops                   = &armada_drm_fops,
 };
 
+static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+       .fb_create              = armada_fb_create,
+       .output_poll_changed    = drm_fb_helper_output_poll_changed,
+       .atomic_check           = drm_atomic_helper_check,
+       .atomic_commit          = drm_atomic_helper_commit,
+};
+
 static int armada_drm_bind(struct device *dev)
 {
        struct armada_private *priv;
@@ -109,7 +88,7 @@ static int armada_drm_bind(struct device *dev)
 
        /*
         * The drm_device structure must be at the start of
-        * armada_private for drm_dev_unref() to work correctly.
+        * armada_private for drm_dev_put() to work correctly.
         */
        BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
 
@@ -125,9 +104,6 @@ static int armada_drm_bind(struct device *dev)
 
        dev_set_drvdata(dev, &priv->drm);
 
-       INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
-       INIT_KFIFO(priv->fb_unref);
-
        /* Mode setting support */
        drm_mode_config_init(&priv->drm);
        priv->drm.mode_config.min_width = 320;
@@ -155,6 +131,8 @@ static int armada_drm_bind(struct device *dev)
 
        priv->drm.irq_enabled = true;
 
+       drm_mode_config_reset(&priv->drm);
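+       /*
+        * This allocates the initial atomic software state for every
+        * plane, CRTC and connector before the first commit.
+        */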
+
        ret = armada_fbdev_init(&priv->drm);
        if (ret)
                goto err_comp;
@@ -179,8 +157,7 @@ static int armada_drm_bind(struct device *dev)
  err_kms:
        drm_mode_config_cleanup(&priv->drm);
        drm_mm_takedown(&priv->linear);
-       flush_work(&priv->fb_unref_work);
-       drm_dev_unref(&priv->drm);
+       drm_dev_put(&priv->drm);
        return ret;
 }
 
@@ -198,9 +175,8 @@ static void armada_drm_unbind(struct device *dev)
 
        drm_mode_config_cleanup(&priv->drm);
        drm_mm_takedown(&priv->linear);
-       flush_work(&priv->fb_unref_work);
 
-       drm_dev_unref(&priv->drm);
+       drm_dev_put(&priv->drm);
 }
 
 static int compare_of(struct device *dev, void *data)
index ac92bce07ecd92aedffe35ea481df054e24175b8..6bd638a54579f683d27541e77f80a9efc046b8cb 100644 (file)
@@ -7,30 +7,15 @@
  */
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include "armada_drm.h"
 #include "armada_fb.h"
 #include "armada_gem.h"
 #include "armada_hw.h"
 
-static void armada_fb_destroy(struct drm_framebuffer *fb)
-{
-       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-
-       drm_framebuffer_cleanup(&dfb->fb);
-       drm_gem_object_put_unlocked(&dfb->obj->obj);
-       kfree(dfb);
-}
-
-static int armada_fb_create_handle(struct drm_framebuffer *fb,
-       struct drm_file *dfile, unsigned int *handle)
-{
-       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
-       return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs armada_fb_funcs = {
-       .destroy        = armada_fb_destroy,
-       .create_handle  = armada_fb_create_handle,
+       .destroy        = drm_gem_fb_destroy,
+       .create_handle  = drm_gem_fb_create_handle,
 };
 
 struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
@@ -78,7 +63,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
 
        dfb->fmt = format;
        dfb->mod = config;
-       dfb->obj = obj;
+       dfb->fb.obj[0] = &obj->obj;
 
        drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode);
 
@@ -99,7 +84,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
        return dfb;
 }
 
-static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
        struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
 {
        struct armada_gem_object *obj;
@@ -153,8 +138,3 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
        DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
        return ERR_PTR(ret);
 }
-
-const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
-       .fb_create              = armada_fb_create,
-       .output_poll_changed    = drm_fb_helper_output_poll_changed,
-};
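
The custom destroy/create_handle implementations removed at the top of this file are replaced by the generic GEM framebuffer helpers, which operate on the drm_framebuffer's obj[] array. That is why the creation path above now stores the GEM object there rather than in the driver-private pointer this patch deletes:

	dfb->fb.obj[0] = &obj->obj;	/* consumed by drm_gem_fb_destroy()
					 * and drm_gem_fb_create_handle() */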
index 48073c4f54d8d673bba030fa3893332a8eae93a2..476daad0a36a25c6d879867a4f229c00a88a7485 100644 (file)
 
 struct armada_framebuffer {
        struct drm_framebuffer  fb;
-       struct armada_gem_object *obj;
        uint8_t                 fmt;
        uint8_t                 mod;
 };
 #define drm_fb_to_armada_fb(dfb) \
        container_of(dfb, struct armada_framebuffer, fb)
-#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+#define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0])
 
 struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
        const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
-
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+       struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
 #endif
index 2a59db0994b2e704d92edb5b0ca1b03d1888737b..8d23700848df7bc4a359f8681515dae6b92373bf 100644 (file)
@@ -24,7 +24,7 @@ static /*const*/ struct fb_ops armada_fb_ops = {
        .fb_imageblit   = drm_fb_helper_cfb_imageblit,
 };
 
-static int armada_fb_create(struct drm_fb_helper *fbh,
+static int armada_fbdev_create(struct drm_fb_helper *fbh,
        struct drm_fb_helper_surface_size *sizes)
 {
        struct drm_device *dev = fbh->dev;
@@ -108,7 +108,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
        int ret = 0;
 
        if (!fbh->fb) {
-               ret = armada_fb_create(fbh, sizes);
+               ret = armada_fbdev_create(fbh, sizes);
                if (ret == 0)
                        ret = 1;
        }
index a97f509743a596631254ebcd4ec56b853fd9e034..892c1d9304bb7640d46f192c587958c0946c48b1 100644 (file)
 #include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
 
-static int armada_gem_vm_fault(struct vm_fault *vmf)
+static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
 {
        struct drm_gem_object *gobj = vmf->vma->vm_private_data;
        struct armada_gem_object *obj = drm_to_armada_gem(gobj);
        unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
-       int ret;
 
        pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
-       ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
-
-       switch (ret) {
-       case 0:
-       case -EBUSY:
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
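+       /*
+        * vmf_insert_pfn() returns a vm_fault_t directly, replacing
+        * the open-coded errno to VM_FAULT_* translation above.
+        */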
+       return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 }
 
 const struct vm_operations_struct armada_gem_vm_ops = {
@@ -490,8 +479,6 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
        .map_dma_buf    = armada_gem_prime_map_dma_buf,
        .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
        .release        = drm_gem_dmabuf_release,
-       .map_atomic     = armada_gem_dmabuf_no_kmap,
-       .unmap_atomic   = armada_gem_dmabuf_no_kunmap,
        .map            = armada_gem_dmabuf_no_kmap,
        .unmap          = armada_gem_dmabuf_no_kunmap,
        .mmap           = armada_gem_dmabuf_mmap,
index 27319a8335e258cf12cb093c5c598523fbe307f3..277580b367586889080b15a59a6c1a2c45658cce 100644 (file)
@@ -160,6 +160,7 @@ enum {
        CFG_ALPHAM_GRA          = 0x1 << 16,
        CFG_ALPHAM_CFG          = 0x2 << 16,
        CFG_ALPHA_MASK          = 0xff << 8,
+#define CFG_ALPHA(x)           ((x) << 8)
        CFG_PIXCMD_MASK         = 0xff,
 };
 
@@ -315,4 +316,19 @@ enum {
        PWRDN_IRQ_LEVEL         = 1 << 0,
 };
 
+static inline u32 armada_rect_hw_fp(struct drm_rect *r)
+{
+       return (drm_rect_height(r) & 0xffff0000) | drm_rect_width(r) >> 16;
+}
+
+static inline u32 armada_rect_hw(struct drm_rect *r)
+{
+       return drm_rect_height(r) << 16 | (drm_rect_width(r) & 0x0000ffff);
+}
+
+static inline u32 armada_rect_yx(struct drm_rect *r)
+{
+       return (r)->y1 << 16 | ((r)->x1 & 0x0000ffff);
+}
+
 #endif
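
A worked example (not part of the patch) of the packing these helpers perform, for a 16.16 fixed-point source rectangle src = { .x1 = 0, .y1 = 0, .x2 = 1920 << 16, .y2 = 1080 << 16 } and an integer destination rectangle dst = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 }:

	armada_rect_hw_fp(&src) == 0x04380780	/* 1080 << 16 | 1920 */
	armada_rect_hw(&dst)    == 0x04380780	/* 1080 << 16 | 1920 */
	armada_rect_yx(&dst)    == 0x00000000	/* y1 << 16 | x1 */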
index c391955009d6051a6bb67b8a9a7bcebee38a8983..eb7dfb65ef475ce6a8ce6b9d491c19664af777a3 100644 (file)
  * published by the Free Software Foundation.
  */
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/armada_drm.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_fb.h"
 #include "armada_gem.h"
 #include "armada_hw.h"
-#include <drm/armada_drm.h>
 #include "armada_ioctlP.h"
+#include "armada_plane.h"
 #include "armada_trace.h"
 
-struct armada_ovl_plane_properties {
-       uint32_t colorkey_yr;
-       uint32_t colorkey_ug;
-       uint32_t colorkey_vb;
-#define K2R(val) (((val) >> 0) & 0xff)
-#define K2G(val) (((val) >> 8) & 0xff)
-#define K2B(val) (((val) >> 16) & 0xff)
-       int16_t  brightness;
-       uint16_t contrast;
-       uint16_t saturation;
-       uint32_t colorkey_mode;
-};
-
-struct armada_ovl_plane {
-       struct armada_plane base;
-       struct armada_ovl_plane_properties prop;
+#define DEFAULT_BRIGHTNESS     0
+#define DEFAULT_CONTRAST       0x4000
+#define DEFAULT_SATURATION     0x4000
+#define DEFAULT_ENCODING       DRM_COLOR_YCBCR_BT601
+
+struct armada_overlay_state {
+       struct drm_plane_state base;
+       u32 colorkey_yr;
+       u32 colorkey_ug;
+       u32 colorkey_vb;
+       u32 colorkey_mode;
+       u32 colorkey_enable;
+       s16 brightness;
+       u16 contrast;
+       u16 saturation;
 };
-#define drm_to_armada_ovl_plane(p) \
-       container_of(p, struct armada_ovl_plane, base.base)
-
+#define drm_to_overlay_state(s) \
+       container_of(s, struct armada_overlay_state, base)
 
-static void
-armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
-       struct armada_crtc *dcrtc)
+static inline u32 armada_spu_contrast(struct drm_plane_state *state)
 {
-       writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
-       writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
-       writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+       return drm_to_overlay_state(state)->brightness << 16 |
+              drm_to_overlay_state(state)->contrast;
+}
 
-       writel_relaxed(prop->brightness << 16 | prop->contrast,
-                      dcrtc->base + LCD_SPU_CONTRAST);
+static inline u32 armada_spu_saturation(struct drm_plane_state *state)
+{
        /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
-       writel_relaxed(prop->saturation << 16,
-                      dcrtc->base + LCD_SPU_SATURATION);
-       writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
-
-       spin_lock_irq(&dcrtc->irq_lock);
-       armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
-                    CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
-                    dcrtc->base + LCD_SPU_DMA_CTRL1);
+       return drm_to_overlay_state(state)->saturation << 16;
+}
 
-       armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
-       spin_unlock_irq(&dcrtc->irq_lock);
+static inline u32 armada_csc(struct drm_plane_state *state)
+{
+       /*
+        * The CFG_CSC_RGB_* settings control the output of the colour space
+        * converter, setting the range of output values it produces.  Since
+        * we will be blending with the full-range graphics, we need to
+        * produce full-range RGB output from the conversion.
+        */
+       return CFG_CSC_RGB_COMPUTER |
+              (state->color_encoding == DRM_COLOR_YCBCR_BT709 ?
+                       CFG_CSC_YUV_CCIR709 : CFG_CSC_YUV_CCIR601);
 }
 
 /* === Plane support === */
-static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
-       struct armada_plane_work *work)
+static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
+       struct drm_plane_state *old_state)
 {
-       unsigned long flags;
+       struct drm_plane_state *state = plane->state;
+       struct armada_crtc *dcrtc;
+       struct armada_regs *regs;
+       unsigned int idx;
+       u32 cfg, cfg_mask, val;
 
-       trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
+       DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
 
-       spin_lock_irqsave(&dcrtc->irq_lock, flags);
-       armada_drm_crtc_update_regs(dcrtc, work->regs);
-       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_ovl_plane_update_state(struct drm_plane_state *state,
-       struct armada_regs *regs)
-{
-       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
-       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
-       const struct drm_format_info *format;
-       unsigned int idx = 0;
-       bool fb_changed;
-       u32 val, ctrl0;
-       u16 src_x, src_y;
+       if (!state->fb || WARN_ON(!state->crtc))
+               return;
 
-       ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
-       if (state->visible)
-               ctrl0 |= CFG_DMA_ENA;
-       if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
-               ctrl0 |= CFG_DMA_HSMOOTH;
+       DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+               plane->base.id, plane->name,
+               state->crtc->base.id, state->crtc->name,
+               state->fb->base.id,
+               old_state->visible, state->visible);
 
-       /*
-        * Shifting a YUV packed format image by one pixel causes the U/V
-        * planes to swap.  Compensate for it by also toggling the UV swap.
-        */
-       format = dfb->fb.format;
-       if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
-               ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+       dcrtc = drm_to_armada_crtc(state->crtc);
+       regs = dcrtc->regs + dcrtc->regs_idx;
 
-       if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
-               /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+       idx = 0;
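+       /* Power up the Y/U/V FIFOs on a 0->1 visibility transition */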
+       if (!old_state->visible && state->visible)
                armada_reg_queue_mod(regs, idx,
                                     0, CFG_PDWN16x66 | CFG_PDWN32x66,
                                     LCD_SPU_SRAM_PARA1);
-       }
-
-       fb_changed = dplane->base.base.fb != &dfb->fb ||
-                    dplane->base.state.src_x != state->src.x1 >> 16 ||
-                    dplane->base.state.src_y != state->src.y1 >> 16;
-
-       dplane->base.state.vsync_update = fb_changed;
-
+       val = armada_rect_hw_fp(&state->src);
+       if (armada_rect_hw_fp(&old_state->src) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
+       val = armada_rect_yx(&state->dst);
+       if (armada_rect_yx(&old_state->dst) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
+       val = armada_rect_hw(&state->dst);
+       if (armada_rect_hw(&old_state->dst) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
        /* FIXME: overlay on an interlaced display */
-       if (fb_changed) {
-               u32 addrs[3];
-
-               dplane->base.state.src_y = src_y = state->src.y1 >> 16;
-               dplane->base.state.src_x = src_x = state->src.x1 >> 16;
+       if (old_state->src.x1 != state->src.x1 ||
+           old_state->src.y1 != state->src.y1 ||
+           old_state->fb != state->fb) {
+               const struct drm_format_info *format;
+               u16 src_x, pitches[3];
+               u32 addrs[2][3];
 
-               armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
+               armada_drm_plane_calc(state, addrs, pitches, false);
 
-               armada_reg_queue_set(regs, idx, addrs[0],
+               armada_reg_queue_set(regs, idx, addrs[0][0],
                                     LCD_SPU_DMA_START_ADDR_Y0);
-               armada_reg_queue_set(regs, idx, addrs[1],
+               armada_reg_queue_set(regs, idx, addrs[0][1],
                                     LCD_SPU_DMA_START_ADDR_U0);
-               armada_reg_queue_set(regs, idx, addrs[2],
+               armada_reg_queue_set(regs, idx, addrs[0][2],
                                     LCD_SPU_DMA_START_ADDR_V0);
-               armada_reg_queue_set(regs, idx, addrs[0],
+               armada_reg_queue_set(regs, idx, addrs[1][0],
                                     LCD_SPU_DMA_START_ADDR_Y1);
-               armada_reg_queue_set(regs, idx, addrs[1],
+               armada_reg_queue_set(regs, idx, addrs[1][1],
                                     LCD_SPU_DMA_START_ADDR_U1);
-               armada_reg_queue_set(regs, idx, addrs[2],
+               armada_reg_queue_set(regs, idx, addrs[1][2],
                                     LCD_SPU_DMA_START_ADDR_V1);
 
-               val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
-               armada_reg_queue_set(regs, idx, val,
-                                    LCD_SPU_DMA_PITCH_YC);
-               val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
-               armada_reg_queue_set(regs, idx, val,
-                                    LCD_SPU_DMA_PITCH_UV);
-       }
+               val = pitches[0] << 16 | pitches[0];
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
+               val = pitches[1] << 16 | pitches[2];
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
 
-       val = (drm_rect_height(&state->src) & 0xffff0000) |
-              drm_rect_width(&state->src) >> 16;
-       if (dplane->base.state.src_hw != val) {
-               dplane->base.state.src_hw = val;
-               armada_reg_queue_set(regs, idx, val,
-                                    LCD_SPU_DMA_HPXL_VLN);
-       }
+               cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+                     CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
+                     CFG_CBSH_ENA;
+               if (state->visible)
+                       cfg |= CFG_DMA_ENA;
 
-       val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
-       if (dplane->base.state.dst_hw != val) {
-               dplane->base.state.dst_hw = val;
-               armada_reg_queue_set(regs, idx, val,
-                                    LCD_SPU_DZM_HPXL_VLN);
+               /*
+                * Shifting a YUV packed format image by one pixel causes the
+                * U/V planes to swap.  Compensate for it by also toggling
+                * the UV swap.
+                */
+               format = state->fb->format;
+               src_x = state->src.x1 >> 16;
+               if (format->num_planes == 1 && src_x & (format->hsub - 1))
+                       cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
+               cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
+                          CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+                                      CFG_SWAPYU | CFG_YUV2RGB) |
+                          CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
+                          CFG_DMA_ENA;
+       } else if (old_state->visible != state->visible) {
+               cfg = state->visible ? CFG_DMA_ENA : 0;
+               cfg_mask = CFG_DMA_ENA;
+       } else {
+               cfg = cfg_mask = 0;
        }
-
-       val = state->dst.y1 << 16 | state->dst.x1;
-       if (dplane->base.state.dst_yx != val) {
-               dplane->base.state.dst_yx = val;
-               armada_reg_queue_set(regs, idx, val,
-                                    LCD_SPU_DMA_OVSA_HPXL_VLN);
+       if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+           drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+               cfg_mask |= CFG_DMA_HSMOOTH;
+               if (drm_rect_width(&state->src) >> 16 !=
+                   drm_rect_width(&state->dst))
+                       cfg |= CFG_DMA_HSMOOTH;
        }
 
-       if (dplane->base.state.ctrl0 != ctrl0) {
-               dplane->base.state.ctrl0 = ctrl0;
-               armada_reg_queue_mod(regs, idx, ctrl0,
-                       CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
-                       CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
-                       CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
-                       CFG_YUV2RGB) | CFG_DMA_ENA,
-                       LCD_SPU_DMA_CTRL0);
-               dplane->base.state.vsync_update = true;
-       }
+       if (cfg_mask)
+               armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+                                    LCD_SPU_DMA_CTRL0);
+
+       val = armada_spu_contrast(state);
+       if ((!old_state->visible && state->visible) ||
+           armada_spu_contrast(old_state) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
+       val = armada_spu_saturation(state);
+       if ((!old_state->visible && state->visible) ||
+           armada_spu_saturation(old_state) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
+       if (!old_state->visible && state->visible)
+               armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
+       val = armada_csc(state);
+       if ((!old_state->visible && state->visible) ||
+           armada_csc(old_state) != val)
+               armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
+                                    LCD_SPU_IOPAD_CONTROL);
+       val = drm_to_overlay_state(state)->colorkey_yr;
+       if ((!old_state->visible && state->visible) ||
+           drm_to_overlay_state(old_state)->colorkey_yr != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
+       val = drm_to_overlay_state(state)->colorkey_ug;
+       if ((!old_state->visible && state->visible) ||
+           drm_to_overlay_state(old_state)->colorkey_ug != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
+       val = drm_to_overlay_state(state)->colorkey_vb;
+       if ((!old_state->visible && state->visible) ||
+           drm_to_overlay_state(old_state)->colorkey_vb != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
+       val = drm_to_overlay_state(state)->colorkey_mode;
+       if ((!old_state->visible && state->visible) ||
+           drm_to_overlay_state(old_state)->colorkey_mode != val)
+               armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
+                                    CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+                                    LCD_SPU_DMA_CTRL1);
+       val = drm_to_overlay_state(state)->colorkey_enable;
+       if (((!old_state->visible && state->visible) ||
+            drm_to_overlay_state(old_state)->colorkey_enable != val) &&
+           dcrtc->variant->has_spu_adv_reg)
+               armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
+                                    ADV_VIDCOLORKEY, LCD_SPU_ADV_REG);
+
+       dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
+       struct drm_plane_state *old_state)
+{
+       struct armada_crtc *dcrtc;
+       struct armada_regs *regs;
+       unsigned int idx = 0;
+
+       DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+       if (!old_state->crtc)
+               return;
 
-       dplane->base.state.changed = idx != 0;
+       DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+               plane->base.id, plane->name,
+               old_state->crtc->base.id, old_state->crtc->name,
+               old_state->fb->base.id);
 
-       armada_reg_queue_end(regs, idx);
+       dcrtc = drm_to_armada_crtc(old_state->crtc);
+       regs = dcrtc->regs + dcrtc->regs_idx;
+
+       /* Disable plane and power down the YUV FIFOs */
+       armada_reg_queue_mod(regs, idx, 0, CFG_DMA_ENA, LCD_SPU_DMA_CTRL0);
+       armada_reg_queue_mod(regs, idx, CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+                            LCD_SPU_SRAM_PARA1);
+
+       dcrtc->regs_idx += idx;
 }
 
+static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = {
+       .prepare_fb     = armada_drm_plane_prepare_fb,
+       .cleanup_fb     = armada_drm_plane_cleanup_fb,
+       .atomic_check   = armada_drm_plane_atomic_check,
+       .atomic_update  = armada_drm_overlay_plane_atomic_update,
+       .atomic_disable = armada_drm_overlay_plane_atomic_disable,
+};
+
 static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+armada_overlay_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        struct drm_framebuffer *fb,
        int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
        uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
        struct drm_modeset_acquire_ctx *ctx)
 {
-       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
-       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_plane_work *work;
-       struct drm_plane_state state = {
-               .plane = plane,
-               .crtc = crtc,
-               .fb = fb,
-               .src_x = src_x,
-               .src_y = src_y,
-               .src_w = src_w,
-               .src_h = src_h,
-               .crtc_x = crtc_x,
-               .crtc_y = crtc_y,
-               .crtc_w = crtc_w,
-               .crtc_h = crtc_h,
-               .rotation = DRM_MODE_ROTATE_0,
-       };
-       struct drm_crtc_state crtc_state = {
-               .crtc = crtc,
-               .enable = crtc->enabled,
-               .mode = crtc->mode,
-       };
-       int ret;
+       struct drm_atomic_state *state;
+       struct drm_plane_state *plane_state;
+       int ret = 0;
 
        trace_armada_ovl_plane_update(plane, crtc, fb,
                                 crtc_x, crtc_y, crtc_w, crtc_h,
                                 src_x, src_y, src_w, src_h);
 
-       ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
-                                                 INT_MAX, true, false);
-       if (ret)
-               return ret;
-
-       work = &dplane->base.works[dplane->base.next_work];
-
-       if (plane->fb != fb) {
-               /*
-                * Take a reference on the new framebuffer - we want to
-                * hold on to it while the hardware is displaying it.
-                */
-               drm_framebuffer_reference(fb);
+       state = drm_atomic_state_alloc(plane->dev);
+       if (!state)
+               return -ENOMEM;
 
-               work->old_fb = plane->fb;
-       } else {
-               work->old_fb = NULL;
+       state->acquire_ctx = ctx;
+       plane_state = drm_atomic_get_plane_state(state, plane);
+       if (IS_ERR(plane_state)) {
+               ret = PTR_ERR(plane_state);
+               goto fail;
        }
 
-       armada_ovl_plane_update_state(&state, work->regs);
-
-       if (!dplane->base.state.changed)
-               return 0;
-
-       /* Wait for pending work to complete */
-       if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
-               armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+       ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+       if (ret != 0)
+               goto fail;
+
+       drm_atomic_set_fb_for_plane(plane_state, fb);
+       plane_state->crtc_x = crtc_x;
+       plane_state->crtc_y = crtc_y;
+       plane_state->crtc_h = crtc_h;
+       plane_state->crtc_w = crtc_w;
+       plane_state->src_x = src_x;
+       plane_state->src_y = src_y;
+       plane_state->src_h = src_h;
+       plane_state->src_w = src_w;
+
+       ret = drm_atomic_nonblocking_commit(state);
+fail:
+       drm_atomic_state_put(state);
+       return ret;
+}
 
-       /* Just updating the position/size? */
-       if (!dplane->base.state.vsync_update) {
-               armada_ovl_plane_work(dcrtc, work);
-               return 0;
-       }
+static void armada_ovl_plane_destroy(struct drm_plane *plane)
+{
+       drm_plane_cleanup(plane);
+       kfree(plane);
+}
 
-       if (!dcrtc->plane) {
-               dcrtc->plane = plane;
-               armada_ovl_update_attr(&dplane->prop, dcrtc);
+static void armada_overlay_reset(struct drm_plane *plane)
+{
+       struct armada_overlay_state *state;
+
+       if (plane->state)
+               __drm_atomic_helper_plane_destroy_state(plane->state);
+       kfree(plane->state);
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (state) {
+               state->base.plane = plane;
+               state->base.color_encoding = DEFAULT_ENCODING;
+               state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+               state->base.rotation = DRM_MODE_ROTATE_0;
+               state->colorkey_yr = 0xfefefe00;
+               state->colorkey_ug = 0x01010100;
+               state->colorkey_vb = 0x01010100;
+               state->colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+                                      CFG_ALPHAM_GRA | CFG_ALPHA(0);
+               state->colorkey_enable = ADV_GRACOLORKEY;
+               state->brightness = DEFAULT_BRIGHTNESS;
+               state->contrast = DEFAULT_CONTRAST;
+               state->saturation = DEFAULT_SATURATION;
        }
-
-       /* Queue it for update on the next interrupt if we are enabled */
-       ret = armada_drm_plane_work_queue(dcrtc, work);
-       if (ret)
-               DRM_ERROR("failed to queue plane work: %d\n", ret);
-
-       dplane->base.next_work = !dplane->base.next_work;
-
-       return 0;
+       plane->state = &state->base;
 }
 
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
+struct drm_plane_state *
+armada_overlay_duplicate_state(struct drm_plane *plane)
 {
-       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
+       struct armada_overlay_state *state;
 
-       drm_plane_cleanup(plane);
+       if (WARN_ON(!plane->state))
+               return NULL;
 
-       kfree(dplane);
+       state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
+       if (state)
+               __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+       return &state->base;
 }
 
-static int armada_ovl_plane_set_property(struct drm_plane *plane,
-       struct drm_property *property, uint64_t val)
+static int armada_overlay_set_property(struct drm_plane *plane,
+       struct drm_plane_state *state, struct drm_property *property,
+       uint64_t val)
 {
        struct armada_private *priv = plane->dev->dev_private;
-       struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
-       bool update_attr = false;
 
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
        if (property == priv->colorkey_prop) {
 #define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
-               dplane->prop.colorkey_yr = CCC(K2R(val));
-               dplane->prop.colorkey_ug = CCC(K2G(val));
-               dplane->prop.colorkey_vb = CCC(K2B(val));
+               drm_to_overlay_state(state)->colorkey_yr = CCC(K2R(val));
+               drm_to_overlay_state(state)->colorkey_ug = CCC(K2G(val));
+               drm_to_overlay_state(state)->colorkey_vb = CCC(K2B(val));
 #undef CCC
-               update_attr = true;
        } else if (property == priv->colorkey_min_prop) {
-               dplane->prop.colorkey_yr &= ~0x00ff0000;
-               dplane->prop.colorkey_yr |= K2R(val) << 16;
-               dplane->prop.colorkey_ug &= ~0x00ff0000;
-               dplane->prop.colorkey_ug |= K2G(val) << 16;
-               dplane->prop.colorkey_vb &= ~0x00ff0000;
-               dplane->prop.colorkey_vb |= K2B(val) << 16;
-               update_attr = true;
+               drm_to_overlay_state(state)->colorkey_yr &= ~0x00ff0000;
+               drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 16;
+               drm_to_overlay_state(state)->colorkey_ug &= ~0x00ff0000;
+               drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 16;
+               drm_to_overlay_state(state)->colorkey_vb &= ~0x00ff0000;
+               drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 16;
        } else if (property == priv->colorkey_max_prop) {
-               dplane->prop.colorkey_yr &= ~0xff000000;
-               dplane->prop.colorkey_yr |= K2R(val) << 24;
-               dplane->prop.colorkey_ug &= ~0xff000000;
-               dplane->prop.colorkey_ug |= K2G(val) << 24;
-               dplane->prop.colorkey_vb &= ~0xff000000;
-               dplane->prop.colorkey_vb |= K2B(val) << 24;
-               update_attr = true;
+               drm_to_overlay_state(state)->colorkey_yr &= ~0xff000000;
+               drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 24;
+               drm_to_overlay_state(state)->colorkey_ug &= ~0xff000000;
+               drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 24;
+               drm_to_overlay_state(state)->colorkey_vb &= ~0xff000000;
+               drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 24;
        } else if (property == priv->colorkey_val_prop) {
-               dplane->prop.colorkey_yr &= ~0x0000ff00;
-               dplane->prop.colorkey_yr |= K2R(val) << 8;
-               dplane->prop.colorkey_ug &= ~0x0000ff00;
-               dplane->prop.colorkey_ug |= K2G(val) << 8;
-               dplane->prop.colorkey_vb &= ~0x0000ff00;
-               dplane->prop.colorkey_vb |= K2B(val) << 8;
-               update_attr = true;
+               drm_to_overlay_state(state)->colorkey_yr &= ~0x0000ff00;
+               drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 8;
+               drm_to_overlay_state(state)->colorkey_ug &= ~0x0000ff00;
+               drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 8;
+               drm_to_overlay_state(state)->colorkey_vb &= ~0x0000ff00;
+               drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 8;
        } else if (property == priv->colorkey_alpha_prop) {
-               dplane->prop.colorkey_yr &= ~0x000000ff;
-               dplane->prop.colorkey_yr |= K2R(val);
-               dplane->prop.colorkey_ug &= ~0x000000ff;
-               dplane->prop.colorkey_ug |= K2G(val);
-               dplane->prop.colorkey_vb &= ~0x000000ff;
-               dplane->prop.colorkey_vb |= K2B(val);
-               update_attr = true;
+               drm_to_overlay_state(state)->colorkey_yr &= ~0x000000ff;
+               drm_to_overlay_state(state)->colorkey_yr |= K2R(val);
+               drm_to_overlay_state(state)->colorkey_ug &= ~0x000000ff;
+               drm_to_overlay_state(state)->colorkey_ug |= K2G(val);
+               drm_to_overlay_state(state)->colorkey_vb &= ~0x000000ff;
+               drm_to_overlay_state(state)->colorkey_vb |= K2B(val);
        } else if (property == priv->colorkey_mode_prop) {
-               dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
-               dplane->prop.colorkey_mode |= CFG_CKMODE(val);
-               update_attr = true;
+               if (val == CKMODE_DISABLE) {
+                       drm_to_overlay_state(state)->colorkey_mode =
+                               CFG_CKMODE(CKMODE_DISABLE) |
+                               CFG_ALPHAM_CFG | CFG_ALPHA(255);
+                       drm_to_overlay_state(state)->colorkey_enable = 0;
+               } else {
+                       drm_to_overlay_state(state)->colorkey_mode =
+                               CFG_CKMODE(val) |
+                               CFG_ALPHAM_GRA | CFG_ALPHA(0);
+                       drm_to_overlay_state(state)->colorkey_enable =
+                               ADV_GRACOLORKEY;
+               }
        } else if (property == priv->brightness_prop) {
-               dplane->prop.brightness = val - 256;
-               update_attr = true;
+               drm_to_overlay_state(state)->brightness = val - 256;
        } else if (property == priv->contrast_prop) {
-               dplane->prop.contrast = val;
-               update_attr = true;
+               drm_to_overlay_state(state)->contrast = val;
        } else if (property == priv->saturation_prop) {
-               dplane->prop.saturation = val;
-               update_attr = true;
+               drm_to_overlay_state(state)->saturation = val;
+       } else {
+               return -EINVAL;
        }
+       return 0;
+}
 
-       if (update_attr && dplane->base.base.crtc)
-               armada_ovl_update_attr(&dplane->prop,
-                                      drm_to_armada_crtc(dplane->base.base.crtc));
+static int armada_overlay_get_property(struct drm_plane *plane,
+       const struct drm_plane_state *state, struct drm_property *property,
+       uint64_t *val)
+{
+       struct armada_private *priv = plane->dev->dev_private;
 
+#define C2K(c,s)       (((c) >> (s)) & 0xff)
+#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
+       if (property == priv->colorkey_prop) {
+               /* Do a best-effort reconstruction of this property */
+               *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                            drm_to_overlay_state(state)->colorkey_ug,
+                            drm_to_overlay_state(state)->colorkey_vb, 16);
+               /* If min != max, or min != val, error out */
+               if (*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                                 drm_to_overlay_state(state)->colorkey_ug,
+                                 drm_to_overlay_state(state)->colorkey_vb, 24) ||
+                   *val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                                 drm_to_overlay_state(state)->colorkey_ug,
+                                 drm_to_overlay_state(state)->colorkey_vb, 8))
+                       return -EINVAL;
+       } else if (property == priv->colorkey_min_prop) {
+               *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                            drm_to_overlay_state(state)->colorkey_ug,
+                            drm_to_overlay_state(state)->colorkey_vb, 16);
+       } else if (property == priv->colorkey_max_prop) {
+               *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                            drm_to_overlay_state(state)->colorkey_ug,
+                            drm_to_overlay_state(state)->colorkey_vb, 24);
+       } else if (property == priv->colorkey_val_prop) {
+               *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                            drm_to_overlay_state(state)->colorkey_ug,
+                            drm_to_overlay_state(state)->colorkey_vb, 8);
+       } else if (property == priv->colorkey_alpha_prop) {
+               *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+                            drm_to_overlay_state(state)->colorkey_ug,
+                            drm_to_overlay_state(state)->colorkey_vb, 0);
+       } else if (property == priv->colorkey_mode_prop) {
+               *val = (drm_to_overlay_state(state)->colorkey_mode &
+                       CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+       } else if (property == priv->brightness_prop) {
+               *val = drm_to_overlay_state(state)->brightness + 256;
+       } else if (property == priv->contrast_prop) {
+               *val = drm_to_overlay_state(state)->contrast;
+       } else if (property == priv->saturation_prop) {
+               *val = drm_to_overlay_state(state)->saturation;
+       } else {
+               return -EINVAL;
+       }
        return 0;
 }
 
 static const struct drm_plane_funcs armada_ovl_plane_funcs = {
-       .update_plane   = armada_ovl_plane_update,
-       .disable_plane  = armada_drm_plane_disable,
+       .update_plane   = armada_overlay_plane_update,
+       .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy        = armada_ovl_plane_destroy,
-       .set_property   = armada_ovl_plane_set_property,
+       .reset          = armada_overlay_reset,
+       .atomic_duplicate_state = armada_overlay_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .atomic_set_property = armada_overlay_set_property,
+       .atomic_get_property = armada_overlay_get_property,
 };
 
 static const uint32_t armada_ovl_formats[] = {
@@ -419,46 +541,31 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
 {
        struct armada_private *priv = dev->dev_private;
        struct drm_mode_object *mobj;
-       struct armada_ovl_plane *dplane;
+       struct drm_plane *overlay;
        int ret;
 
        ret = armada_overlay_create_properties(dev);
        if (ret)
                return ret;
 
-       dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
-       if (!dplane)
+       overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+       if (!overlay)
                return -ENOMEM;
 
-       ret = armada_drm_plane_init(&dplane->base);
-       if (ret) {
-               kfree(dplane);
-               return ret;
-       }
-
-       dplane->base.works[0].fn = armada_ovl_plane_work;
-       dplane->base.works[1].fn = armada_ovl_plane_work;
+       drm_plane_helper_add(overlay, &armada_overlay_plane_helper_funcs);
 
-       ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
+       ret = drm_universal_plane_init(dev, overlay, crtcs,
                                       &armada_ovl_plane_funcs,
                                       armada_ovl_formats,
                                       ARRAY_SIZE(armada_ovl_formats),
                                       NULL,
                                       DRM_PLANE_TYPE_OVERLAY, NULL);
        if (ret) {
-               kfree(dplane);
+               kfree(overlay);
                return ret;
        }
 
-       dplane->prop.colorkey_yr = 0xfefefe00;
-       dplane->prop.colorkey_ug = 0x01010100;
-       dplane->prop.colorkey_vb = 0x01010100;
-       dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
-       dplane->prop.brightness = 0;
-       dplane->prop.contrast = 0x4000;
-       dplane->prop.saturation = 0x4000;
-
-       mobj = &dplane->base.base.base;
+       mobj = &overlay->base;
        drm_object_attach_property(mobj, priv->colorkey_prop,
                                   0x0101fe);
        drm_object_attach_property(mobj, priv->colorkey_min_prop,
@@ -471,11 +578,19 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
                                   0x000000);
        drm_object_attach_property(mobj, priv->colorkey_mode_prop,
                                   CKMODE_RGB);
-       drm_object_attach_property(mobj, priv->brightness_prop, 256);
+       drm_object_attach_property(mobj, priv->brightness_prop,
+                                  256 + DEFAULT_BRIGHTNESS);
        drm_object_attach_property(mobj, priv->contrast_prop,
-                                  dplane->prop.contrast);
+                                  DEFAULT_CONTRAST);
        drm_object_attach_property(mobj, priv->saturation_prop,
-                                  dplane->prop.saturation);
+                                  DEFAULT_SATURATION);
 
-       return 0;
+       ret = drm_plane_create_color_properties(overlay,
+                                               BIT(DRM_COLOR_YCBCR_BT601) |
+                                               BIT(DRM_COLOR_YCBCR_BT709),
+                                               BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+                                               DEFAULT_ENCODING,
+                                               DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+       return ret;
 }
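
The colorkey get/set handlers above pack one 8-bit component per byte lane of
a 32-bit register word: CCC() replicates a component into the max (bits
31-24), min (23-16) and val (15-8) lanes when the combined colorkey property
is set, the individual properties rewrite a single lane, and R2BGR()
reassembles a BGR triple from a chosen lane on read-back. A minimal
standalone C sketch of that round trip, reusing the same macros (the test
value is the default attached above; main() and the printf output are
illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the driver macros. */
#define K2R(val) (((val) >> 0) & 0xff)
#define K2G(val) (((val) >> 8) & 0xff)
#define K2B(val) (((val) >> 16) & 0xff)
#define CCC(v)   ((v) << 24 | (v) << 16 | (v) << 8)
#define C2K(c, s)         (((c) >> (s)) & 0xff)
#define R2BGR(r, g, b, s) (C2K(r, s) << 0 | C2K(g, s) << 8 | C2K(b, s) << 16)

int main(void)
{
        uint64_t prop = 0x0101fe;        /* B=0x01 G=0x01 R=0xfe */
        uint32_t yr = CCC(K2R(prop));    /* 0xfefefe00 */
        uint32_t ug = CCC(K2G(prop));    /* 0x01010100 */
        uint32_t vb = CCC(K2B(prop));    /* 0x01010100 */

        /* Reading any lane (24 = max, 16 = min, 8 = val) recovers the
         * original triple as long as all lanes agree, which is exactly
         * the consistency check in armada_overlay_get_property(). */
        printf("min %06x\n", (unsigned)R2BGR(yr, ug, vb, 16));
        printf("max %06x\n", (unsigned)R2BGR(yr, ug, vb, 24));
        printf("val %06x\n", (unsigned)R2BGR(yr, ug, vb, 8));
        return 0;
}
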
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
new file mode 100644
index 0000000..9f36423
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include "armada_plane.h"
+#include "armada_trace.h"
+
+static const uint32_t armada_primary_formats[] = {
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
+       DRM_FORMAT_ARGB1555,
+       DRM_FORMAT_ABGR1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_BGR565,
+};
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+       u16 pitches[3], bool interlaced)
+{
+       struct drm_framebuffer *fb = state->fb;
+       const struct drm_format_info *format = fb->format;
+       unsigned int num_planes = format->num_planes;
+       unsigned int x = state->src.x1 >> 16;
+       unsigned int y = state->src.y1 >> 16;
+       u32 addr = drm_fb_obj(fb)->dev_addr;
+       int i;
+
+       DRM_DEBUG_KMS("pitch %u x %d y %d bpp %d\n",
+                     fb->pitches[0], x, y, format->cpp[0] * 8);
+
+       if (num_planes > 3)
+               num_planes = 3;
+
+       addrs[0][0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+                     x * format->cpp[0];
+       pitches[0] = fb->pitches[0];
+
+       y /= format->vsub;
+       x /= format->hsub;
+
+       for (i = 1; i < num_planes; i++) {
+               addrs[0][i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+                             x * format->cpp[i];
+               pitches[i] = fb->pitches[i];
+       }
+       for (; i < 3; i++) {
+               addrs[0][i] = 0;
+               pitches[i] = 0;
+       }
+       if (interlaced) {
+               for (i = 0; i < 3; i++) {
+                       addrs[1][i] = addrs[0][i] + pitches[i];
+                       pitches[i] *= 2;
+               }
+       } else {
+               for (i = 0; i < 3; i++)
+                       addrs[1][i] = addrs[0][i];
+       }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_plane_state *state,
+       struct armada_regs *regs, bool interlaced)
+{
+       u16 pitches[3];
+       u32 addrs[2][3];
+       unsigned i = 0;
+
+       armada_drm_plane_calc(state, addrs, pitches, interlaced);
+
+       /* write offset, base, and pitch */
+       armada_reg_queue_set(regs, i, addrs[0][0], LCD_CFG_GRA_START_ADDR0);
+       armada_reg_queue_set(regs, i, addrs[1][0], LCD_CFG_GRA_START_ADDR1);
+       armada_reg_queue_mod(regs, i, pitches[0], 0xffff, LCD_CFG_GRA_PITCH);
+
+       return i;
+}
+
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+       struct drm_plane_state *state)
+{
+       DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+               plane->base.id, plane->name,
+               state->fb ? state->fb->base.id : 0);
+
+       /*
+        * Take a reference on the new framebuffer - we want to
+        * hold on to it while the hardware is displaying it.
+        */
+       if (state->fb)
+               drm_framebuffer_get(state->fb);
+       return 0;
+}
+
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+       struct drm_plane_state *old_state)
+{
+       DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+               plane->base.id, plane->name,
+               old_state->fb ? old_state->fb->base.id : 0);
+
+       if (old_state->fb)
+               drm_framebuffer_put(old_state->fb);
+}
+
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+       struct drm_plane_state *state)
+{
+       if (state->fb && !WARN_ON(!state->crtc)) {
+               struct drm_crtc *crtc = state->crtc;
+               struct drm_crtc_state *crtc_state;
+
+               if (state->state)
+                       crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+               else
+                       crtc_state = crtc->state;
+               return drm_atomic_helper_check_plane_state(state, crtc_state,
+                                                          0, INT_MAX,
+                                                          true, false);
+       } else {
+               state->visible = false;
+       }
+       return 0;
+}
+
+static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
+       struct drm_plane_state *old_state)
+{
+       struct drm_plane_state *state = plane->state;
+       struct armada_crtc *dcrtc;
+       struct armada_regs *regs;
+       u32 cfg, cfg_mask, val;
+       unsigned int idx;
+
+       DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+       if (!state->fb || WARN_ON(!state->crtc))
+               return;
+
+       DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+               plane->base.id, plane->name,
+               state->crtc->base.id, state->crtc->name,
+               state->fb->base.id,
+               old_state->visible, state->visible);
+
+       dcrtc = drm_to_armada_crtc(state->crtc);
+       regs = dcrtc->regs + dcrtc->regs_idx;
+
+       idx = 0;
+       if (!old_state->visible && state->visible) {
+               val = CFG_PDWN64x66;
+               if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+                       val |= CFG_PDWN256x24;
+               armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
+       }
+       val = armada_rect_hw_fp(&state->src);
+       if (armada_rect_hw_fp(&old_state->src) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
+       val = armada_rect_yx(&state->dst);
+       if (armada_rect_yx(&old_state->dst) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
+       val = armada_rect_hw(&state->dst);
+       if (armada_rect_hw(&old_state->dst) != val)
+               armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
+       if (old_state->src.x1 != state->src.x1 ||
+           old_state->src.y1 != state->src.y1 ||
+           old_state->fb != state->fb ||
+           state->crtc->state->mode_changed) {
+               idx += armada_drm_crtc_calc_fb(state, regs + idx,
+                                              dcrtc->interlaced);
+       }
+       if (old_state->fb != state->fb ||
+           state->crtc->state->mode_changed) {
+               cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+                     CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
+               if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+                       cfg |= CFG_PALETTE_ENA;
+               if (state->visible)
+                       cfg |= CFG_GRA_ENA;
+               if (dcrtc->interlaced)
+                       cfg |= CFG_GRA_FTOGGLE;
+               cfg_mask = CFG_GRAFORMAT |
+                          CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+                                      CFG_SWAPYU | CFG_YUV2RGB) |
+                          CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+                          CFG_GRA_ENA;
+       } else if (old_state->visible != state->visible) {
+               cfg = state->visible ? CFG_GRA_ENA : 0;
+               cfg_mask = CFG_GRA_ENA;
+       } else {
+               cfg = cfg_mask = 0;
+       }
+       if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+           drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+               cfg_mask |= CFG_GRA_HSMOOTH;
+               if (drm_rect_width(&state->src) >> 16 !=
+                   drm_rect_width(&state->dst))
+                       cfg |= CFG_GRA_HSMOOTH;
+       }
+
+       if (cfg_mask)
+               armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+                                    LCD_SPU_DMA_CTRL0);
+
+       dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
+       struct drm_plane_state *old_state)
+{
+       struct armada_crtc *dcrtc;
+       struct armada_regs *regs;
+       unsigned int idx = 0;
+
+       DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+       if (!old_state->crtc)
+               return;
+
+       DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+               plane->base.id, plane->name,
+               old_state->crtc->base.id, old_state->crtc->name,
+               old_state->fb->base.id);
+
+       dcrtc = drm_to_armada_crtc(old_state->crtc);
+       regs = dcrtc->regs + dcrtc->regs_idx;
+
+       /* Disable plane and power down most RAMs and FIFOs */
+       armada_reg_queue_mod(regs, idx, 0, CFG_GRA_ENA, LCD_SPU_DMA_CTRL0);
+       armada_reg_queue_mod(regs, idx, CFG_PDWN256x32 | CFG_PDWN256x24 |
+                            CFG_PDWN256x8 | CFG_PDWN32x32 | CFG_PDWN64x66,
+                            0, LCD_SPU_SRAM_PARA1);
+
+       dcrtc->regs_idx += idx;
+}
+
+static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = {
+       .prepare_fb     = armada_drm_plane_prepare_fb,
+       .cleanup_fb     = armada_drm_plane_cleanup_fb,
+       .atomic_check   = armada_drm_plane_atomic_check,
+       .atomic_update  = armada_drm_primary_plane_atomic_update,
+       .atomic_disable = armada_drm_primary_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs armada_primary_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = drm_primary_helper_destroy,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+int armada_drm_primary_plane_init(struct drm_device *drm,
+       struct drm_plane *primary)
+{
+       int ret;
+
+       drm_plane_helper_add(primary, &armada_primary_plane_helper_funcs);
+
+       ret = drm_universal_plane_init(drm, primary, 0,
+                                      &armada_primary_plane_funcs,
+                                      armada_primary_formats,
+                                      ARRAY_SIZE(armada_primary_formats),
+                                      NULL,
+                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h
new file mode 100644
index 0000000..ff4281b
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.h
@@ -0,0 +1,15 @@
+#ifndef ARMADA_PLANE_H
+#define ARMADA_PLANE_H
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+       u16 pitches[3], bool interlaced);
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+       struct drm_plane_state *state);
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+       struct drm_plane_state *old_state);
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+       struct drm_plane_state *state);
+int armada_drm_primary_plane_init(struct drm_device *drm,
+       struct drm_plane *primary);
+
+#endif
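
The new armada_drm_plane_calc() above turns a framebuffer and a 16.16
fixed-point source rectangle into per-plane DMA start addresses and pitches:
the fractional part of x1/y1 is discarded by the >> 16, chroma plane offsets
are divided by the format's hsub/vsub factors, and interlaced scanout gets a
second address set one line down with doubled pitches. A small user-space
style sketch of the same arithmetic for a hypothetical 64x64 YUV420 planar
buffer (all sizes, offsets and the (5.5, 3.0) source origin are made up for
illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical YUV420 layout: full-res Y plane, then U, then V. */
        uint32_t base = 0x10000000;
        uint32_t offsets[3] = { 0, 64 * 64, 64 * 64 + 32 * 32 };
        uint32_t pitches[3] = { 64, 32, 32 };
        unsigned cpp = 1, hsub = 2, vsub = 2;

        unsigned src_x1 = (5 << 16) | 0x8000; /* 5.5 in 16.16 fixed point */
        unsigned src_y1 = 3 << 16;            /* 3.0 */
        unsigned x = src_x1 >> 16;            /* fraction dropped -> 5 */
        unsigned y = src_y1 >> 16;            /* 3 */

        uint32_t addr_y = base + offsets[0] + y * pitches[0] + x * cpp;
        x /= hsub;                            /* chroma is subsampled 2x2 */
        y /= vsub;
        uint32_t addr_u = base + offsets[1] + y * pitches[1] + x * cpp;
        uint32_t addr_v = base + offsets[2] + y * pitches[2] + x * cpp;

        /* Interlaced scanout would add one pitch to each address for the
         * second field and then double all three pitches. */
        printf("Y %08x U %08x V %08x\n", addr_y, addr_u, addr_v);
        return 0;
}
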
index 036dff8a1f33321c8101a091712f076d682b7301..5e77d456d9bb9434040107a69536815a270c7865 100644
@@ -790,12 +790,12 @@ static int ast_get_modes(struct drm_connector *connector)
        if (!flags)
                edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
        if (edid) {
-               drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+               drm_connector_update_edid_property(&ast_connector->base, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
                return ret;
        } else
-               drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+               drm_connector_update_edid_property(&ast_connector->base, NULL);
        return 0;
 }
 
@@ -900,7 +900,7 @@ static int ast_connector_init(struct drm_device *dev)
        connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
        encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        ast_connector->i2c = ast_i2c_create(dev);
        if (!ast_connector->i2c)
index c1ea5c36b0061a9220a0c168eab6a8d588054f25..843cac222e60d8e801aa1ad5c95db75d848d69ff 100644
@@ -681,6 +681,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
        drm_fb_cma_fbdev_fini(dev);
        flush_workqueue(dc->wq);
        drm_kms_helper_poll_fini(dev);
+       drm_atomic_helper_shutdown(dev);
        drm_mode_config_cleanup(dev);
 
        pm_runtime_get_sync(dev->dev);
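
The one-line atmel-hlcdc change above inserts drm_atomic_helper_shutdown()
into the unload path: committing an all-off state through the atomic helpers
drops the framebuffer and plane-state references still held by the hardware
state, so the subsequent drm_mode_config_cleanup() no longer finds live
objects (this is also why the custom plane ->destroy can go away in the next
file). A hedged sketch of the resulting teardown ordering, with the
driver-specific steps elided:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

/* Sketch only: a typical KMS unload path after this kind of fix. */
static void example_unload(struct drm_device *dev)
{
        drm_kms_helper_poll_fini(dev);
        /* Disable all CRTCs/planes via atomic commits first, releasing
         * any framebuffer references parked in plane/CRTC state... */
        drm_atomic_helper_shutdown(dev);
        /* ...so mode_config_cleanup() sees no dangling objects. */
        drm_mode_config_cleanup(dev);
}
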
index 73c875db45f4346afd5a25408e9264c466401138..04440064b9b7baaeaec4a9d19d48a33ed2b5dbdb 100644
@@ -412,9 +412,10 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
                                    ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
 }
 
-static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
+                                         struct atmel_hlcdc_plane_state *state)
 {
-       struct drm_crtc *crtc = plane->base.crtc;
+       struct drm_crtc *crtc = state->base.crtc;
        struct drm_color_lut *lut;
        int idx;
 
@@ -779,7 +780,7 @@ static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
        atmel_hlcdc_plane_update_pos_and_size(plane, state);
        atmel_hlcdc_plane_update_general_settings(plane, state);
        atmel_hlcdc_plane_update_format(plane, state);
-       atmel_hlcdc_plane_update_clut(plane);
+       atmel_hlcdc_plane_update_clut(plane, state);
        atmel_hlcdc_plane_update_buffers(plane, state);
        atmel_hlcdc_plane_update_disc_area(plane, state);
 
@@ -816,16 +817,6 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
        atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
 }
 
-static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
-{
-       struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
-
-       if (plane->base.fb)
-               drm_framebuffer_put(plane->base.fb);
-
-       drm_plane_cleanup(p);
-}
-
 static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
 {
        const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
@@ -839,7 +830,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
                        return ret;
        }
 
-       if (desc->layout.xstride && desc->layout.pstride) {
+       if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
                int ret;
 
                ret = drm_plane_create_rotation_property(&plane->base,
@@ -1002,7 +993,7 @@ static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
 static const struct drm_plane_funcs layer_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = atmel_hlcdc_plane_destroy,
+       .destroy = drm_plane_cleanup,
        .reset = atmel_hlcdc_plane_reset,
        .atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
        .atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
index 233980a785912853256373fb7dd8607a6ad20b61..ca5a9afdd5cfa0a338b207888883d112a925938b 100644
@@ -259,7 +259,7 @@ int bochs_kms_init(struct bochs_device *bochs)
        bochs_crtc_init(bochs->dev);
        bochs_encoder_init(bochs->dev);
        bochs_connector_init(bochs->dev);
-       drm_mode_connector_attach_encoder(&bochs->connector,
+       drm_connector_attach_encoder(&bochs->connector,
                                          &bochs->encoder);
 
        return 0;
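
Many hunks in this merge are a mechanical rename: the
drm_mode_connector_update_edid_property() and
drm_mode_connector_attach_encoder() helpers become
drm_connector_update_edid_property() and drm_connector_attach_encoder(),
with identical arguments and semantics. A hedged sketch of the post-rename
pattern in a generic connector init path (the example_ names and the ddc
parameter are placeholders, not from any driver in this diff):

#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <linux/slab.h>

static int example_connector_init(struct drm_connector *connector,
                                  struct drm_encoder *encoder,
                                  struct i2c_adapter *ddc)
{
        struct edid *edid;
        int ret;

        /* Renamed from drm_mode_connector_attach_encoder(); same args. */
        ret = drm_connector_attach_encoder(connector, encoder);
        if (ret)
                return ret;

        edid = drm_get_edid(connector, ddc);
        /* Renamed from drm_mode_connector_update_edid_property(); passing
         * a NULL edid clears the property, as the ast hunk above relies on. */
        drm_connector_update_edid_property(connector, edid);
        kfree(edid);
        return 0;
}
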
index fa2c7997e2fdf977253ff365fe7fe2a12d5637b4..bf6cad6c9178b10eda1e639ac0d785631d1f6518 100644
@@ -82,9 +82,11 @@ config DRM_PARADE_PS8622
 
 config DRM_SIL_SII8620
        tristate "Silicon Image SII8620 HDMI/MHL bridge"
-       depends on OF && RC_CORE
+       depends on OF
        select DRM_KMS_HELPER
        imply EXTCON
+       select INPUT
+       select RC_CORE
        help
          Silicon Image SII8620 HDMI/MHL bridge chip driver.
 
index 73021b388e12d3bc1ca7cabfc0178af223f77686..6437b878724a15cbc0e29e7df0808e4deef7a908 100644
@@ -601,7 +601,7 @@ static int adv7511_get_modes(struct adv7511 *adv7511,
                __adv7511_power_off(adv7511);
 
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid);
 
        adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
@@ -860,7 +860,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
        }
        drm_connector_helper_add(&adv->connector,
                                 &adv7511_connector_helper_funcs);
-       drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+       drm_connector_attach_encoder(&adv->connector, bridge->encoder);
 
        if (adv->type == ADV7533)
                ret = adv7533_attach_dsi(adv);
index b49043866be612019993b058ce2fbbc53b03ac63..f8433c93f4634620c177c77ac67aea70337288ec 100644
@@ -969,8 +969,8 @@ static int anx78xx_get_modes(struct drm_connector *connector)
                goto unlock;
        }
 
-       err = drm_mode_connector_update_edid_property(connector,
-                                                     anx78xx->edid);
+       err = drm_connector_update_edid_property(connector,
+                                                anx78xx->edid);
        if (err) {
                DRM_ERROR("Failed to update EDID property: %d\n", err);
                goto unlock;
@@ -1048,8 +1048,8 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge)
 
        anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
 
-       err = drm_mode_connector_attach_encoder(&anx78xx->connector,
-                                               bridge->encoder);
+       err = drm_connector_attach_encoder(&anx78xx->connector,
+                                          bridge->encoder);
        if (err) {
                DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
                return err;
index 2bcbfadb6ac5535782f8a78e0934365abe0796f8..d68986cea13258bed331d8d023db7b493f720e5b 100644
@@ -1119,8 +1119,8 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
                edid = drm_get_edid(connector, &dp->aux.ddc);
                pm_runtime_put(dp->dev);
                if (edid) {
-                       drm_mode_connector_update_edid_property(&dp->connector,
-                                                               edid);
+                       drm_connector_update_edid_property(&dp->connector,
+                                                          edid);
                        num_modes += drm_add_edid_modes(&dp->connector, edid);
                        kfree(edid);
                }
@@ -1210,7 +1210,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
 
                drm_connector_helper_add(connector,
                                         &analogix_dp_connector_helper_funcs);
-               drm_mode_connector_attach_encoder(connector, encoder);
+               drm_connector_attach_encoder(connector, encoder);
        }
 
        /*
index c255fc3e1be5d2cf13952613e608c53fe64fc3f3..ce9496d13986937f9de7a0cbd4d146b9959d3615 100644
@@ -1152,7 +1152,7 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
                np = of_node_get(dev->dev.of_node);
 
        panel = of_drm_find_panel(np);
-       if (panel) {
+       if (!IS_ERR(panel)) {
                bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI);
        } else {
                bridge = of_drm_find_bridge(dev->dev.of_node);
@@ -1337,7 +1337,7 @@ static const struct mipi_dsi_host_ops cdns_dsi_ops = {
        .transfer = cdns_dsi_transfer,
 };
 
-static int cdns_dsi_resume(struct device *dev)
+static int __maybe_unused cdns_dsi_resume(struct device *dev)
 {
        struct cdns_dsi *dsi = dev_get_drvdata(dev);
 
@@ -1350,7 +1350,7 @@ static int cdns_dsi_resume(struct device *dev)
        return 0;
 }
 
-static int cdns_dsi_suspend(struct device *dev)
+static int __maybe_unused cdns_dsi_suspend(struct device *dev)
 {
        struct cdns_dsi *dsi = dev_get_drvdata(dev);
 
index 9837c8d69e6918f0418158054db605b55e082cb8..9b706789a3417615fa74118186318f514376da80 100644
@@ -55,7 +55,7 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
                goto fallback;
        }
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
        return ret;
@@ -122,7 +122,7 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
                return ret;
        }
 
-       drm_mode_connector_attach_encoder(&vga->connector,
+       drm_connector_attach_encoder(&vga->connector,
                                          bridge->encoder);
 
        return 0;
index 75b0d3f6e4de919301b63af95b28d2b7af4bf79e..f56c92f7af7c484b90fcbac98b9177e36177b949 100644
@@ -68,9 +68,9 @@ static int lvds_encoder_probe(struct platform_device *pdev)
 
        panel = of_drm_find_panel(panel_node);
        of_node_put(panel_node);
-       if (!panel) {
+       if (IS_ERR(panel)) {
                dev_dbg(&pdev->dev, "panel not found, deferring probe\n");
-               return -EPROBE_DEFER;
+               return PTR_ERR(panel);
        }
 
        lvds_encoder->panel_bridge =
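
The lvds-encoder hunk above, like the cdns-dsi one earlier, adapts to an API
change: of_drm_find_panel() now returns an ERR_PTR-encoded error (including
-EPROBE_DEFER when the panel is not bound yet) instead of NULL, so callers
switch from NULL checks to IS_ERR()/PTR_ERR(). A minimal sketch of the new
calling convention (the example_ helper is illustrative, not from the patch):

#include <drm/drm_panel.h>
#include <linux/err.h>
#include <linux/of.h>

/* Sketch only: resolve a panel from a DT node under the new convention. */
static int example_find_panel(struct device_node *panel_node,
                              struct drm_panel **out)
{
        struct drm_panel *panel = of_drm_find_panel(panel_node);

        if (IS_ERR(panel))
                return PTR_ERR(panel);  /* may be -EPROBE_DEFER */

        *out = panel;
        return 0;
}
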
index 7ccadba7c98cd30c3c81e3e0dc183cd050ab6d63..2136c97aeb8ec9463ac1d79766448402b9664504 100644
@@ -152,7 +152,7 @@ static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
        ge_b850v3_lvds_ptr->edid = (struct edid *)stdp2690_get_edid(client);
 
        if (ge_b850v3_lvds_ptr->edid) {
-               drm_mode_connector_update_edid_property(connector,
+               drm_connector_update_edid_property(connector,
                                                      ge_b850v3_lvds_ptr->edid);
                num_modes = drm_add_edid_modes(connector,
                                               ge_b850v3_lvds_ptr->edid);
@@ -241,7 +241,7 @@ static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
                return ret;
        }
 
-       ret = drm_mode_connector_attach_encoder(connector, bridge->encoder);
+       ret = drm_connector_attach_encoder(connector, bridge->encoder);
        if (ret)
                return ret;
 
index d64a3283822ae7a877175aad01305d32b6d49bf5..a3e817abace101fecc6638b7aa58af2714dbcea3 100644
@@ -222,7 +222,7 @@ static int ptn3460_get_modes(struct drm_connector *connector)
        }
 
        ptn_bridge->edid = (struct edid *)edid;
-       drm_mode_connector_update_edid_property(connector, ptn_bridge->edid);
+       drm_connector_update_edid_property(connector, ptn_bridge->edid);
 
        num_modes = drm_add_edid_modes(connector, ptn_bridge->edid);
 
@@ -265,7 +265,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge)
        drm_connector_helper_add(&ptn_bridge->connector,
                                        &ptn3460_connector_helper_funcs);
        drm_connector_register(&ptn_bridge->connector);
-       drm_mode_connector_attach_encoder(&ptn_bridge->connector,
+       drm_connector_attach_encoder(&ptn_bridge->connector,
                                                        bridge->encoder);
 
        if (ptn_bridge->panel)
index 6d99d4a3beb36c13aac92a71cee32bc6e6c59cec..7cbaba213ef693d11c430533df45c838a56c1229 100644
@@ -79,7 +79,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge)
                return ret;
        }
 
-       drm_mode_connector_attach_encoder(&panel_bridge->connector,
+       drm_connector_attach_encoder(&panel_bridge->connector,
                                          bridge->encoder);
 
        ret = drm_panel_attach(panel_bridge->panel, &panel_bridge->connector);
index 81198f5e9afacf91bf2301652a2ac4671030f7cf..7334d1b62b71f800e7a7fbc0e4b121e0df2bacc2 100644
@@ -503,7 +503,7 @@ static int ps8622_attach(struct drm_bridge *bridge)
        drm_connector_helper_add(&ps8622->connector,
                                        &ps8622_connector_helper_funcs);
        drm_connector_register(&ps8622->connector);
-       drm_mode_connector_attach_encoder(&ps8622->connector,
+       drm_connector_attach_encoder(&ps8622->connector,
                                                        bridge->encoder);
 
        if (ps8622->panel)
index 60373d7eb22021127cb6f4a9bc7edf646e934be6..e59a135423336bd187f0038956f06ac4574d94dc 100644
@@ -170,7 +170,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
                return ret;
 
        edid = drm_get_edid(connector, sii902x->i2c->adapter);
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        if (edid) {
                num = drm_add_edid_modes(connector, edid);
                kfree(edid);
@@ -324,7 +324,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge)
        else
                sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
 
-       drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+       drm_connector_attach_encoder(&sii902x->connector, bridge->encoder);
 
        return 0;
 }
index 7ab36042a822cf6cfec2fc440ef3fbe6e018fa3d..a6e8f4591e636241c6f1e8515fea33dc9147a7f3 100644
@@ -14,6 +14,7 @@
 #include <drm/bridge/mhl.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
 
 #include <linux/clk.h>
 #include <linux/delay.h>
 
 #define SII8620_BURST_BUF_LEN 288
 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
 
 enum sii8620_mode {
        CM_DISCONNECTED,
@@ -69,9 +73,7 @@ struct sii8620 {
        struct regulator_bulk_data supplies[2];
        struct mutex lock; /* context lock, protects fields below */
        int error;
-       int pixel_clock;
        unsigned int use_packed_pixel:1;
-       int video_code;
        enum sii8620_mode mode;
        enum sii8620_sink_type sink_type;
        u8 cbus_status;
@@ -79,7 +81,9 @@ struct sii8620 {
        u8 xstat[MHL_XDS_SIZE];
        u8 devcap[MHL_DCAP_SIZE];
        u8 xdevcap[MHL_XDC_SIZE];
-       u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+       bool feature_complete;
+       bool devcap_read;
+       bool sink_detected;
        struct edid *edid;
        unsigned int gen2_write_burst:1;
        enum sii8620_mt_state mt_state;
@@ -476,7 +480,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
        }
 }
 
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
 {
        static const char * const sink_str[] = {
                [SINK_NONE] = "NONE",
@@ -487,7 +491,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
        char sink_name[20];
        struct device *dev = ctx->dev;
 
-       if (ret < 0)
+       if (!ctx->sink_detected || !ctx->devcap_read)
                return;
 
        sii8620_fetch_edid(ctx);
@@ -496,6 +500,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                sii8620_mhl_disconnected(ctx);
                return;
        }
+       sii8620_set_upstream_edid(ctx);
 
        if (drm_detect_hdmi_monitor(ctx->edid))
                ctx->sink_type = SINK_HDMI;
@@ -508,53 +513,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                 sink_str[ctx->sink_type], sink_name);
 }
 
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
-       if (!sii8620_is_mhl3(ctx))
-               return;
-
-       sii8620_write(ctx, REG_FCGC,
-               BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
-       sii8620_setbits(ctx, REG_HRXCTRL3,
-               BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
-       sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
-       sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
-       sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
-       sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
-       sii8620_write_seq_static(ctx,
-               REG_TDMLLCTL, 0,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
-                       BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
-               REG_HRXINTL, 0xff,
-               REG_HRXINTH, 0xff,
-               REG_TTXINTL, 0xff,
-               REG_TTXINTH, 0xff,
-               REG_TRXINTL, 0xff,
-               REG_TRXINTH, 0xff,
-               REG_HTXINTL, 0xff,
-               REG_HTXINTH, 0xff,
-               REG_FCINTR0, 0xff,
-               REG_FCINTR1, 0xff,
-               REG_FCINTR2, 0xff,
-               REG_FCINTR3, 0xff,
-               REG_FCINTR4, 0xff,
-               REG_FCINTR5, 0xff,
-               REG_FCINTR6, 0xff,
-               REG_FCINTR7, 0xff
-       );
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
-       if (ret < 0)
-               return;
-
-       sii8620_set_upstream_edid(ctx);
-       sii8620_hsic_init(ctx);
-       sii8620_enable_hpd(ctx);
-}
-
 static void sii8620_mr_devcap(struct sii8620 *ctx)
 {
        u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +528,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
                 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
                 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
        sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+       ctx->devcap_read = true;
+       sii8620_identify_sink(ctx);
 }
 
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +767,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
        u8 lm_ddc, ddc_cmd, int3, cbus;
+       unsigned long timeout;
        int fetched, i;
        int edid_len = EDID_LENGTH;
        u8 *edid;
@@ -856,23 +817,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
                        REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
                );
 
-               do {
-                       int3 = sii8620_readb(ctx, REG_INTR3);
+               int3 = 0;
+               timeout = jiffies + msecs_to_jiffies(200);
+               for (;;) {
                        cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
-                       if (int3 & BIT_DDC_CMD_DONE)
-                               break;
-
-                       if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+                       if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
+                               kfree(edid);
+                               edid = NULL;
+                               goto end;
+                       }
+                       if (int3 & BIT_DDC_CMD_DONE) {
+                               if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+                                   >= FETCH_SIZE)
+                                       break;
+                       } else {
+                               int3 = sii8620_readb(ctx, REG_INTR3);
+                       }
+                       if (time_is_before_jiffies(timeout)) {
+                               ctx->error = -ETIMEDOUT;
+                               dev_err(ctx->dev, "timeout during EDID read\n");
                                kfree(edid);
                                edid = NULL;
                                goto end;
                        }
-               } while (1);
-
-               sii8620_readb(ctx, REG_DDC_STATUS);
-               while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
                        usleep_range(10, 20);
+               }
 
                sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
                if (fetched + FETCH_SIZE == EDID_LENGTH) {
@@ -971,8 +940,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
        ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
        if (ret)
                return ret;
+
        usleep_range(10000, 20000);
-       return clk_prepare_enable(ctx->clk_xtal);
+       ret = clk_prepare_enable(ctx->clk_xtal);
+       if (ret)
+               return ret;
+
+       msleep(100);
+       gpiod_set_value(ctx->gpio_reset, 0);
+       msleep(100);
+
+       return 0;
 }
 
 static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +960,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
        return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 }
 
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       usleep_range(5000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 1);
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       msleep(300);
-}
-
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
        sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1055,23 +1022,23 @@ static void sii8620_set_format(struct sii8620 *ctx)
                                BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
                                ctx->use_packed_pixel ? ~0 : 0);
        } else {
-               if (ctx->use_packed_pixel)
+               if (ctx->use_packed_pixel) {
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, BIT_VID_MODE_M1080P,
                                REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
                                REG_MHLTX_CTL6, 0x60
                        );
-               else
+               } else {
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, 0,
                                REG_MHL_TOP_CTL, 1,
                                REG_MHLTX_CTL6, 0xa0
                        );
+               }
        }
 
        if (ctx->use_packed_pixel)
-               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
-                       BIT_TPI_OUTPUT_CSCMODE709;
+               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
        else
                out_fmt = VAL_TPI_FORMAT(RGB, FULL);
 
@@ -1128,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
        return frm_len;
 }
 
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+                                  struct drm_display_mode *mode)
 {
        struct mhl3_infoframe mhl_frm;
        union hdmi_infoframe frm;
        u8 buf[31];
        int ret;
 
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+                                                      mode,
+                                                      true);
+       if (ctx->use_packed_pixel)
+               frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+       if (!ret)
+               ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+       if (ret > 0)
+               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
        if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
                sii8620_write(ctx, REG_TPI_SC,
                        BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
-               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
-                       ARRAY_SIZE(ctx->avif) - 3);
                sii8620_write(ctx, REG_PKT_FILTER_0,
                        BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
                        BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1148,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
                return;
        }
 
-       ret = hdmi_avi_infoframe_init(&frm.avi);
-       frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
-       frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
-       frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
-       frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
-       frm.avi.video_code = ctx->video_code;
-       if (!ret)
-               ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
-       if (ret > 0)
-               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
        sii8620_write(ctx, REG_PKT_FILTER_0,
                BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
                BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1177,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
 
 static void sii8620_start_video(struct sii8620 *ctx)
 {
+       struct drm_display_mode *mode =
+               &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
        if (!sii8620_is_mhl3(ctx))
                sii8620_stop_video(ctx);
 
@@ -1195,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
        sii8620_set_format(ctx);
 
        if (!sii8620_is_mhl3(ctx)) {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                       MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+               u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+               if (ctx->use_packed_pixel)
+                       link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+               else
+                       link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
                sii8620_set_auto_zone(ctx);
        } else {
                static const struct {
@@ -1213,10 +1189,10 @@ static void sii8620_start_video(struct sii8620 *ctx)
                          MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
                };
                u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
-               int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+               int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
                int i;
 
-               for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+               for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
                        if (clk < clk_spec[i].max_clk)
                                break;
 
@@ -1242,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
                        clk_spec[i].link_rate);
        }
 
-       sii8620_set_infoframes(ctx);
+       sii8620_set_infoframes(ctx, mode);
 }
 
 static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1534,6 +1510,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
        );
 }
 
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+       sii8620_disable_hpd(ctx);
+       ctx->sink_type = SINK_NONE;
+       ctx->sink_detected = false;
+       ctx->feature_complete = false;
+       kfree(ctx->edid);
+       ctx->edid = NULL;
+}
+
 static void sii8620_disconnect(struct sii8620 *ctx)
 {
        sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1547,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
                REG_MHL_DP_CTL6, 0x2A,
                REG_MHL_DP_CTL7, 0x03
        );
-       sii8620_disable_hpd(ctx);
+       sii8620_hpd_unplugged(ctx);
        sii8620_write_seq_static(ctx,
                REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
                REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1595,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
        memset(ctx->xstat, 0, sizeof(ctx->xstat));
        memset(ctx->devcap, 0, sizeof(ctx->devcap));
        memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+       ctx->devcap_read = false;
        ctx->cbus_status = 0;
-       ctx->sink_type = SINK_NONE;
-       kfree(ctx->edid);
-       ctx->edid = NULL;
        sii8620_mt_cleanup(ctx);
 }
 
@@ -1699,17 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
 
 static void sii8620_status_changed_path(struct sii8620 *ctx)
 {
-       if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                                     MHL_DST_LM_CLK_MODE_NORMAL
-                                     | MHL_DST_LM_PATH_ENABLED);
-               if (!sii8620_is_mhl3(ctx))
-                       sii8620_mt_read_devcap(ctx, false);
-               sii8620_mt_set_cont(ctx, sii8620_sink_detected);
-       } else {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                                     MHL_DST_LM_CLK_MODE_NORMAL);
-       }
+       u8 link_mode;
+
+       if (ctx->use_packed_pixel)
+               link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+       else
+               link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+       if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+               link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+       sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+                             link_mode);
 }
 
 static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -1722,9 +1707,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
        sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
        sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-       if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+       if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+           MHL_DST_CONN_DCAP_RDY) {
                sii8620_status_dcap_ready(ctx);
 
+               if (!sii8620_is_mhl3(ctx))
+                       sii8620_mt_read_devcap(ctx, false);
+       }
+
        if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
                sii8620_status_changed_path(ctx);
 }
@@ -1808,8 +1798,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
        }
        if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
                sii8620_send_features(ctx);
-       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
-               sii8620_edid_read(ctx, 0);
+       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+               ctx->feature_complete = true;
+               if (ctx->edid)
+                       sii8620_enable_hpd(ctx);
+       }
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1877,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
        if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
                sii8620_msc_mr_write_stat(ctx);
 
+       if (stat & BIT_CBUS_HPD_CHG) {
+               if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+                       ctx->sink_detected = true;
+                       sii8620_identify_sink(ctx);
+               } else {
+                       sii8620_hpd_unplugged(ctx);
+               }
+       }
+
        if (stat & BIT_CBUS_MSC_MR_SET_INT)
                sii8620_msc_mr_set_int(ctx);
 
@@ -1931,14 +1933,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
                ctx->mt_state = MT_STATE_DONE;
 }
 
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
-       sii8620_write_seq_static(ctx,
-               REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
-               REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
-       );
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
        u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1940,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
        if (stat & BIT_INTR_SCDT_CHANGE) {
                u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
 
-               if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
-                       if (ctx->sink_type == SINK_HDMI)
-                               /* enable infoframe interrupt */
-                               sii8620_scdt_high(ctx);
-                       else
-                               sii8620_start_video(ctx);
-               }
+               if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+                       sii8620_start_video(ctx);
        }
 
        sii8620_write(ctx, REG_INTR5, stat);
 }
 
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
-       u8 vsif[11];
-
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2,
-                     VAL_RX_HDMI_CTRL2_DEFVAL |
-                     BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
-                        ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
-                        ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
-       u8 stat = sii8620_readb(ctx, REG_INTR8)
-               & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
-       sii8620_write(ctx, REG_INTR8, stat);
-
-       if (stat & BIT_CEA_NEW_VSI)
-               sii8620_new_vsi(ctx);
-
-       if (stat & BIT_CEA_NEW_AVI)
-               sii8620_new_avi(ctx);
-
-       if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
-               sii8620_start_video(ctx);
-}
-
 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
 {
        if (ret < 0)
@@ -2043,11 +1997,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
 
        if (stat & BIT_DDC_CMD_DONE) {
                sii8620_write(ctx, REG_INTR3_MASK, 0);
-               if (sii8620_is_mhl3(ctx))
+               if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
                        sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
                                           MHL_INT_RC_FEAT_REQ);
                else
-                       sii8620_edid_read(ctx, 0);
+                       sii8620_enable_hpd(ctx);
        }
        sii8620_write(ctx, REG_INTR3, stat);
 }
@@ -2074,7 +2028,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
                { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
                { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
                { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
-               { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
        };
        struct sii8620 *ctx = data;
        u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2065,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
                dev_err(dev, "Error powering on, %d.\n", ret);
                return;
        }
-       sii8620_hw_reset(ctx);
 
        sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
        ret = sii8620_clear_error(ctx);
@@ -2268,17 +2220,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
        rc_unregister_device(ctx->rc_dev);
 }
 
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+                                      const struct drm_display_mode *mode)
+{
+       int max_pclk, max_pclk_pp_mode;
+
+       if (sii8620_is_mhl3(ctx)) {
+               max_pclk = MHL3_MAX_PCLK;
+               max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+       } else {
+               max_pclk = MHL1_MAX_PCLK;
+               max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+       }
+
+       if (mode->clock < max_pclk)
+               return 0;
+       else if (mode->clock < max_pclk_pp_mode)
+               return 1;
+       else
+               return -1;
+}
+
 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
                                         const struct drm_display_mode *mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
+       int pack_required = sii8620_is_packing_required(ctx, mode);
        bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
                        MHL_DCAP_VID_LINK_PPIXEL;
-       unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
-                                                      MHL1_MAX_LCLK;
-       max_pclk /= can_pack ? 2 : 3;
 
-       return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+       switch (pack_required) {
+       case 0:
+               return MODE_OK;
+       case 1:
+               return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+       default:
+               return MODE_CLOCK_HIGH;
+       }
 }
 
 static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2264,14 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
                               struct drm_display_mode *adjusted_mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
-       int max_lclk;
-       bool ret = true;
 
        mutex_lock(&ctx->lock);
 
-       max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
-       if (max_lclk > 3 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 0;
-               goto end;
-       }
-       if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
-           max_lclk > 2 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 1;
-               goto end;
-       }
-       ret = false;
-end:
-       if (ret) {
-               u8 vic = drm_match_cea_mode(adjusted_mode);
-
-               if (!vic) {
-                       union hdmi_infoframe frm;
-                       u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
-                       /* FIXME: We need the connector here */
-                       drm_hdmi_vendor_infoframe_from_display_mode(
-                               &frm.vendor.hdmi, NULL, adjusted_mode);
-                       vic = frm.vendor.hdmi.vic;
-                       if (vic >= ARRAY_SIZE(mhl_vic))
-                               vic = 0;
-                       vic = mhl_vic[vic];
-               }
-               ctx->video_code = vic;
-               ctx->pixel_clock = adjusted_mode->clock;
-       }
+       ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+
        mutex_unlock(&ctx->lock);
-       return ret;
+
+       return true;
 }
 
 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
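The EDID rework above replaces the driver's two open-ended polling loops (one on REG_INTR3, one on REG_DDC_DOUT_CNT) with a single loop bounded by a 200 ms deadline, so a stalled DDC transaction now fails with -ETIMEDOUT instead of hanging. A minimal sketch of the jiffies idiom the new loop is built on (poll_done() is a placeholder, not a driver function):

	#include <linux/jiffies.h>
	#include <linux/delay.h>

	unsigned long deadline = jiffies + msecs_to_jiffies(200);

	for (;;) {
		if (poll_done())		/* e.g. BIT_DDC_CMD_DONE set */
			break;
		if (time_is_before_jiffies(deadline))
			return -ETIMEDOUT;	/* deadline has passed */
		usleep_range(10, 20);		/* back off between reads */
	}

time_is_before_jiffies(deadline) becomes true once the jiffies counter has advanced past the deadline, so the loop terminates even if the hardware never raises the done bit.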
index 3c136f2b954fd1344225f5a8c9ff015478786b68..5971976284bf9ddb34434485624d531313791ee3 100644 (file)
@@ -1922,7 +1922,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
 
                hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
                hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
@@ -1974,7 +1974,7 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
        drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
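The hunks in this and the two following bridge drivers, plus one in the cirrus modeset code further down, are the same mechanical rename: the connector helpers lost their historical drm_mode_ prefix. Arguments and semantics are unchanged:

	/* before */
	drm_mode_connector_update_edid_property(connector, edid);
	drm_mode_connector_attach_encoder(connector, encoder);

	/* after */
	drm_connector_update_edid_property(connector, edid);
	drm_connector_attach_encoder(connector, encoder);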
index 0fd9cf27542c396a2fc36ae7056f3237906d407a..8e28e738cb52dec6ee8ea7eda2d655fc7035be93 100644 (file)
@@ -1140,7 +1140,7 @@ static int tc_connector_get_modes(struct drm_connector *connector)
        if (!edid)
                return 0;
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid);
 
        return count;
@@ -1195,7 +1195,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
 
        drm_display_info_set_bus_formats(&tc->connector.display_info,
                                         &bus_format, 1);
-       drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+       drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
 
        return 0;
 }
index acb857030951a0eff0682fce7b5d38d80ba6edb1..c3e32138c6bb08c5cdb6c75e7d6624984914e0c0 100644 (file)
@@ -62,7 +62,7 @@ static int tfp410_get_modes(struct drm_connector *connector)
                goto fallback;
        }
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
 
        return drm_add_edid_modes(connector, edid);
 fallback:
@@ -132,7 +132,7 @@ static int tfp410_attach(struct drm_bridge *bridge)
                return ret;
        }
 
-       drm_mode_connector_attach_encoder(&dvi->connector,
+       drm_connector_attach_encoder(&dvi->connector,
                                          bridge->encoder);
 
        return 0;
index be2d7e4880621a075b4d0f900d14cc204e86c262..ce9db7aab2255c773218778aab7fd62e607a071d 100644 (file)
@@ -92,7 +92,6 @@
 
 #define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
 #define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
-#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
 
 struct cirrus_crtc {
        struct drm_crtc                 base;
@@ -117,11 +116,6 @@ struct cirrus_connector {
        struct drm_connector            base;
 };
 
-struct cirrus_framebuffer {
-       struct drm_framebuffer          base;
-       struct drm_gem_object *obj;
-};
-
 struct cirrus_mc {
        resource_size_t                 vram_size;
        resource_size_t                 vram_base;
@@ -152,7 +146,7 @@ struct cirrus_device {
 
 struct cirrus_fbdev {
        struct drm_fb_helper helper;
-       struct cirrus_framebuffer gfb;
+       struct drm_framebuffer gfb;
        void *sysram;
        int size;
        int x1, y1, x2, y2; /* dirty rect */
@@ -198,7 +192,7 @@ int cirrus_dumb_create(struct drm_file *file,
                       struct drm_mode_create_dumb *args);
 
 int cirrus_framebuffer_init(struct drm_device *dev,
-                          struct cirrus_framebuffer *gfb,
+                           struct drm_framebuffer *gfb,
                            const struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_gem_object *obj);
 
index 32fbfba2c623a7ddbb43d68534316b5a8485d950..b643ac92801c81cacae37fc876497cb9a52accdf 100644 (file)
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
        struct drm_gem_object *obj;
        struct cirrus_bo *bo;
        int src_offset, dst_offset;
-       int bpp = afbdev->gfb.base.format->cpp[0];
+       int bpp = afbdev->gfb.format->cpp[0];
        int ret = -EBUSY;
        bool unmap = false;
        bool store_for_later = false;
        int x2, y2;
        unsigned long flags;
 
-       obj = afbdev->gfb.obj;
+       obj = afbdev->gfb.obj[0];
        bo = gem_to_cirrus_bo(obj);
 
        /*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
        }
        for (i = y; i < y + height; i++) {
                /* assume equal stride for now */
-               src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+               src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
                memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
 
        }
@@ -204,7 +204,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
        gfbdev->sysram = sysram;
        gfbdev->size = size;
 
-       fb = &gfbdev->gfb.base;
+       fb = &gfbdev->gfb;
        if (!fb) {
                DRM_INFO("fb is NULL\n");
                return -EINVAL;
@@ -246,19 +246,19 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
 static int cirrus_fbdev_destroy(struct drm_device *dev,
                                struct cirrus_fbdev *gfbdev)
 {
-       struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+       struct drm_framebuffer *gfb = &gfbdev->gfb;
 
        drm_fb_helper_unregister_fbi(&gfbdev->helper);
 
-       if (gfb->obj) {
-               drm_gem_object_put_unlocked(gfb->obj);
-               gfb->obj = NULL;
+       if (gfb->obj[0]) {
+               drm_gem_object_put_unlocked(gfb->obj[0]);
+               gfb->obj[0] = NULL;
        }
 
        vfree(gfbdev->sysram);
        drm_fb_helper_fini(&gfbdev->helper);
-       drm_framebuffer_unregister_private(&gfb->base);
-       drm_framebuffer_cleanup(&gfb->base);
+       drm_framebuffer_unregister_private(gfb);
+       drm_framebuffer_cleanup(gfb);
 
        return 0;
 }
index 26df1e8cd490d57f9fd3af3624f484a588114735..60d54e10a34d434ef3d48fcaa299f1e8c305c13c 100644 (file)
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "cirrus_drv.h"
 
-static int cirrus_create_handle(struct drm_framebuffer *fb,
-                               struct drm_file* file_priv,
-                               unsigned int* handle)
-{
-       struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
-       return drm_gem_handle_create(file_priv, cirrus_fb->obj, handle);
-}
-
-static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-
-       drm_gem_object_put_unlocked(cirrus_fb->obj);
-       drm_framebuffer_cleanup(fb);
-       kfree(fb);
-}
-
 static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
-       .create_handle = cirrus_create_handle,
-       .destroy = cirrus_user_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
 int cirrus_framebuffer_init(struct drm_device *dev,
-                           struct cirrus_framebuffer *gfb,
+                           struct drm_framebuffer *gfb,
                            const struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_gem_object *obj)
 {
        int ret;
 
-       drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
-       gfb->obj = obj;
-       ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+       drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
+       gfb->obj[0] = obj;
+       ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
        if (ret) {
                DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
                return ret;
@@ -60,7 +43,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
 {
        struct cirrus_device *cdev = dev->dev_private;
        struct drm_gem_object *obj;
-       struct cirrus_framebuffer *cirrus_fb;
+       struct drm_framebuffer *fb;
        u32 bpp;
        int ret;
 
@@ -74,19 +57,19 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
        if (obj == NULL)
                return ERR_PTR(-ENOENT);
 
-       cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
-       if (!cirrus_fb) {
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb) {
                drm_gem_object_put_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }
 
-       ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+       ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
        if (ret) {
                drm_gem_object_put_unlocked(obj);
-               kfree(cirrus_fb);
+               kfree(fb);
                return ERR_PTR(ret);
        }
-       return &cirrus_fb->base;
+       return fb;
 }
 
 static const struct drm_mode_config_funcs cirrus_mode_funcs = {
index c91b9b054e3f77805bd3ae00b0db3bf74144f6d1..336bfda401257f60a17bfa350529e4dc3555b9f9 100644 (file)
@@ -101,17 +101,13 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
                                int x, int y, int atomic)
 {
        struct cirrus_device *cdev = crtc->dev->dev_private;
-       struct drm_gem_object *obj;
-       struct cirrus_framebuffer *cirrus_fb;
        struct cirrus_bo *bo;
        int ret;
        u64 gpu_addr;
 
        /* push the previous fb to system ram */
        if (!atomic && fb) {
-               cirrus_fb = to_cirrus_framebuffer(fb);
-               obj = cirrus_fb->obj;
-               bo = gem_to_cirrus_bo(obj);
+               bo = gem_to_cirrus_bo(fb->obj[0]);
                ret = cirrus_bo_reserve(bo, false);
                if (ret)
                        return ret;
@@ -119,9 +115,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
                cirrus_bo_unreserve(bo);
        }
 
-       cirrus_fb = to_cirrus_framebuffer(crtc->primary->fb);
-       obj = cirrus_fb->obj;
-       bo = gem_to_cirrus_bo(obj);
+       bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
 
        ret = cirrus_bo_reserve(bo, false);
        if (ret)
@@ -133,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
                return ret;
        }
 
-       if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+       if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
                /* if pushing console in kmap it */
                ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
                if (ret)
@@ -536,7 +530,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev)
                return -1;
        }
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        ret = cirrus_fbdev_init(cdev);
        if (ret) {
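With struct cirrus_framebuffer gone, the GEM object lives in the generic drm_framebuffer.obj[] array, which is what lets the driver swap its hand-rolled create_handle/destroy callbacks for the shared helpers from <drm/drm_gem_framebuffer_helper.h>. A simplified sketch of what those helpers do (not the verbatim implementation):

	int drm_gem_fb_create_handle(struct drm_framebuffer *fb,
				     struct drm_file *file,
				     unsigned int *handle)
	{
		/* hand out a handle to the first (here: only) GEM object */
		return drm_gem_handle_create(file, fb->obj[0], handle);
	}

	void drm_gem_fb_destroy(struct drm_framebuffer *fb)
	{
		int i;

		for (i = 0; i < 4; i++) {	/* up to four planes per fb */
			if (fb->obj[i])
				drm_gem_object_put_unlocked(fb->obj[i]);
		}

		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}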
index 895741e9cd7db291c099a69df24bb5d184eb0691..3eb061e11e2efb2caa36738b6425d2f789c19f6d 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_print.h>
+#include <drm/drm_writeback.h>
 #include <linux/sync_file.h>
 
 #include "drm_crtc_internal.h"
@@ -325,6 +326,35 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
        return fence_ptr;
 }
 
+static int set_out_fence_for_connector(struct drm_atomic_state *state,
+                                       struct drm_connector *connector,
+                                       s32 __user *fence_ptr)
+{
+       unsigned int index = drm_connector_index(connector);
+
+       if (!fence_ptr)
+               return 0;
+
+       if (put_user(-1, fence_ptr))
+               return -EFAULT;
+
+       state->connectors[index].out_fence_ptr = fence_ptr;
+
+       return 0;
+}
+
+static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
+                                              struct drm_connector *connector)
+{
+       unsigned int index = drm_connector_index(connector);
+       s32 __user *fence_ptr;
+
+       fence_ptr = state->connectors[index].out_fence_ptr;
+       state->connectors[index].out_fence_ptr = NULL;
+
+       return fence_ptr;
+}
+
 /**
  * drm_atomic_set_mode_for_crtc - set mode for CRTC
  * @state: the CRTC whose incoming state to update
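The pair of helpers just added gives writeback connectors the same out-fence plumbing CRTCs already have: setting the property first writes -1 through the user pointer, then stores the pointer in the atomic state, and the commit path later replaces that -1 with a sync_file fd. A hypothetical userspace sequence (libdrm calls; property-id lookup elided, property names as created by drm_writeback_connector_init()):

	int32_t out_fence = -1;

	/* WRITEBACK_FB_ID / WRITEBACK_OUT_FENCE_PTR property ids are
	 * assumed to have been found via drmModeObjectGetProperties() */
	drmModeAtomicAddProperty(req, conn_id, wb_fb_id_prop, fb_id);
	drmModeAtomicAddProperty(req, conn_id, wb_out_fence_prop,
				 (uint64_t)(uintptr_t)&out_fence);

	if (drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL) == 0
	    && out_fence >= 0) {
		/* out_fence is now a sync_file fd: poll it, then close() it */
	}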
@@ -339,6 +369,7 @@ static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
                                 const struct drm_display_mode *mode)
 {
+       struct drm_crtc *crtc = state->crtc;
        struct drm_mode_modeinfo umode;
 
        /* Early return for no change. */
@@ -359,13 +390,13 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
 
                drm_mode_copy(&state->mode, mode);
                state->enable = true;
-               DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
-                                mode->name, state);
+               DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+                                mode->name, crtc->base.id, crtc->name, state);
        } else {
                memset(&state->mode, 0, sizeof(state->mode));
                state->enable = false;
-               DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
-                                state);
+               DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+                                crtc->base.id, crtc->name, state);
        }
 
        return 0;
@@ -388,6 +419,8 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
                                       struct drm_property_blob *blob)
 {
+       struct drm_crtc *crtc = state->crtc;
+
        if (blob == state->mode_blob)
                return 0;
 
@@ -397,19 +430,34 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
        memset(&state->mode, 0, sizeof(state->mode));
 
        if (blob) {
-               if (blob->length != sizeof(struct drm_mode_modeinfo) ||
-                   drm_mode_convert_umode(state->crtc->dev, &state->mode,
-                                          blob->data))
+               int ret;
+
+               if (blob->length != sizeof(struct drm_mode_modeinfo)) {
+                       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
+                                        crtc->base.id, crtc->name,
+                                        blob->length);
                        return -EINVAL;
+               }
+
+               ret = drm_mode_convert_umode(crtc->dev,
+                                            &state->mode, blob->data);
+               if (ret) {
+                       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
+                                        crtc->base.id, crtc->name,
+                                        ret, drm_get_mode_status_name(state->mode.status));
+                       drm_mode_debug_printmodeline(&state->mode);
+                       return -EINVAL;
+               }
 
                state->mode_blob = drm_property_blob_get(blob);
                state->enable = true;
-               DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
-                                state->mode.name, state);
+               DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
+                                state->mode.name, crtc->base.id, crtc->name,
+                                state);
        } else {
                state->enable = false;
-               DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
-                                state);
+               DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
+                                crtc->base.id, crtc->name, state);
        }
 
        return 0;
@@ -539,10 +587,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                        return -EFAULT;
 
                set_out_fence_for_crtc(state->state, crtc, fence_ptr);
-       } else if (crtc->funcs->atomic_set_property)
+       } else if (crtc->funcs->atomic_set_property) {
                return crtc->funcs->atomic_set_property(crtc, state, property, val);
-       else
+       } else {
+               DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
+                                crtc->base.id, crtc->name,
+                                property->base.id, property->name);
                return -EINVAL;
+       }
 
        return 0;
 }
@@ -676,6 +728,51 @@ static void drm_atomic_crtc_print_state(struct drm_printer *p,
                crtc->funcs->atomic_print_state(p, state);
 }
 
+/**
+ * drm_atomic_connector_check - check connector state
+ * @connector: connector to check
+ * @state: connector state to check
+ *
+ * Provides core sanity checks for connector state.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure
+ */
+static int drm_atomic_connector_check(struct drm_connector *connector,
+               struct drm_connector_state *state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_writeback_job *writeback_job = state->writeback_job;
+
+       if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
+               return 0;
+
+       if (writeback_job->fb && !state->crtc) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
+                                connector->base.id, connector->name);
+               return -EINVAL;
+       }
+
+       if (state->crtc)
+               crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+                                                               state->crtc);
+
+       if (writeback_job->fb && !crtc_state->active) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
+                                connector->base.id, connector->name,
+                                state->crtc->base.id);
+               return -EINVAL;
+       }
+
+       if (writeback_job->out_fence && !writeback_job->fb) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+                                connector->base.id, connector->name);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /**
  * drm_atomic_get_plane_state - get plane state
  * @state: global atomic state object
@@ -700,6 +797,11 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
 
        WARN_ON(!state->acquire_ctx);
 
+       /* the legacy pointers should never be set */
+       WARN_ON(plane->fb);
+       WARN_ON(plane->old_fb);
+       WARN_ON(plane->crtc);
+
        plane_state = drm_atomic_get_existing_plane_state(state, plane);
        if (plane_state)
                return plane_state;
@@ -794,8 +896,11 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
        } else if (property == plane->alpha_property) {
                state->alpha = val;
        } else if (property == plane->rotation_property) {
-               if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
+               if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
+                       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
+                                        plane->base.id, plane->name, val);
                        return -EINVAL;
+               }
                state->rotation = val;
        } else if (property == plane->zpos_property) {
                state->zpos = val;
@@ -807,6 +912,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
                return plane->funcs->atomic_set_property(plane, state,
                                property, val);
        } else {
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
+                                plane->base.id, plane->name,
+                                property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -914,10 +1022,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 
        /* either *both* CRTC and FB must be set, or neither */
        if (state->crtc && !state->fb) {
-               DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
+                                plane->base.id, plane->name);
                return -EINVAL;
        } else if (state->fb && !state->crtc) {
-               DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
+                                plane->base.id, plane->name);
                return -EINVAL;
        }
 
@@ -927,7 +1037,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 
        /* Check whether this plane is usable on this CRTC */
        if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
-               DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
+               DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
+                                state->crtc->base.id, state->crtc->name,
+                                plane->base.id, plane->name);
                return -EINVAL;
        }
 
@@ -936,7 +1048,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
                                           state->fb->modifier);
        if (ret) {
                struct drm_format_name_buf format_name;
-               DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n",
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+                                plane->base.id, plane->name,
                                 drm_get_format_name(state->fb->format->format,
                                                     &format_name),
                                 state->fb->modifier);
@@ -948,7 +1061,8 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
            state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
            state->crtc_h > INT_MAX ||
            state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
-               DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
+                                plane->base.id, plane->name,
                                 state->crtc_w, state->crtc_h,
                                 state->crtc_x, state->crtc_y);
                return -ERANGE;
@@ -962,8 +1076,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
            state->src_x > fb_width - state->src_w ||
            state->src_h > fb_height ||
            state->src_y > fb_height - state->src_h) {
-               DRM_DEBUG_ATOMIC("Invalid source coordinates "
+               DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
                                 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
+                                plane->base.id, plane->name,
                                 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
                                 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
                                 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
@@ -996,6 +1111,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
        drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
        drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
        drm_printf(p, "\trotation=%x\n", state->rotation);
+       drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
        drm_printf(p, "\tcolor-encoding=%s\n",
                   drm_get_color_encoding_name(state->color_encoding));
        drm_printf(p, "\tcolor-range=%s\n",
@@ -1120,6 +1236,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
        state->private_objs[index].old_state = obj->state;
        state->private_objs[index].new_state = obj_state;
        state->private_objs[index].ptr = obj;
+       obj_state->state = state;
 
        state->num_private_objs = num_objs;
 
@@ -1278,6 +1395,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
                        state->link_status = val;
        } else if (property == config->aspect_ratio_property) {
                state->picture_aspect_ratio = val;
+       } else if (property == config->content_type_property) {
+               state->content_type = val;
        } else if (property == connector->scaling_mode_property) {
                state->scaling_mode = val;
        } else if (property == connector->content_protection_property) {
@@ -1286,10 +1405,24 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
                        return -EINVAL;
                }
                state->content_protection = val;
+       } else if (property == config->writeback_fb_id_property) {
+               struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+               int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+               if (fb)
+                       drm_framebuffer_put(fb);
+               return ret;
+       } else if (property == config->writeback_out_fence_ptr_property) {
+               s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+               return set_out_fence_for_connector(state->state, connector,
+                                                  fence_ptr);
        } else if (connector->funcs->atomic_set_property) {
                return connector->funcs->atomic_set_property(connector,
                                state, property, val);
        } else {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
+                                connector->base.id, connector->name,
+                                property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -1304,6 +1437,10 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
        drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
        drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
 
+       if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+               if (state->writeback_job && state->writeback_job->fb)
+                       drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
+
        if (connector->funcs->atomic_print_state)
                connector->funcs->atomic_print_state(p, state);
 }
@@ -1363,10 +1500,17 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
                *val = state->link_status;
        } else if (property == config->aspect_ratio_property) {
                *val = state->picture_aspect_ratio;
+       } else if (property == config->content_type_property) {
+               *val = state->content_type;
        } else if (property == connector->scaling_mode_property) {
                *val = state->scaling_mode;
        } else if (property == connector->content_protection_property) {
                *val = state->content_protection;
+       } else if (property == config->writeback_fb_id_property) {
+               /* Writeback framebuffer is one-shot, write and forget */
+               *val = 0;
+       } else if (property == config->writeback_out_fence_ptr_property) {
+               *val = 0;
        } else if (connector->funcs->atomic_get_property) {
                return connector->funcs->atomic_get_property(connector,
                                state, property, val);
@@ -1442,7 +1586,7 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
                if (WARN_ON(IS_ERR(crtc_state)))
                        return PTR_ERR(crtc_state);
 
-               crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
+               crtc_state->plane_mask &= ~drm_plane_mask(plane);
        }
 
        plane_state->crtc = crtc;
@@ -1452,15 +1596,16 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
                                                       crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
-               crtc_state->plane_mask |= (1 << drm_plane_index(plane));
+               crtc_state->plane_mask |= drm_plane_mask(plane);
        }
 
        if (crtc)
-               DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
-                                plane_state, crtc->base.id, crtc->name);
+               DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
+                                plane->base.id, plane->name, plane_state,
+                                crtc->base.id, crtc->name);
        else
-               DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
-                                plane_state);
+               DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
+                                plane->base.id, plane->name, plane_state);
 
        return 0;
 }
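drm_plane_mask() and drm_connector_mask(), substituted here and in the hunks below for the open-coded shifts, are thin inline helpers; as a sketch of their definitions (assuming the <drm/drm_plane.h> / <drm/drm_connector.h> versions of this era):

	static inline u32 drm_plane_mask(const struct drm_plane *plane)
	{
		return 1 << drm_plane_index(plane);
	}

	static inline u32 drm_connector_mask(const struct drm_connector *connector)
	{
		return 1 << drm_connector_index(connector);
	}

Using the named helpers keeps the mask type (u32) in one place and reads better than scattering 1 << drm_*_index() across call sites.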
@@ -1480,12 +1625,15 @@ void
 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
                            struct drm_framebuffer *fb)
 {
+       struct drm_plane *plane = plane_state->plane;
+
        if (fb)
-               DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
-                                fb->base.id, plane_state);
-       else
-               DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+               DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
+                                fb->base.id, plane->base.id, plane->name,
                                 plane_state);
+       else
+               DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
+                                plane->base.id, plane->name, plane_state);
 
        drm_framebuffer_assign(&plane_state->fb, fb);
 }
@@ -1546,6 +1694,7 @@ int
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                                  struct drm_crtc *crtc)
 {
+       struct drm_connector *connector = conn_state->connector;
        struct drm_crtc_state *crtc_state;
 
        if (conn_state->crtc == crtc)
@@ -1556,7 +1705,7 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                                                           conn_state->crtc);
 
                crtc_state->connector_mask &=
-                       ~(1 << drm_connector_index(conn_state->connector));
+                       ~drm_connector_mask(conn_state->connector);
 
                drm_connector_put(conn_state->connector);
                conn_state->crtc = NULL;
@@ -1568,15 +1717,17 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                        return PTR_ERR(crtc_state);
 
                crtc_state->connector_mask |=
-                       1 << drm_connector_index(conn_state->connector);
+                       drm_connector_mask(conn_state->connector);
 
                drm_connector_get(conn_state->connector);
                conn_state->crtc = crtc;
 
-               DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
+               DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
+                                connector->base.id, connector->name,
                                 conn_state, crtc->base.id, crtc->name);
        } else {
-               DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+               DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
+                                connector->base.id, connector->name,
                                 conn_state);
        }
 
@@ -1584,6 +1735,70 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 }
 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
 
+/*
+ * drm_atomic_get_writeback_job - return or allocate a writeback job
+ * @conn_state: Connector state to get the job for
+ *
+ * Writeback jobs have a different lifetime to the atomic state they are
+ * associated with. This convenience function takes care of allocating a job
+ * if there isn't yet one associated with the connector state, otherwise
+ * it just returns the existing job.
+ *
+ * Returns: The writeback job for the given connector state
+ */
+static struct drm_writeback_job *
+drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
+{
+       WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+       if (!conn_state->writeback_job)
+               conn_state->writeback_job =
+                       kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+
+       return conn_state->writeback_job;
+}
+
+/**
+ * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
+ * @conn_state: atomic state object for the connector
+ * @fb: fb to use for the connector
+ *
+ * This is used to set the framebuffer for a writeback connector, which outputs
+ * to a buffer instead of an actual physical connector.
+ * Changing the assigned framebuffer requires us to grab a reference to the new
+ * fb and drop the reference to the old fb, if there is one. This function
+ * takes care of all these details as well as updating the pointer in the
+ * state object itself.
+ *
+ * Note: The only way conn_state can already have an fb set is if the commit
+ * sets the property more than once.
+ *
+ * See also: drm_writeback_connector_init()
+ *
+ * Returns: 0 on success
+ */
+int drm_atomic_set_writeback_fb_for_connector(
+               struct drm_connector_state *conn_state,
+               struct drm_framebuffer *fb)
+{
+       struct drm_writeback_job *job =
+               drm_atomic_get_writeback_job(conn_state);
+       if (!job)
+               return -ENOMEM;
+
+       drm_framebuffer_assign(&job->fb, fb);
+
+       if (fb)
+               DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
+                                fb->base.id, conn_state);
+       else
+               DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
+                                conn_state);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);
+
 /**
  * drm_atomic_add_affected_connectors - add connectors for crtc
  * @state: atomic state
@@ -1629,7 +1844,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
         */
        drm_connector_list_iter_begin(state->dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
-               if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
+               if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
                        continue;
 
                conn_state = drm_atomic_get_connector_state(state, connector);
@@ -1672,6 +1887,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
 
        WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
 
+       DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
+                        crtc->base.id, crtc->name, state);
+
        drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
                struct drm_plane_state *plane_state =
                        drm_atomic_get_plane_state(state, plane);
@@ -1702,6 +1920,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
        struct drm_plane_state *plane_state;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
+       struct drm_connector *conn;
+       struct drm_connector_state *conn_state;
        int i, ret = 0;
 
        DRM_DEBUG_ATOMIC("checking %p\n", state);
@@ -1724,6 +1944,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
                }
        }
 
+       for_each_new_connector_in_state(state, conn, conn_state, i) {
+               ret = drm_atomic_connector_check(conn, conn_state);
+               if (ret) {
+                       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
+                                        conn->base.id, conn->name);
+                       return ret;
+               }
+       }
+
        if (config->funcs->atomic_check) {
                ret = config->funcs->atomic_check(state->dev, state);
 
@@ -2047,45 +2276,6 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
        return ret;
 }
 
-/**
- * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
- *
- * @dev: drm device to check.
- * @plane_mask: plane mask for planes that were updated.
- * @ret: return value, can be -EDEADLK for a retry.
- *
- * Before doing an update &drm_plane.old_fb is set to &drm_plane.fb, but before
- * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This
- * is a common operation for each atomic update, so this call is split off as a
- * helper.
- */
-void drm_atomic_clean_old_fb(struct drm_device *dev,
-                            unsigned plane_mask,
-                            int ret)
-{
-       struct drm_plane *plane;
-
-       /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-        * locks (ie. while it is still safe to deref plane->state).  We
-        * need to do this here because the driver entry points cannot
-        * distinguish between legacy and atomic ioctls.
-        */
-       drm_for_each_plane_mask(plane, dev, plane_mask) {
-               if (ret == 0) {
-                       struct drm_framebuffer *new_fb = plane->state->fb;
-                       if (new_fb)
-                               drm_framebuffer_get(new_fb);
-                       plane->fb = new_fb;
-                       plane->crtc = plane->state->crtc;
-
-                       if (plane->old_fb)
-                               drm_framebuffer_put(plane->old_fb);
-               }
-               plane->old_fb = NULL;
-       }
-}
-EXPORT_SYMBOL(drm_atomic_clean_old_fb);
-
 /**
  * DOC: explicit fencing properties
  *
@@ -2161,7 +2351,7 @@ static int setup_out_fence(struct drm_out_fence_state *fence_state,
        return 0;
 }
 
-static int prepare_crtc_signaling(struct drm_device *dev,
+static int prepare_signaling(struct drm_device *dev,
                                  struct drm_atomic_state *state,
                                  struct drm_mode_atomic *arg,
                                  struct drm_file *file_priv,
@@ -2170,6 +2360,8 @@ static int prepare_crtc_signaling(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
+       struct drm_connector *conn;
+       struct drm_connector_state *conn_state;
        int i, c = 0, ret;
 
        if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
@@ -2235,6 +2427,45 @@ static int prepare_crtc_signaling(struct drm_device *dev,
                c++;
        }
 
+       for_each_new_connector_in_state(state, conn, conn_state, i) {
+               struct drm_writeback_connector *wb_conn;
+               struct drm_writeback_job *job;
+               struct drm_out_fence_state *f;
+               struct dma_fence *fence;
+               s32 __user *fence_ptr;
+
+               fence_ptr = get_out_fence_for_connector(state, conn);
+               if (!fence_ptr)
+                       continue;
+
+               job = drm_atomic_get_writeback_job(conn_state);
+               if (!job)
+                       return -ENOMEM;
+
+               f = krealloc(*fence_state, sizeof(**fence_state) *
+                            (*num_fences + 1), GFP_KERNEL);
+               if (!f)
+                       return -ENOMEM;
+
+               memset(&f[*num_fences], 0, sizeof(*f));
+
+               f[*num_fences].out_fence_ptr = fence_ptr;
+               *fence_state = f;
+
+               wb_conn = drm_connector_to_writeback(conn);
+               fence = drm_writeback_get_out_fence(wb_conn);
+               if (!fence)
+                       return -ENOMEM;
+
+               ret = setup_out_fence(&f[(*num_fences)++], fence);
+               if (ret) {
+                       dma_fence_put(fence);
+                       return ret;
+               }
+
+               job->out_fence = fence;
+       }
+
        /*
         * Having this flag means userspace is waiting for an event that can
         * never be delivered, since the commit contains no CRTC to signal it.
@@ -2245,11 +2476,11 @@ static int prepare_crtc_signaling(struct drm_device *dev,
        return 0;
 }
 
-static void complete_crtc_signaling(struct drm_device *dev,
-                                   struct drm_atomic_state *state,
-                                   struct drm_out_fence_state *fence_state,
-                                   unsigned int num_fences,
-                                   bool install_fds)
+static void complete_signaling(struct drm_device *dev,
+                              struct drm_atomic_state *state,
+                              struct drm_out_fence_state *fence_state,
+                              unsigned int num_fences,
+                              bool install_fds)
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
@@ -2306,9 +2537,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        unsigned int copied_objs, copied_props;
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
-       struct drm_plane *plane;
        struct drm_out_fence_state *fence_state;
-       unsigned plane_mask;
        int ret = 0;
        unsigned int i, j, num_fences;
 
@@ -2348,7 +2577,6 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
-       plane_mask = 0;
        copied_objs = 0;
        copied_props = 0;
        fence_state = NULL;
@@ -2419,17 +2647,11 @@ retry:
                        copied_props++;
                }
 
-               if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
-                   !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
-                       plane = obj_to_plane(obj);
-                       plane_mask |= (1 << drm_plane_index(plane));
-                       plane->old_fb = plane->fb;
-               }
                drm_mode_object_put(obj);
        }
 
-       ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
-                                    &num_fences);
+       ret = prepare_signaling(dev, state, arg, file_priv, &fence_state,
+                               &num_fences);
        if (ret)
                goto out;
 
@@ -2445,9 +2667,7 @@ retry:
        }
 
 out:
-       drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
-       complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
+       complete_signaling(dev, state, fence_state, num_fences, !ret);
 
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
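For orientation, here is a minimal userspace sketch of the out-fence flow that prepare_signaling() now implements for writeback connectors. It assumes libdrm, a writeback connector already attached to an active CRTC, and that conn_id, wb_fb_id and the WRITEBACK_FB_ID / WRITEBACK_OUT_FENCE_PTR property IDs were looked up beforehand; this is illustrative, not part of the patch:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Commit a writeback job; on success the kernel stores a sync_file fd
 * in out_fence_fd that signals once the write-out has completed. */
static int commit_writeback(int fd, uint32_t conn_id, uint32_t wb_fb_id,
                            uint32_t prop_wb_fb, uint32_t prop_out_fence)
{
        int32_t out_fence_fd = -1;
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -ENOMEM;

        /* Framebuffer the hardware writes the CRTC contents into. */
        drmModeAtomicAddProperty(req, conn_id, prop_wb_fb, wb_fb_id);
        /* User pointer the kernel fills with the out-fence fd. */
        drmModeAtomicAddProperty(req, conn_id, prop_out_fence,
                                 (uint64_t)(uintptr_t)&out_fence_fd);

        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
        drmModeAtomicFree(req);

        return ret ? ret : out_fence_fd;
}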
index 130da5195f3b622062c274507eb0727c76601473..866a2cc72ef68458e1003135902c0bb913eda4f7 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_writeback.h>
 #include <linux/dma-fence.h>
 
 #include "drm_crtc_helper_internal.h"
@@ -120,7 +121,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
                        new_encoder = drm_atomic_helper_best_encoder(connector);
 
                if (new_encoder) {
-                       if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+                       if (encoder_mask & drm_encoder_mask(new_encoder)) {
                                DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
                                        new_encoder->base.id, new_encoder->name,
                                        connector->base.id, connector->name);
@@ -128,7 +129,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
                                return -EINVAL;
                        }
 
-                       encoder_mask |= 1 << drm_encoder_index(new_encoder);
+                       encoder_mask |= drm_encoder_mask(new_encoder);
                }
        }
 
@@ -154,7 +155,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
                        continue;
 
                encoder = connector->state->best_encoder;
-               if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
+               if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
                        continue;
 
                if (!disable_conflicting_encoders) {
@@ -222,7 +223,7 @@ set_best_encoder(struct drm_atomic_state *state,
                        crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 
                        crtc_state->encoder_mask &=
-                               ~(1 << drm_encoder_index(conn_state->best_encoder));
+                               ~drm_encoder_mask(conn_state->best_encoder);
                }
        }
 
@@ -233,7 +234,7 @@ set_best_encoder(struct drm_atomic_state *state,
                        crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
 
                        crtc_state->encoder_mask |=
-                               1 << drm_encoder_index(encoder);
+                               drm_encoder_mask(encoder);
                }
        }
 
@@ -644,7 +645,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                if (ret)
                        return ret;
 
-               connectors_mask += BIT(i);
+               connectors_mask |= BIT(i);
        }
 
        /*
@@ -1172,6 +1173,27 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
 
+static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+                                               struct drm_atomic_state *old_state)
+{
+       struct drm_connector *connector;
+       struct drm_connector_state *new_conn_state;
+       int i;
+
+       for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+               const struct drm_connector_helper_funcs *funcs;
+
+               funcs = connector->helper_private;
+               if (!funcs->atomic_commit)
+                       continue;
+
+               if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+                       WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+                       funcs->atomic_commit(connector, new_conn_state);
+               }
+       }
+}
+
 /**
  * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
  * @dev: DRM device
@@ -1251,6 +1273,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 
                drm_bridge_enable(encoder->bridge);
        }
+
+       drm_atomic_helper_commit_writebacks(dev, old_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
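Taken together with the new commit_writebacks() step above, the driver's side of the contract stays small. A sketch of a &drm_connector_helper_funcs.atomic_commit implementation, assuming the drm_writeback_queue_job(wb_conn, job) signature of this era and an illustrative foo_hw_enable_writeback() hardware hook:

static void foo_wb_atomic_commit(struct drm_connector *conn,
                                 struct drm_connector_state *conn_state)
{
        struct drm_writeback_connector *wb_conn =
                drm_connector_to_writeback(conn);
        struct drm_writeback_job *job = conn_state->writeback_job;

        /* Program the write-back engine to capture into job->fb. */
        foo_hw_enable_writeback(wb_conn, job->fb);

        /* Hand the job to the core; the IRQ handler later calls
         * drm_writeback_signal_completion() when the write-out is done. */
        drm_writeback_queue_job(wb_conn, job);
        conn_state->writeback_job = NULL;
}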
 
@@ -1426,6 +1450,8 @@ void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
 
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
+       drm_atomic_helper_fake_vblank(old_state);
+
        drm_atomic_helper_commit_hw_done(old_state);
 
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -1455,6 +1481,8 @@ void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
        drm_atomic_helper_commit_planes(dev, old_state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
+       drm_atomic_helper_fake_vblank(old_state);
+
        drm_atomic_helper_commit_hw_done(old_state);
 
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@ -2029,6 +2057,45 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
 }
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
 
+/**
+ * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
+ * @old_state: atomic state object with old state structures
+ *
+ * This function walks all CRTCs and fakes VBLANK events on those with
+ * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
+ * The primary use case is writeback connectors working in oneshot mode,
+ * which only fake a VBLANK event when a job is queued; without this helper,
+ * any change to the pipeline that does not touch the connector would lead
+ * to timeouts in drm_atomic_helper_wait_for_vblanks() or
+ * drm_atomic_helper_wait_for_flip_done().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+{
+       struct drm_crtc_state *new_crtc_state;
+       struct drm_crtc *crtc;
+       int i;
+
+       for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+               unsigned long flags;
+
+               if (!new_crtc_state->no_vblank)
+                       continue;
+
+               spin_lock_irqsave(&old_state->dev->event_lock, flags);
+               if (new_crtc_state->event) {
+                       drm_crtc_send_vblank_event(crtc,
+                                                  new_crtc_state->event);
+                       new_crtc_state->event = NULL;
+               }
+               spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
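As a sketch of the consumer side: a CRTC without real scanout (e.g. one that only feeds a writeback connector) can opt in by setting the flag from its atomic_check, so the helper above delivers the pageflip event (the foo_* name is illustrative, and setting the flag here is one possible place, not mandated by the patch):

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *state)
{
        /* No VBLANK interrupt exists on this pipeline; let
         * drm_atomic_helper_fake_vblank() send the event instead. */
        state->no_vblank = true;

        return 0;
}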
+
 /**
  * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
  * @old_state: atomic state object with old state structures
@@ -2320,11 +2387,13 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
        const struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_crtc *crtc = old_crtc_state->crtc;
        struct drm_atomic_state *old_state = old_crtc_state->state;
+       struct drm_crtc_state *new_crtc_state =
+               drm_atomic_get_new_crtc_state(old_state, crtc);
        struct drm_plane *plane;
        unsigned plane_mask;
 
        plane_mask = old_crtc_state->plane_mask;
-       plane_mask |= crtc->state->plane_mask;
+       plane_mask |= new_crtc_state->plane_mask;
 
        crtc_funcs = crtc->helper_private;
        if (crtc_funcs && crtc_funcs->atomic_begin)
@@ -2333,6 +2402,8 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
        drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
                struct drm_plane_state *old_plane_state =
                        drm_atomic_get_old_plane_state(old_state, plane);
+               struct drm_plane_state *new_plane_state =
+                       drm_atomic_get_new_plane_state(old_state, plane);
                const struct drm_plane_helper_funcs *plane_funcs;
 
                plane_funcs = plane->helper_private;
@@ -2340,13 +2411,14 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
                if (!old_plane_state || !plane_funcs)
                        continue;
 
-               WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
+               WARN_ON(new_plane_state->crtc &&
+                       new_plane_state->crtc != crtc);
 
-               if (drm_atomic_plane_disabling(old_plane_state, plane->state) &&
+               if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
                    plane_funcs->atomic_disable)
                        plane_funcs->atomic_disable(plane, old_plane_state);
-               else if (plane->state->crtc ||
-                        drm_atomic_plane_disabling(old_plane_state, plane->state))
+               else if (new_plane_state->crtc ||
+                        drm_atomic_plane_disabling(old_plane_state, new_plane_state))
                        plane_funcs->atomic_update(plane, old_plane_state);
        }
 
@@ -2795,7 +2867,7 @@ static int update_output_state(struct drm_atomic_state *state,
  * resets the "link-status" property to GOOD, to force any link
  * re-training. The SETCRTC ioctl does not define whether an update does
  * need a full modeset or just a plane update, hence we're allowed to do
- * that. See also drm_mode_connector_set_link_status_property().
+ * that. See also drm_connector_set_link_status_property().
  *
  * Returns:
  * Returns 0 on success, negative errno numbers on failure.
@@ -2914,7 +2986,6 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
        struct drm_plane *plane;
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
-       unsigned plane_mask = 0;
        int ret, i;
 
        state = drm_atomic_state_alloc(dev);
@@ -2957,17 +3028,10 @@ static int __drm_atomic_helper_disable_all(struct drm_device *dev,
                        goto free;
 
                drm_atomic_set_fb_for_plane(plane_state, NULL);
-
-               if (clean_old_fbs) {
-                       plane->old_fb = plane->fb;
-                       plane_mask |= BIT(drm_plane_index(plane));
-               }
        }
 
        ret = drm_atomic_commit(state);
 free:
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
        drm_atomic_state_put(state);
        return ret;
 }
@@ -3129,13 +3193,8 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
 
        state->acquire_ctx = ctx;
 
-       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               WARN_ON(plane->crtc != new_plane_state->crtc);
-               WARN_ON(plane->fb != new_plane_state->fb);
-               WARN_ON(plane->old_fb);
-
+       for_each_new_plane_in_state(state, plane, new_plane_state, i)
                state->planes[i].old_state = plane->state;
-       }
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
                state->crtcs[i].old_state = crtc->state;
@@ -3660,6 +3719,9 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
        if (state->crtc)
                drm_connector_get(connector);
        state->commit = NULL;
+
+       /* Don't copy over a writeback job, they are used only once */
+       state->writeback_job = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
 
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
new file mode 100644 (file)
index 0000000..baff50a
--- /dev/null
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include <drm/drm_client.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drmP.h>
+
+#include "drm_crtc_internal.h"
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * This library provides support for clients running in the kernel like fbdev and bootsplash.
+ * Currently it's only partially implemented, just enough to support fbdev.
+ *
+ * GEM drivers which provide a GEM based dumb buffer with a virtual address are supported.
+ */
+
+static int drm_client_open(struct drm_client_dev *client)
+{
+       struct drm_device *dev = client->dev;
+       struct drm_file *file;
+
+       file = drm_file_alloc(dev->primary);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       mutex_lock(&dev->filelist_mutex);
+       list_add(&file->lhead, &dev->filelist_internal);
+       mutex_unlock(&dev->filelist_mutex);
+
+       client->file = file;
+
+       return 0;
+}
+
+static void drm_client_close(struct drm_client_dev *client)
+{
+       struct drm_device *dev = client->dev;
+
+       mutex_lock(&dev->filelist_mutex);
+       list_del(&client->file->lhead);
+       mutex_unlock(&dev->filelist_mutex);
+
+       drm_file_free(client->file);
+}
+EXPORT_SYMBOL(drm_client_close);
+
+/**
+ * drm_client_new - Create a DRM client
+ * @dev: DRM device
+ * @client: DRM client
+ * @name: Client name
+ * @funcs: DRM client functions (optional)
+ *
+ * The caller needs to hold a reference on @dev before calling this function.
+ * The client is freed when the &drm_device is unregistered. See drm_client_release().
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+                  const char *name, const struct drm_client_funcs *funcs)
+{
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
+           !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+               return -ENOTSUPP;
+
+       if (funcs && !try_module_get(funcs->owner))
+               return -ENODEV;
+
+       client->dev = dev;
+       client->name = name;
+       client->funcs = funcs;
+
+       ret = drm_client_open(client);
+       if (ret)
+               goto err_put_module;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_add(&client->list, &dev->clientlist);
+       mutex_unlock(&dev->clientlist_mutex);
+
+       drm_dev_get(dev);
+
+       return 0;
+
+err_put_module:
+       if (funcs)
+               module_put(funcs->owner);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_client_new);
+
+/**
+ * drm_client_release - Release DRM client resources
+ * @client: DRM client
+ *
+ * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
+ *
+ * This function should only be called from the unregister callback. An
+ * exception is fbdev, which cannot free the buffer while userspace still has
+ * open file descriptors.
+ *
+ * Note:
+ * Clients cannot initiate a release by themselves. This is done to keep the code simple.
+ * The driver has to be unloaded before the client can be unloaded.
+ */
+void drm_client_release(struct drm_client_dev *client)
+{
+       struct drm_device *dev = client->dev;
+
+       DRM_DEV_DEBUG_KMS(dev->dev, "%s\n", client->name);
+
+       drm_client_close(client);
+       drm_dev_put(dev);
+       if (client->funcs)
+               module_put(client->funcs->owner);
+}
+EXPORT_SYMBOL(drm_client_release);
+
+void drm_client_dev_unregister(struct drm_device *dev)
+{
+       struct drm_client_dev *client, *tmp;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
+               list_del(&client->list);
+               if (client->funcs && client->funcs->unregister) {
+                       client->funcs->unregister(client);
+               } else {
+                       drm_client_release(client);
+                       kfree(client);
+               }
+       }
+       mutex_unlock(&dev->clientlist_mutex);
+}
+
+/**
+ * drm_client_dev_hotplug - Send hotplug event to clients
+ * @dev: DRM device
+ *
+ * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
+ *
+ * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
+ * don't need to call this function themselves.
+ */
+void drm_client_dev_hotplug(struct drm_device *dev)
+{
+       struct drm_client_dev *client;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_for_each_entry(client, &dev->clientlist, list) {
+               if (!client->funcs || !client->funcs->hotplug)
+                       continue;
+
+               ret = client->funcs->hotplug(client);
+               DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+       }
+       mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_dev_hotplug);
+
+void drm_client_dev_restore(struct drm_device *dev)
+{
+       struct drm_client_dev *client;
+       int ret;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_for_each_entry(client, &dev->clientlist, list) {
+               if (!client->funcs || !client->funcs->restore)
+                       continue;
+
+               ret = client->funcs->restore(client);
+               DRM_DEV_DEBUG_KMS(dev->dev, "%s: ret=%d\n", client->name, ret);
+               if (!ret) /* The first one to return zero gets the privilege to restore */
+                       break;
+       }
+       mutex_unlock(&dev->clientlist_mutex);
+}
+
+static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+{
+       struct drm_device *dev = buffer->client->dev;
+
+       if (buffer->vaddr && dev->driver->gem_prime_vunmap)
+               dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+
+       if (buffer->gem)
+               drm_gem_object_put_unlocked(buffer->gem);
+
+       if (buffer->handle)
+               drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
+
+       kfree(buffer);
+}
+
+static struct drm_client_buffer *
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+       struct drm_mode_create_dumb dumb_args = { };
+       struct drm_device *dev = client->dev;
+       struct drm_client_buffer *buffer;
+       struct drm_gem_object *obj;
+       void *vaddr;
+       int ret;
+
+       buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+       if (!buffer)
+               return ERR_PTR(-ENOMEM);
+
+       buffer->client = client;
+
+       dumb_args.width = width;
+       dumb_args.height = height;
+       dumb_args.bpp = drm_format_plane_cpp(format, 0) * 8;
+       ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
+       if (ret)
+               goto err_delete;
+
+       buffer->handle = dumb_args.handle;
+       buffer->pitch = dumb_args.pitch;
+
+       obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+       if (!obj)  {
+               ret = -ENOENT;
+               goto err_delete;
+       }
+
+       buffer->gem = obj;
+
+       /*
+        * FIXME: The dependency on GEM here isn't required, we could
+        * convert the driver handle to a dma-buf instead and use the
+        * backend-agnostic dma-buf vmap support. This would
+        * require that the handle2fd prime ioctl is reworked to pull the
+        * fd_install step out of the driver backend hooks, to make that
+        * final step optional for internal users.
+        */
+       vaddr = dev->driver->gem_prime_vmap(obj);
+       if (!vaddr) {
+               ret = -ENOMEM;
+               goto err_delete;
+       }
+
+       buffer->vaddr = vaddr;
+
+       return buffer;
+
+err_delete:
+       drm_client_buffer_delete(buffer);
+
+       return ERR_PTR(ret);
+}
+
+static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
+{
+       int ret;
+
+       if (!buffer->fb)
+               return;
+
+       ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
+       if (ret)
+               DRM_DEV_ERROR(buffer->client->dev->dev,
+                             "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+
+       buffer->fb = NULL;
+}
+
+static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
+                                  u32 width, u32 height, u32 format)
+{
+       struct drm_client_dev *client = buffer->client;
+       struct drm_mode_fb_cmd fb_req = { };
+       const struct drm_format_info *info;
+       int ret;
+
+       info = drm_format_info(format);
+       fb_req.bpp = info->cpp[0] * 8;
+       fb_req.depth = info->depth;
+       fb_req.width = width;
+       fb_req.height = height;
+       fb_req.handle = buffer->handle;
+       fb_req.pitch = buffer->pitch;
+
+       ret = drm_mode_addfb(client->dev, &fb_req, client->file);
+       if (ret)
+               return ret;
+
+       buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id);
+       if (WARN_ON(!buffer->fb))
+               return -ENOENT;
+
+       /* drop the reference we picked up in framebuffer lookup */
+       drm_framebuffer_put(buffer->fb);
+
+       strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
+
+       return 0;
+}
+
+/**
+ * drm_client_framebuffer_create - Create a client framebuffer
+ * @client: DRM client
+ * @width: Framebuffer width
+ * @height: Framebuffer height
+ * @format: Buffer format
+ *
+ * This function creates a &drm_client_buffer which consists of a
+ * &drm_framebuffer backed by a dumb buffer.
+ * Call drm_client_framebuffer_delete() to free the buffer.
+ *
+ * Returns:
+ * Pointer to a client buffer or an error pointer on failure.
+ */
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+{
+       struct drm_client_buffer *buffer;
+       int ret;
+
+       buffer = drm_client_buffer_create(client, width, height, format);
+       if (IS_ERR(buffer))
+               return buffer;
+
+       ret = drm_client_buffer_addfb(buffer, width, height, format);
+       if (ret) {
+               drm_client_buffer_delete(buffer);
+               return ERR_PTR(ret);
+       }
+
+       return buffer;
+}
+EXPORT_SYMBOL(drm_client_framebuffer_create);
+
+/**
+ * drm_client_framebuffer_delete - Delete a client framebuffer
+ * @buffer: DRM client buffer (can be NULL)
+ */
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
+{
+       if (!buffer)
+               return;
+
+       drm_client_buffer_rmfb(buffer);
+       drm_client_buffer_delete(buffer);
+}
+EXPORT_SYMBOL(drm_client_framebuffer_delete);
+
+#ifdef CONFIG_DEBUG_FS
+static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_printer p = drm_seq_file_printer(m);
+       struct drm_client_dev *client;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_for_each_entry(client, &dev->clientlist, list)
+               drm_printf(&p, "%s\n", client->name);
+       mutex_unlock(&dev->clientlist_mutex);
+
+       return 0;
+}
+
+static const struct drm_info_list drm_client_debugfs_list[] = {
+       { "internal_clients", drm_client_debugfs_internal_clients, 0 },
+};
+
+int drm_client_debugfs_init(struct drm_minor *minor)
+{
+       return drm_debugfs_create_files(drm_client_debugfs_list,
+                                       ARRAY_SIZE(drm_client_debugfs_list),
+                                       minor->debugfs_root, minor);
+}
+#endif
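To make the library's flow concrete, a minimal sketch of an in-kernel client (error unwinding trimmed; the client name, size and XRGB8888 format are arbitrary, and the usual drm_client.h / drm_fourcc.h / linux/slab.h includes are assumed):

static const struct drm_client_funcs foo_client_funcs = {
        .owner = THIS_MODULE,
        /* .unregister, .hotplug and .restore are optional */
};

static int foo_client_setup(struct drm_device *dev)
{
        struct drm_client_dev *client;
        struct drm_client_buffer *buffer;
        int ret;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;

        ret = drm_client_new(dev, client, "foo-client", &foo_client_funcs);
        if (ret) {
                kfree(client);
                return ret;
        }

        /* Dumb-buffer backed framebuffer, CPU-visible at buffer->vaddr. */
        buffer = drm_client_framebuffer_create(client, 1024, 768,
                                               DRM_FORMAT_XRGB8888);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        memset(buffer->vaddr, 0, buffer->pitch * 768);

        return 0;
}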
index 9b9ba5d5ec0cb30549e678346161d9f21dc01a78..6011d769d50bb51197bfd935f3e6fff660878a1b 100644 (file)
@@ -48,7 +48,7 @@
  *
  * Connectors must be attached to an encoder to be used. For devices that map
  * connectors to encoders 1:1, the connector should be attached at
- * initialization time with a call to drm_mode_connector_attach_encoder(). The
+ * initialization time with a call to drm_connector_attach_encoder(). The
  * driver must also set the &drm_connector.encoder field to point to the
  * attached encoder.
  *
@@ -87,6 +87,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
        { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
        { DRM_MODE_CONNECTOR_DSI, "DSI" },
        { DRM_MODE_CONNECTOR_DPI, "DPI" },
+       { DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
 };
 
 void drm_connector_ida_init(void)
@@ -195,6 +196,10 @@ int drm_connector_init(struct drm_device *dev,
        struct ida *connector_ida =
                &drm_connector_enum_list[connector_type].ida;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+               (!funcs->atomic_destroy_state ||
+                !funcs->atomic_duplicate_state));
+
        ret = __drm_mode_object_add(dev, &connector->base,
                                    DRM_MODE_OBJECT_CONNECTOR,
                                    false, drm_connector_free);
@@ -249,7 +254,8 @@ int drm_connector_init(struct drm_device *dev,
        config->num_connector++;
        spin_unlock_irq(&config->connector_list_lock);
 
-       if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+       if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
+           connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
                drm_object_attach_property(&connector->base,
                                              config->edid_property,
                                              0);
@@ -285,7 +291,7 @@ out_put:
 EXPORT_SYMBOL(drm_connector_init);
 
 /**
- * drm_mode_connector_attach_encoder - attach a connector to an encoder
+ * drm_connector_attach_encoder - attach a connector to an encoder
  * @connector: connector to attach
  * @encoder: encoder to attach @connector to
  *
@@ -296,8 +302,8 @@ EXPORT_SYMBOL(drm_connector_init);
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
-                                     struct drm_encoder *encoder)
+int drm_connector_attach_encoder(struct drm_connector *connector,
+                                struct drm_encoder *encoder)
 {
        int i;
 
@@ -315,7 +321,7 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
        if (WARN_ON(connector->encoder))
                return -EINVAL;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+       for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) {
                if (connector->encoder_ids[i] == 0) {
                        connector->encoder_ids[i] = encoder->base.id;
                        return 0;
@@ -323,7 +329,30 @@ int drm_mode_connector_attach_encoder(struct drm_connector *connector,
        }
        return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
+EXPORT_SYMBOL(drm_connector_attach_encoder);
+
+/**
+ * drm_connector_has_possible_encoder - check if the connector and encoder are associated with each other
+ * @connector: the connector
+ * @encoder: the encoder
+ *
+ * Returns:
+ * True if @encoder is one of the possible encoders for @connector.
+ */
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+                                       struct drm_encoder *encoder)
+{
+       struct drm_encoder *enc;
+       int i;
+
+       drm_connector_for_each_possible_encoder(connector, enc, i) {
+               if (enc == encoder)
+                       return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL(drm_connector_has_possible_encoder);
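Typical use is validating a userspace-supplied pairing before acting on it, e.g. (sketch):

        /* Reject requests that pair a connector with an encoder it
         * cannot drive. */
        if (!drm_connector_has_possible_encoder(connector, encoder))
                return -EINVAL;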
 
 static void drm_mode_remove(struct drm_connector *connector,
                            struct drm_display_mode *mode)
@@ -577,7 +606,7 @@ __drm_connector_put_safe(struct drm_connector *conn)
 
 /**
  * drm_connector_list_iter_next - return next connector
- * @iter: connectr_list iterator
+ * @iter: connector_list iterator
  *
  * Returns the next connector for @iter, or NULL when the list walk has
  * completed.
@@ -720,6 +749,14 @@ static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
        { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
 };
 
+static const struct drm_prop_enum_list drm_content_type_enum_list[] = {
+       { DRM_MODE_CONTENT_TYPE_NO_DATA, "No Data" },
+       { DRM_MODE_CONTENT_TYPE_GRAPHICS, "Graphics" },
+       { DRM_MODE_CONTENT_TYPE_PHOTO, "Photo" },
+       { DRM_MODE_CONTENT_TYPE_CINEMA, "Cinema" },
+       { DRM_MODE_CONTENT_TYPE_GAME, "Game" },
+};
+
 static const struct drm_prop_enum_list drm_panel_orientation_enum_list[] = {
        { DRM_MODE_PANEL_ORIENTATION_NORMAL,    "Normal"        },
        { DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP, "Upside Down"   },
@@ -777,7 +814,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
  *     Blob property which contains the current EDID read from the sink. This
  *     is useful to parse sink identification information like vendor, model
  *     and serial. Drivers should update this property by calling
- *     drm_mode_connector_update_edid_property(), usually after having parsed
+ *     drm_connector_update_edid_property(), usually after having parsed
  *     the EDID using drm_add_edid_modes(). Userspace cannot change this
  *     property.
  * DPMS:
@@ -815,7 +852,7 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
  * PATH:
  *     Connector path property to identify how this sink is physically
  *     connected. Used by DP MST. This should be set by calling
- *     drm_mode_connector_set_path_property(), in the case of DP MST with the
+ *     drm_connector_set_path_property(), in the case of DP MST with the
  *     path property the MST manager created. Userspace cannot change this
  *     property.
  * TILE:
@@ -826,14 +863,14 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
  *     are not gen-locked. Note that for tiled panels which are genlocked, like
  *     dual-link LVDS or dual-link DSI, the driver should try to not expose the
  *     tiling and virtualize both &drm_crtc and &drm_plane if needed. Drivers
- *     should update this value using drm_mode_connector_set_tile_property().
+ *     should update this value using drm_connector_set_tile_property().
  *     Userspace cannot change this property.
  * link-status:
  *      Connector link-status property to indicate the status of link. The
  *      default value of link-status is "GOOD". If something fails during or
  *      after modeset, the kernel driver may set this to "BAD" and issue a
  *      hotplug uevent. Drivers should update this value using
- *      drm_mode_connector_set_link_status_property().
+ *      drm_connector_set_link_status_property().
  * non_desktop:
  *     Indicates the output should be ignored for purposes of displaying a
  *     standard desktop environment or console. This is most likely because
@@ -996,6 +1033,82 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
 
+/**
+ * DOC: HDMI connector properties
+ *
+ * content type (HDMI specific):
+ *     Indicates the content type setting to be used in HDMI infoframes so
+ *     that the external device can adjust its display settings accordingly.
+ *
+ *     The value of this property can be one of the following:
+ *
+ *     No Data:
+ *             Content type is unknown
+ *     Graphics:
+ *             Content type is graphics
+ *     Photo:
+ *             Content type is photo
+ *     Cinema:
+ *             Content type is cinema
+ *     Game:
+ *             Content type is game
+ *
+ *     Drivers can set up this property by calling
+ *     drm_connector_attach_content_type_property(). Decoding to
+ *     infoframe values is done through drm_hdmi_avi_infoframe_content_type().
+ */
+
+/**
+ * drm_connector_attach_content_type_property - attach content-type property
+ * @connector: connector to attach content type property on.
+ *
+ * Called by a driver the first time an HDMI connector is created.
+ */
+int drm_connector_attach_content_type_property(struct drm_connector *connector)
+{
+       if (!drm_mode_create_content_type_property(connector->dev))
+               drm_object_attach_property(&connector->base,
+                                          connector->dev->mode_config.content_type_property,
+                                          DRM_MODE_CONTENT_TYPE_NO_DATA);
+       return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_content_type_property);
+
+
+/**
+ * drm_hdmi_avi_infoframe_content_type() - fill the HDMI AVI infoframe
+ *                                         content type information, based
+ *                                         on the corresponding DRM property.
+ * @frame: HDMI AVI infoframe
+ * @conn_state: DRM display connector state
+ *
+ */
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+                                        const struct drm_connector_state *conn_state)
+{
+       switch (conn_state->content_type) {
+       case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+               frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+               break;
+       case DRM_MODE_CONTENT_TYPE_CINEMA:
+               frame->content_type = HDMI_CONTENT_TYPE_CINEMA;
+               break;
+       case DRM_MODE_CONTENT_TYPE_GAME:
+               frame->content_type = HDMI_CONTENT_TYPE_GAME;
+               break;
+       case DRM_MODE_CONTENT_TYPE_PHOTO:
+               frame->content_type = HDMI_CONTENT_TYPE_PHOTO;
+               break;
+       default:
+               /* Graphics is the default (0) */
+               frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+       }
+
+       frame->itc = conn_state->content_type != DRM_MODE_CONTENT_TYPE_NO_DATA;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
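How the two halves fit together, as a sketch: the property is attached once at init time, and the decoded value is folded into the AVI infoframe on each modeset (the foo_* wrapper and the hardware write are illustrative; building the rest of the infoframe is assumed to happen elsewhere):

/* At connector init: */
drm_connector_attach_content_type_property(connector);

/* At modeset time, after building the AVI infoframe: */
static void foo_fill_avi_infoframe(struct hdmi_avi_infoframe *frame,
                                   const struct drm_connector_state *conn_state)
{
        drm_hdmi_avi_infoframe_content_type(frame, conn_state);
        /* ... then write *frame to the encoder hardware ... */
}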
+
 /**
  * drm_create_tv_properties - create TV specific connector properties
  * @dev: DRM device
@@ -1260,6 +1373,33 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
 
+/**
+ * drm_mode_create_content_type_property - create content type property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it is needed; the property must then be
+ * attached to the desired connectors.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_create_content_type_property(struct drm_device *dev)
+{
+       if (dev->mode_config.content_type_property)
+               return 0;
+
+       dev->mode_config.content_type_property =
+               drm_property_create_enum(dev, 0, "content type",
+                                        drm_content_type_enum_list,
+                                        ARRAY_SIZE(drm_content_type_enum_list));
+
+       if (dev->mode_config.content_type_property == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_content_type_property);
+
 /**
 * drm_mode_create_suggested_offset_properties - create suggested offset properties
  * @dev: DRM device
@@ -1285,7 +1425,7 @@ int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
 
 /**
- * drm_mode_connector_set_path_property - set tile property on connector
+ * drm_connector_set_path_property - set path property on connector
  * @connector: connector to set property on.
  * @path: path to use for property; must not be NULL.
  *
@@ -1297,8 +1437,8 @@ EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
-                                        const char *path)
+int drm_connector_set_path_property(struct drm_connector *connector,
+                                   const char *path)
 {
        struct drm_device *dev = connector->dev;
        int ret;
@@ -1311,10 +1451,10 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
                                               dev->mode_config.path_property);
        return ret;
 }
-EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+EXPORT_SYMBOL(drm_connector_set_path_property);
 
 /**
- * drm_mode_connector_set_tile_property - set tile property on connector
+ * drm_connector_set_tile_property - set tile property on connector
  * @connector: connector to set property on.
  *
  * This looks up the tile information for a connector, and creates a
@@ -1324,7 +1464,7 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
  * Returns:
  * Zero on success, errno on failure.
  */
-int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+int drm_connector_set_tile_property(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        char tile[256];
@@ -1354,10 +1494,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector)
                                               dev->mode_config.tile_property);
        return ret;
 }
-EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+EXPORT_SYMBOL(drm_connector_set_tile_property);
 
 /**
- * drm_mode_connector_update_edid_property - update the edid property of a connector
+ * drm_connector_update_edid_property - update the edid property of a connector
  * @connector: drm connector
  * @edid: new value of the edid property
  *
@@ -1367,8 +1507,8 @@ EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-                                           const struct edid *edid)
+int drm_connector_update_edid_property(struct drm_connector *connector,
+                                      const struct edid *edid)
 {
        struct drm_device *dev = connector->dev;
        size_t size = 0;
@@ -1406,10 +1546,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                               dev->mode_config.edid_property);
        return ret;
 }
-EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+EXPORT_SYMBOL(drm_connector_update_edid_property);
 
 /**
- * drm_mode_connector_set_link_status_property - Set link status property of a connector
+ * drm_connector_set_link_status_property - Set link status property of a connector
  * @connector: drm connector
  * @link_status: new value of link status property (0: Good, 1: Bad)
  *
@@ -1427,8 +1567,8 @@ EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
  * it is not limited to DP or link training. For example, if we implement
  * asynchronous setcrtc, this property can be used to report any failures in that.
  */
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
-                                                uint64_t link_status)
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+                                           uint64_t link_status)
 {
        struct drm_device *dev = connector->dev;
 
@@ -1436,7 +1576,7 @@ void drm_mode_connector_set_link_status_property(struct drm_connector *connector
        connector->state->link_status = link_status;
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
 }
-EXPORT_SYMBOL(drm_mode_connector_set_link_status_property);
+EXPORT_SYMBOL(drm_connector_set_link_status_property);
 
 /**
  * drm_connector_init_panel_orientation_property -
@@ -1489,7 +1629,7 @@ int drm_connector_init_panel_orientation_property(
 }
 EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
 
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
                                    struct drm_property *property,
                                    uint64_t value)
 {
@@ -1507,8 +1647,8 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
        return ret;
 }
 
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-                                      void *data, struct drm_file *file_priv)
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv)
 {
        struct drm_mode_connector_set_property *conn_set_prop = data;
        struct drm_mode_obj_set_property obj_set_prop = {
@@ -1589,22 +1729,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
        if (!connector)
                return -ENOENT;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
-               if (connector->encoder_ids[i] != 0)
-                       encoders_count++;
+       drm_connector_for_each_possible_encoder(connector, encoder, i)
+               encoders_count++;
 
        if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
                copied = 0;
                encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-                       if (connector->encoder_ids[i] != 0) {
-                               if (put_user(connector->encoder_ids[i],
-                                            encoder_ptr + copied)) {
-                                       ret = -EFAULT;
-                                       goto out;
-                               }
-                               copied++;
+
+               drm_connector_for_each_possible_encoder(connector, encoder, i) {
+                       if (put_user(encoder->base.id, encoder_ptr + copied)) {
+                               ret = -EFAULT;
+                               goto out;
                        }
+                       copied++;
                }
        }
        out_resp->count_encoders = encoders_count;
index 98a36e6c69ad1b158184daa434559800abf3b86a..bae43938c8f6128ce29d75ee03a84c310c779621 100644 (file)
@@ -225,16 +225,9 @@ static const char *drm_crtc_fence_get_timeline_name(struct dma_fence *fence)
        return crtc->timeline_name;
 }
 
-static bool drm_crtc_fence_enable_signaling(struct dma_fence *fence)
-{
-       return true;
-}
-
 static const struct dma_fence_ops drm_crtc_fence_ops = {
        .get_driver_name = drm_crtc_fence_get_driver_name,
        .get_timeline_name = drm_crtc_fence_get_timeline_name,
-       .enable_signaling = drm_crtc_fence_enable_signaling,
-       .wait = dma_fence_default_wait,
 };
 
 struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
@@ -286,6 +279,10 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        if (WARN_ON(config->num_crtc >= 32))
                return -EINVAL;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+               (!funcs->atomic_destroy_state ||
+                !funcs->atomic_duplicate_state));
+
        crtc->dev = dev;
        crtc->funcs = funcs;
 
@@ -325,9 +322,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        crtc->primary = primary;
        crtc->cursor = cursor;
        if (primary && !primary->possible_crtcs)
-               primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+               primary->possible_crtcs = drm_crtc_mask(crtc);
        if (cursor && !cursor->possible_crtcs)
-               cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
+               cursor->possible_crtcs = drm_crtc_mask(crtc);
 
        ret = drm_crtc_crc_init(crtc);
        if (ret) {
@@ -464,32 +461,42 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
        struct drm_crtc *tmp;
        int ret;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
+
        /*
         * NOTE: ->set_config can also disable other crtcs (if we steal all
         * connectors from it), hence we need to refcount the fbs across all
         * crtcs. Atomic modeset will have saner semantics ...
         */
-       drm_for_each_crtc(tmp, crtc->dev)
-               tmp->primary->old_fb = tmp->primary->fb;
+       drm_for_each_crtc(tmp, crtc->dev) {
+               struct drm_plane *plane = tmp->primary;
+
+               plane->old_fb = plane->fb;
+       }
 
        fb = set->fb;
 
        ret = crtc->funcs->set_config(set, ctx);
        if (ret == 0) {
-               crtc->primary->crtc = fb ? crtc : NULL;
-               crtc->primary->fb = fb;
+               struct drm_plane *plane = crtc->primary;
+
+               plane->crtc = fb ? crtc : NULL;
+               plane->fb = fb;
        }
 
        drm_for_each_crtc(tmp, crtc->dev) {
-               if (tmp->primary->fb)
-                       drm_framebuffer_get(tmp->primary->fb);
-               if (tmp->primary->old_fb)
-                       drm_framebuffer_put(tmp->primary->old_fb);
-               tmp->primary->old_fb = NULL;
+               struct drm_plane *plane = tmp->primary;
+
+               if (plane->fb)
+                       drm_framebuffer_get(plane->fb);
+               if (plane->old_fb)
+                       drm_framebuffer_put(plane->old_fb);
+               plane->old_fb = NULL;
        }
 
        return ret;
 }
+
 /**
  * drm_mode_set_config_internal - helper to call &drm_mode_config_funcs.set_config
  * @set: modeset config to set
@@ -640,7 +647,9 @@ retry:
 
                ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode);
                if (ret) {
-                       DRM_DEBUG_KMS("Invalid mode\n");
+                       DRM_DEBUG_KMS("Invalid mode (ret=%d, status=%s)\n",
+                                     ret, drm_get_mode_status_name(mode->status));
+                       drm_mode_debug_printmodeline(mode);
                        goto out;
                }
 
@@ -732,7 +741,11 @@ retry:
        set.connectors = connector_set;
        set.num_connectors = crtc_req->count_connectors;
        set.fb = fb;
-       ret = __drm_mode_set_config_internal(&set, &ctx);
+
+       if (drm_drv_uses_atomic_modeset(dev))
+               ret = crtc->funcs->set_config(&set, &ctx);
+       else
+               ret = __drm_mode_set_config_internal(&set, &ctx);
 
 out:
        if (fb)
index 5d307b23a4e66f176b8ce83cfd2119935a7da597..b61322763394af2f3dd29f8a49e48a5dc85bf9d1 100644 (file)
@@ -56,12 +56,21 @@ int drm_mode_setcrtc(struct drm_device *dev,
 int drm_modeset_register_all(struct drm_device *dev);
 void drm_modeset_unregister_all(struct drm_device *dev);
 
+/* drm_modes.c */
+const char *drm_get_mode_status_name(enum drm_mode_status status);
+
 /* IOCTLs */
 int drm_mode_getresources(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
 
 
 /* drm_dumb_buffers.c */
+int drm_mode_create_dumb(struct drm_device *dev,
+                        struct drm_mode_create_dumb *args,
+                        struct drm_file *file_priv);
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+                         struct drm_file *file_priv);
+
 /* IOCTLs */
 int drm_mode_create_dumb_ioctl(struct drm_device *dev,
                               void *data, struct drm_file *file_priv);
@@ -139,7 +148,7 @@ void drm_connector_ida_init(void);
 void drm_connector_ida_destroy(void);
 void drm_connector_unregister_all(struct drm_device *dev);
 int drm_connector_register_all(struct drm_device *dev);
-int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+int drm_connector_set_obj_prop(struct drm_mode_object *obj,
                                    struct drm_property *property,
                                    uint64_t value);
 int drm_connector_create_standard_properties(struct drm_device *dev);
@@ -147,8 +156,8 @@ const char *drm_get_connector_force_name(enum drm_connector_force force);
 void drm_connector_free_work_fn(struct work_struct *work);
 
 /* IOCTL */
-int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-                                         void *data, struct drm_file *file_priv);
+int drm_connector_property_set_ioctl(struct drm_device *dev,
+                                    void *data, struct drm_file *file_priv);
 int drm_mode_getconnector(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
 
@@ -163,14 +172,19 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
                                     const struct drm_framebuffer *fb);
 void drm_fb_release(struct drm_file *file_priv);
 
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+                  struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+                 struct drm_file *file_priv);
+
 
 /* IOCTL */
-int drm_mode_addfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv);
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv);
 int drm_mode_addfb2(struct drm_device *dev,
                    void *data, struct drm_file *file_priv);
-int drm_mode_rmfb(struct drm_device *dev,
-                 void *data, struct drm_file *file_priv);
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv);
 int drm_mode_getfb(struct drm_device *dev,
                   void *data, struct drm_file *file_priv);
 int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
index b2482818fee8c22c125c5f725d5d2fb6db1fdc91..6f28fe58f1696ca5010915149173e966df35ffc7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
+#include <drm/drm_client.h>
 #include <drm/drm_debugfs.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_atomic.h>
@@ -164,6 +165,12 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                        DRM_ERROR("Failed to create framebuffer debugfs file\n");
                        return ret;
                }
+
+               ret = drm_client_debugfs_init(minor);
+               if (ret) {
+                       DRM_ERROR("Failed to create client debugfs file\n");
+                       return ret;
+               }
        }
 
        if (dev->driver->debugfs_init) {
@@ -307,13 +314,13 @@ static ssize_t edid_write(struct file *file, const char __user *ubuf,
 
        if (len == 5 && !strncmp(buf, "reset", 5)) {
                connector->override_edid = false;
-               ret = drm_mode_connector_update_edid_property(connector, NULL);
+               ret = drm_connector_update_edid_property(connector, NULL);
        } else if (len < EDID_LENGTH ||
                   EDID_LENGTH * (1 + edid->extensions) > len)
                ret = -EINVAL;
        else {
                connector->override_edid = false;
-               ret = drm_mode_connector_update_edid_property(connector, edid);
+               ret = drm_connector_update_edid_property(connector, edid);
                if (!ret)
                        connector->override_edid = true;
        }
index 9f8312137cadec4c5e9eeb03ca7ca6147be3b355..99961192bf034f893cbac5521c996dc98aa49887 100644 (file)
@@ -139,6 +139,7 @@ static int crtc_crc_data_count(struct drm_crtc_crc *crc)
 static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
 {
        kfree(crc->entries);
+       crc->overflow = false;
        crc->entries = NULL;
        crc->head = 0;
        crc->tail = 0;
@@ -391,8 +392,14 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
        tail = crc->tail;
 
        if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
+               bool was_overflow = crc->overflow;
+
+               crc->overflow = true;
                spin_unlock(&crc->lock);
-               DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
+               if (!was_overflow)
+                       DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+
                return -ENOBUFS;
        }
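For context, the buffer being overflowed here is drained through debugfs. A sketch of a userspace consumer, assuming card 0 / CRTC 0 paths and the generic "auto" CRC source (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char line[128];
        int ctl = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
        FILE *data;

        write(ctl, "auto", 4);          /* select the default CRC source */
        close(ctl);

        data = fopen("/sys/kernel/debug/dri/0/crtc-0/crc/data", "r");
        while (fgets(line, sizeof(line), data))
                fputs(line, stdout);    /* one frame/CRC entry per line */

        fclose(data);
        return 0;
}

Draining the data file quickly enough is what keeps CIRC_SPACE() from hitting the overflow path above.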
 
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
new file mode 100644 (file)
index 0000000..9885133
--- /dev/null
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DisplayPort CEC-Tunneling-over-AUX support
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <drm/drm_dp_helper.h>
+#include <media/cec.h>
+
+/*
+ * Unfortunately it turns out that we have a chicken-and-egg situation
+ * here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
+ * have a converter chip that supports CEC-Tunneling-over-AUX (usually the
+ * Parade PS176), but they do not wire up the CEC pin, thus making CEC
+ * useless.
+ *
+ * Sadly there is no way for this driver to know this. What happens is
+ * that a /dev/cecX device is created that is isolated and unable to see
+ * any of the other CEC devices. Quite literally the CEC wire is cut
+ * (or in this case, never connected in the first place).
+ *
+ * The reason so few adapters support this is that this tunneling protocol
+ * was never supported by any OS. So there was no easy way of testing it,
+ * and no incentive to correctly wire up the CEC pin.
+ *
+ * Hopefully by creating this driver it will be easier for vendors to
+ * finally fix their adapters and test the CEC functionality.
+ *
+ * I keep a list of known working adapters here:
+ *
+ * https://hverkuil.home.xs4all.nl/cec-status.txt
+ *
+ * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * and is not yet listed there.
+ *
+ * Note that the current implementation does not support CEC over an MST hub.
+ * As far as I can see there is no mechanism defined in the DisplayPort
+ * standard to transport CEC interrupts over an MST device. It might be
+ * possible to do this through polling, but I have not been able to get that
+ * to work.
+ */
+
+/**
+ * DOC: dp cec helpers
+ *
+ * These functions take care of supporting the CEC-Tunneling-over-AUX
+ * feature of DisplayPort-to-HDMI adapters.
+ */
+
+/*
+ * When the EDID is unset because the HPD went low, then the CEC DPCD registers
+ * typically can no longer be read (true for a DP-to-HDMI adapter since it is
+ * powered by the HPD). However, some displays toggle the HPD off and on for a
+ * short period for one reason or another, and that would cause the CEC adapter
+ * to be removed and added again, even though nothing else changed.
+ *
+ * This module parameter sets a delay in seconds before the CEC adapter is
+ * actually unregistered. Only if the HPD does not return within that time will
+ * the CEC adapter be unregistered.
+ *
+ * If it is set to a value >= NEVER_UNREG_DELAY, then the CEC adapter will never
+ * be unregistered for as long as the connector remains registered.
+ *
+ * If it is set to 0, then the CEC adapter will be unregistered immediately as
+ * soon as the HPD disappears.
+ *
+ * The default is one second to prevent short HPD glitches from unregistering
+ * the CEC adapter.
+ *
+ * Note that for integrated HDMI branch devices that support CEC the DPCD
+ * registers remain available even if the HPD goes low since it is not powered
+ * by the HPD. In that case the CEC adapter will never be unregistered during
+ * the lifetime of the connector. At least, this is the theory since I do not
+ * have hardware with an integrated HDMI branch device that supports CEC.
+ */
+#define NEVER_UNREG_DELAY 1000
+static unsigned int drm_dp_cec_unregister_delay = 1;
+module_param(drm_dp_cec_unregister_delay, uint, 0600);
+MODULE_PARM_DESC(drm_dp_cec_unregister_delay,
+                "CEC unregister delay in seconds, 0: no delay, >= 1000: never unregister");
+
+static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+       struct drm_dp_aux *aux = cec_get_drvdata(adap);
+       u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0;
+       ssize_t err = 0;
+
+       err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+       return (enable && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+{
+       struct drm_dp_aux *aux = cec_get_drvdata(adap);
+       /* Bit 15 (logical address 15) should always be set */
+       u16 la_mask = 1 << CEC_LOG_ADDR_BROADCAST;
+       u8 mask[2];
+       ssize_t err;
+
+       if (addr != CEC_LOG_ADDR_INVALID)
+               la_mask |= adap->log_addrs.log_addr_mask | (1 << addr);
+       mask[0] = la_mask & 0xff;
+       mask[1] = la_mask >> 8;
+       err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2);
+       return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0;
+}
+
+static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+                                   u32 signal_free_time, struct cec_msg *msg)
+{
+       struct drm_dp_aux *aux = cec_get_drvdata(adap);
+       unsigned int retries = min(5, attempts - 1);
+       ssize_t err;
+
+       err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER,
+                               msg->msg, msg->len);
+       if (err < 0)
+               return err;
+
+       err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO,
+                                (msg->len - 1) | (retries << 4) |
+                                DP_CEC_TX_MESSAGE_SEND);
+       return err < 0 ? err : 0;
+}
+
+static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap,
+                                             bool enable)
+{
+       struct drm_dp_aux *aux = cec_get_drvdata(adap);
+       ssize_t err;
+       u8 val;
+
+       if (!(adap->capabilities & CEC_CAP_MONITOR_ALL))
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val);
+       if (err >= 0) {
+               if (enable)
+                       val |= DP_CEC_SNOOPING_ENABLE;
+               else
+                       val &= ~DP_CEC_SNOOPING_ENABLE;
+               err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val);
+       }
+       return (enable && err < 0) ? err : 0;
+}
+
+static void drm_dp_cec_adap_status(struct cec_adapter *adap,
+                                  struct seq_file *file)
+{
+       struct drm_dp_aux *aux = cec_get_drvdata(adap);
+       struct drm_dp_desc desc;
+       struct drm_dp_dpcd_ident *id = &desc.ident;
+
+       if (drm_dp_read_desc(aux, &desc, true))
+               return;
+       seq_printf(file, "OUI: %*phD\n",
+                  (int)sizeof(id->oui), id->oui);
+       seq_printf(file, "ID: %*pE\n",
+                  (int)strnlen(id->device_id, sizeof(id->device_id)),
+                  id->device_id);
+       seq_printf(file, "HW Rev: %d.%d\n", id->hw_rev >> 4, id->hw_rev & 0xf);
+       /*
+        * Show this both in decimal and hex: at least one vendor
+        * always reports this in hex.
+        */
+       seq_printf(file, "FW/SW Rev: %d.%d (0x%02x.0x%02x)\n",
+                  id->sw_major_rev, id->sw_minor_rev,
+                  id->sw_major_rev, id->sw_minor_rev);
+}
+
+static const struct cec_adap_ops drm_dp_cec_adap_ops = {
+       .adap_enable = drm_dp_cec_adap_enable,
+       .adap_log_addr = drm_dp_cec_adap_log_addr,
+       .adap_transmit = drm_dp_cec_adap_transmit,
+       .adap_monitor_all_enable = drm_dp_cec_adap_monitor_all_enable,
+       .adap_status = drm_dp_cec_adap_status,
+};
+
+static int drm_dp_cec_received(struct drm_dp_aux *aux)
+{
+       struct cec_adapter *adap = aux->cec.adap;
+       struct cec_msg msg;
+       u8 rx_msg_info;
+       ssize_t err;
+
+       err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info);
+       if (err < 0)
+               return err;
+
+       if (!(rx_msg_info & DP_CEC_RX_MESSAGE_ENDED))
+               return 0;
+
+       msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1;
+       err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len);
+       if (err < 0)
+               return err;
+
+       cec_received_msg(adap, &msg);
+       return 0;
+}
+
+static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux)
+{
+       struct cec_adapter *adap = aux->cec.adap;
+       u8 flags;
+
+       if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0)
+               return;
+
+       if (flags & DP_CEC_RX_MESSAGE_INFO_VALID)
+               drm_dp_cec_received(aux);
+
+       if (flags & DP_CEC_TX_MESSAGE_SENT)
+               cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+       else if (flags & DP_CEC_TX_LINE_ERROR)
+               cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR |
+                                               CEC_TX_STATUS_MAX_RETRIES);
+       else if (flags &
+                (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR))
+               cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
+                                               CEC_TX_STATUS_MAX_RETRIES);
+       drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags);
+}
+
+/**
+ * drm_dp_cec_irq() - handle CEC interrupt, if any
+ * @aux: DisplayPort AUX channel
+ *
+ * Should be called when handling an IRQ_HPD request. If CEC-tunneling-over-AUX
+ * is present, then it will check for a CEC_IRQ and handle it accordingly.
+ */
+void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+       u8 cec_irq;
+       int ret;
+
+       mutex_lock(&aux->cec.lock);
+       if (!aux->cec.adap)
+               goto unlock;
+
+       ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1,
+                               &cec_irq);
+       if (ret < 0 || !(cec_irq & DP_CEC_IRQ))
+               goto unlock;
+
+       drm_dp_cec_handle_irq(aux);
+       drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ);
+unlock:
+       mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_irq);
+
+static bool drm_dp_cec_cap(struct drm_dp_aux *aux, u8 *cec_cap)
+{
+       u8 cap = 0;
+
+       if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 ||
+           !(cap & DP_CEC_TUNNELING_CAPABLE))
+               return false;
+       if (cec_cap)
+               *cec_cap = cap;
+       return true;
+}
+
+/*
+ * Called if the HPD was low for more than drm_dp_cec_unregister_delay
+ * seconds. This unregisters the CEC adapter.
+ */
+static void drm_dp_cec_unregister_work(struct work_struct *work)
+{
+       struct drm_dp_aux *aux = container_of(work, struct drm_dp_aux,
+                                             cec.unregister_work.work);
+
+       mutex_lock(&aux->cec.lock);
+       cec_unregister_adapter(aux->cec.adap);
+       aux->cec.adap = NULL;
+       mutex_unlock(&aux->cec.lock);
+}
+
+/*
+ * A new EDID is set. If there is no CEC adapter, then create one. If
+ * there was a CEC adapter, then check if the CEC adapter properties
+ * were unchanged and just update the CEC physical address. Otherwise
+ * unregister the old CEC adapter and create a new one.
+ */
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
+{
+       u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD;
+       unsigned int num_las = 1;
+       u8 cap;
+
+#ifndef CONFIG_MEDIA_CEC_RC
+       /*
+        * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
+        * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
+        *
+        * Do this here as well to ensure the tests against cec_caps are
+        * correct.
+        */
+       cec_caps &= ~CEC_CAP_RC;
+#endif
+       cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+       mutex_lock(&aux->cec.lock);
+       if (!drm_dp_cec_cap(aux, &cap)) {
+               /* CEC is not supported, unregister any existing adapter */
+               cec_unregister_adapter(aux->cec.adap);
+               aux->cec.adap = NULL;
+               goto unlock;
+       }
+
+       if (cap & DP_CEC_SNOOPING_CAPABLE)
+               cec_caps |= CEC_CAP_MONITOR_ALL;
+       if (cap & DP_CEC_MULTIPLE_LA_CAPABLE)
+               num_las = CEC_MAX_LOG_ADDRS;
+
+       if (aux->cec.adap) {
+               if (aux->cec.adap->capabilities == cec_caps &&
+                   aux->cec.adap->available_log_addrs == num_las) {
+                       /* Unchanged, so just set the phys addr */
+                       cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+                       goto unlock;
+               }
+               /*
+                * The capabilities changed, so unregister the old
+                * adapter first.
+                */
+               cec_unregister_adapter(aux->cec.adap);
+       }
+
+       /* Create a new adapter */
+       aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops,
+                                            aux, aux->cec.name, cec_caps,
+                                            num_las);
+       if (IS_ERR(aux->cec.adap)) {
+               aux->cec.adap = NULL;
+               goto unlock;
+       }
+       if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) {
+               cec_delete_adapter(aux->cec.adap);
+               aux->cec.adap = NULL;
+       } else {
+               /*
+                * Update the phys addr for the new CEC adapter. When called
+                * from drm_dp_cec_register_connector(), edid == NULL, so in
+                * that case the phys addr is just invalidated.
+                */
+               cec_s_phys_addr_from_edid(aux->cec.adap, edid);
+       }
+unlock:
+       mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_set_edid);
+
+/*
+ * The EDID disappeared (likely because of the HPD going down).
+ */
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+       cancel_delayed_work_sync(&aux->cec.unregister_work);
+
+       mutex_lock(&aux->cec.lock);
+       if (!aux->cec.adap)
+               goto unlock;
+
+       cec_phys_addr_invalidate(aux->cec.adap);
+       /*
+        * We're done if we want to keep the CEC device
+        * (drm_dp_cec_unregister_delay is >= NEVER_UNREG_DELAY) or if the
+        * DPCD still indicates the CEC capability (expected for an integrated
+        * HDMI branch device).
+        */
+       if (drm_dp_cec_unregister_delay < NEVER_UNREG_DELAY &&
+           !drm_dp_cec_cap(aux, NULL)) {
+               /*
+                * Unregister the CEC adapter after drm_dp_cec_unregister_delay
+                * seconds. This is to debounce short HPD off-and-on cycles from
+                * displays.
+                */
+               schedule_delayed_work(&aux->cec.unregister_work,
+                                     drm_dp_cec_unregister_delay * HZ);
+       }
+unlock:
+       mutex_unlock(&aux->cec.lock);
+}
+EXPORT_SYMBOL(drm_dp_cec_unset_edid);
+
+/**
+ * drm_dp_cec_register_connector() - register a new connector
+ * @aux: DisplayPort AUX channel
+ * @name: name of the CEC device
+ * @parent: parent device
+ *
+ * A new connector was registered with associated CEC adapter name and
+ * CEC adapter parent device. After registering the name and parent,
+ * drm_dp_cec_set_edid() is called to check if the connector supports
+ * CEC and to register a CEC adapter if that is the case.
+ */
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+                                  struct device *parent)
+{
+       WARN_ON(aux->cec.adap);
+       aux->cec.name = name;
+       aux->cec.parent = parent;
+       INIT_DELAYED_WORK(&aux->cec.unregister_work,
+                         drm_dp_cec_unregister_work);
+
+       drm_dp_cec_set_edid(aux, NULL);
+}
+EXPORT_SYMBOL(drm_dp_cec_register_connector);
+
+/**
+ * drm_dp_cec_unregister_connector() - unregister the CEC adapter, if any
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+       if (!aux->cec.adap)
+               return;
+       cancel_delayed_work_sync(&aux->cec.unregister_work);
+       cec_unregister_adapter(aux->cec.adap);
+       aux->cec.adap = NULL;
+}
+EXPORT_SYMBOL(drm_dp_cec_unregister_connector);
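
Taken together, the new file gives connector drivers five entry points: register/unregister, set/unset EDID, and the IRQ hook. A hedged sketch of the intended call flow from a DP connector's lifecycle; struct my_dp and the my_dp_* callbacks are hypothetical, only the drm_dp_cec_*() calls come from this patch:

#include <drm/drm_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>

struct my_dp {
	struct drm_dp_aux aux;
	struct drm_connector connector;
};

/* once, after the connector and AUX channel are registered */
static void my_dp_connector_registered(struct my_dp *dp, struct device *dev)
{
	drm_dp_cec_register_connector(&dp->aux, dp->connector.name, dev);
}

/* whenever a new EDID is read (HPD went high) */
static void my_dp_edid_read(struct my_dp *dp, const struct edid *edid)
{
	drm_dp_cec_set_edid(&dp->aux, edid);
}

/* when the HPD goes low and the EDID is dropped */
static void my_dp_hpd_down(struct my_dp *dp)
{
	drm_dp_cec_unset_edid(&dp->aux);
}

/* from the short-pulse (IRQ_HPD) handler */
static void my_dp_hpd_irq(struct my_dp *dp)
{
	drm_dp_cec_irq(&dp->aux);
}

/* on connector teardown */
static void my_dp_connector_destroy(struct my_dp *dp)
{
	drm_dp_cec_unregister_connector(&dp->aux);
}

How long an adapter survives an HPD drop is governed by the drm_dp_cec_unregister_delay parameter above: 0 unregisters immediately, values >= NEVER_UNREG_DELAY (1000) keep it for the connector's lifetime.
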
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a7ba602a43a82bc978d349543b4158196a2c5edf..0cccbcb2d03ea8670e71ee62a9bd6526db6b0b97 100644 (file)
@@ -185,6 +185,20 @@ EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
 
 #define AUX_RETRY_INTERVAL 500 /* us */
 
+static inline void
+drm_dp_dump_access(const struct drm_dp_aux *aux,
+                  u8 request, uint offset, void *buffer, int ret)
+{
+       const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
+
+       if (ret > 0)
+               drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
+                       aux->name, offset, arrow, ret, min(ret, 20), buffer);
+       else
+               drm_dbg(DRM_UT_DP, "%s: 0x%05x AUX %s (ret=%3d)\n",
+                       aux->name, offset, arrow, ret);
+}
+
 /**
  * DOC: dp helpers
  *
@@ -288,10 +302,14 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
        ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
                                 1);
        if (ret != 1)
-               return ret;
+               goto out;
 
-       return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
-                                 size);
+       ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
+                                size);
+
+out:
+       drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
+       return ret;
 }
 EXPORT_SYMBOL(drm_dp_dpcd_read);
 
@@ -312,8 +330,12 @@ EXPORT_SYMBOL(drm_dp_dpcd_read);
 ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
                          void *buffer, size_t size)
 {
-       return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
-                                 size);
+       int ret;
+
+       ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer,
+                                size);
+       drm_dp_dump_access(aux, DP_AUX_NATIVE_WRITE, offset, buffer, ret);
+       return ret;
 }
 EXPORT_SYMBOL(drm_dp_dpcd_write);
 
@@ -1087,6 +1109,7 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
 void drm_dp_aux_init(struct drm_dp_aux *aux)
 {
        mutex_init(&aux->hw_mutex);
+       mutex_init(&aux->cec.lock);
        INIT_WORK(&aux->crc_work, drm_dp_aux_crc_work);
 
        aux->ddc.algo = &drm_dp_i2c_algo;
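
Every DPCD transfer now funnels through drm_dp_dump_access(), so AUX traffic can be traced by setting bit 8 (0x100) in the drm.debug bitmask (the new DP category added in drm_drv.c below). A hedged sketch of a read that would be logged; the log line in the comment is illustrative, not captured output:

#include <drm/drm_dp_helper.h>

static int my_read_dpcd_rev(struct drm_dp_aux *aux)
{
	u8 rev;
	ssize_t err = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);

	if (err < 0)
		return err;
	/* with drm.debug=0x100, roughly: "<aux->name>: 0x00000 AUX -> (ret=  1) 12" */
	return rev;
}
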
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 658830620ca3668a706817ce5efb1805d32be9b0..7780567aa6692fa56450aed78ff101b294fe7da6 100644 (file)
@@ -1215,7 +1215,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
                    port->port_num >= DP_MST_LOGICAL_PORT_0) {
                        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
-                       drm_mode_connector_set_tile_property(port->connector);
+                       drm_connector_set_tile_property(port->connector);
                }
                (*mstb->mgr->cbs->register_connector)(port->connector);
        }
@@ -2559,7 +2559,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
                edid = drm_edid_duplicate(port->cached_edid);
        else {
                edid = drm_get_edid(connector, &port->aux.ddc);
-               drm_mode_connector_set_tile_property(connector);
+               drm_connector_set_tile_property(connector);
        }
        port->has_audio = drm_detect_monitor_audio(edid);
        drm_dp_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b553a6f2ff0eb27dec7ad0aaeeb891992fe19ab8..ea4941da9b273f39fb81ab238eae769589f6a1c7 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/srcu.h>
 
+#include <drm/drm_client.h>
 #include <drm/drm_drv.h>
 #include <drm/drmP.h>
 
@@ -53,13 +54,14 @@ MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
 MODULE_DESCRIPTION("DRM shared core routines");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
-"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
-"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
-"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
-"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
-"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
-"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n"
-"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)");
+"\t\tBit 0 (0x01)  will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02)  will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04)  will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08)  will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10)  will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20)  will enable VBL messages (vblank code)\n"
+"\t\tBit 7 (0x80)  will enable LEASE messages (leasing code)\n"
+"\t\tBit 8 (0x100) will enable DP messages (displayport code)");
 module_param_named(debug, drm_debug, int, 0600);
 
 static DEFINE_SPINLOCK(drm_minor_lock);
@@ -369,13 +371,6 @@ EXPORT_SYMBOL(drm_dev_exit);
  */
 void drm_dev_unplug(struct drm_device *dev)
 {
-       drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
-
        /*
         * After synchronizing any critical read section is guaranteed to see
         * the new value of ->unplugged, and any critical section which might
@@ -384,6 +379,13 @@ void drm_dev_unplug(struct drm_device *dev)
         */
        dev->unplugged = true;
        synchronize_srcu(&drm_unplug_srcu);
+
+       drm_dev_unregister(dev);
+
+       mutex_lock(&drm_global_mutex);
+       if (dev->open_count == 0)
+               drm_dev_put(dev);
+       mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
@@ -505,6 +507,8 @@ int drm_dev_init(struct drm_device *dev,
        dev->driver = driver;
 
        INIT_LIST_HEAD(&dev->filelist);
+       INIT_LIST_HEAD(&dev->filelist_internal);
+       INIT_LIST_HEAD(&dev->clientlist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
@@ -514,6 +518,7 @@ int drm_dev_init(struct drm_device *dev,
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->filelist_mutex);
+       mutex_init(&dev->clientlist_mutex);
        mutex_init(&dev->ctxlist_mutex);
        mutex_init(&dev->master_mutex);
 
@@ -569,6 +574,7 @@ err_minors:
 err_free:
        mutex_destroy(&dev->master_mutex);
        mutex_destroy(&dev->ctxlist_mutex);
+       mutex_destroy(&dev->clientlist_mutex);
        mutex_destroy(&dev->filelist_mutex);
        mutex_destroy(&dev->struct_mutex);
        return ret;
@@ -603,6 +609,7 @@ void drm_dev_fini(struct drm_device *dev)
 
        mutex_destroy(&dev->master_mutex);
        mutex_destroy(&dev->ctxlist_mutex);
+       mutex_destroy(&dev->clientlist_mutex);
        mutex_destroy(&dev->filelist_mutex);
        mutex_destroy(&dev->struct_mutex);
        kfree(dev->unique);
@@ -858,6 +865,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
        dev->registered = false;
 
+       drm_client_dev_unregister(dev);
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);
 
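
Reordering drm_dev_unplug() so that ->unplugged is set and the SRCU grace period completes before drm_dev_unregister() runs means that any I/O path bracketed by drm_dev_enter()/drm_dev_exit() either finishes before the unregister or observes the unplug. A hedged sketch of that read-side pattern; my_hw_op() is hypothetical:

#include <drm/drm_drv.h>

static int my_hw_op(struct drm_device *dev)
{
	int idx, ret = 0;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;	/* device already unplugged */

	/* ... safe to touch hardware here ... */

	drm_dev_exit(idx);
	return ret;
}
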
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 9e2ae02f31e08fbad87669a126761c015d25db44..81dfdd33753a90651d08e94b7e1b89a38d76de25 100644 (file)
  * a hardware-specific ioctl to allocate suitable buffer objects.
  */
 
-int drm_mode_create_dumb_ioctl(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv)
+int drm_mode_create_dumb(struct drm_device *dev,
+                        struct drm_mode_create_dumb *args,
+                        struct drm_file *file_priv)
 {
-       struct drm_mode_create_dumb *args = data;
        u32 cpp, stride, size;
 
        if (!dev->driver->dumb_create)
@@ -92,6 +92,12 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
        return dev->driver->dumb_create(file_priv, dev, args);
 }
 
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv)
+{
+       return drm_mode_create_dumb(dev, data, file_priv);
+}
+
 /**
  * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
  * @dev: DRM device
@@ -123,17 +129,22 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
                                               &args->offset);
 }
 
-int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
-                               void *data, struct drm_file *file_priv)
+int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
+                         struct drm_file *file_priv)
 {
-       struct drm_mode_destroy_dumb *args = data;
-
        if (!dev->driver->dumb_create)
                return -ENOSYS;
 
        if (dev->driver->dumb_destroy)
-               return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+               return dev->driver->dumb_destroy(file_priv, dev, handle);
        else
-               return drm_gem_dumb_destroy(file_priv, dev, args->handle);
+               return drm_gem_dumb_destroy(file_priv, dev, handle);
 }
 
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv)
+{
+       struct drm_mode_destroy_dumb *args = data;
+
+       return drm_mode_destroy_dumb(dev, args->handle, file_priv);
+}
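
Splitting the ioctl wrappers from drm_mode_create_dumb() and drm_mode_destroy_dumb() lets in-kernel callers (notably the new drm_client code hooked up elsewhere in this merge) allocate dumb buffers without a userspace round trip. A hedged sketch of such a caller, using only the signatures introduced above; my_dumb_roundtrip() is hypothetical and the declarations live in DRM core internals:

#include <drm/drmP.h>

static int my_dumb_roundtrip(struct drm_device *dev,
			     struct drm_file *file_priv)
{
	struct drm_mode_create_dumb args = {
		.width = 1024,
		.height = 768,
		.bpp = 32,
	};
	int ret;

	ret = drm_mode_create_dumb(dev, &args, file_priv);
	if (ret)
		return ret;

	/* args.handle, args.pitch and args.size are now filled in */

	return drm_mode_destroy_dumb(dev, args.handle, file_priv);
}
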
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a5808382bdf0333b2124a4ecf0d85c4346814000..5dc742b27ca033652880ceaabc1257ae5b633328 100644 (file)
@@ -163,8 +163,9 @@ static const struct edid_quirk {
        /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1 */
        { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 
-       /* HTC Vive VR Headset */
+       /* HTC Vive and Vive Pro VR Headsets */
        { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+       { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
 
        /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
        { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
@@ -687,562 +688,562 @@ static const struct minimode extra_modes[] = {
 static const struct drm_display_mode edid_cea_modes[] = {
        /* 0 - dummy, VICs start at 1 */
        { },
-       /* 1 - 640x480@60Hz */
+       /* 1 - 640x480@60Hz 4:3 */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 2 - 720x480@60Hz */
+       /* 2 - 720x480@60Hz 4:3 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 3 - 720x480@60Hz */
+       /* 3 - 720x480@60Hz 16:9 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 4 - 1280x720@60Hz */
+       /* 4 - 1280x720@60Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 5 - 1920x1080i@60Hz */
+       /* 5 - 1920x1080i@60Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 6 - 720(1440)x480i@60Hz */
+       /* 6 - 720(1440)x480i@60Hz 4:3 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 7 - 720(1440)x480i@60Hz */
+       /* 7 - 720(1440)x480i@60Hz 16:9 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 8 - 720(1440)x240@60Hz */
+       /* 8 - 720(1440)x240@60Hz 4:3 */
        { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 9 - 720(1440)x240@60Hz */
+       /* 9 - 720(1440)x240@60Hz 16:9 */
        { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
                   801, 858, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 10 - 2880x480i@60Hz */
+       /* 10 - 2880x480i@60Hz 4:3 */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 11 - 2880x480i@60Hz */
+       /* 11 - 2880x480i@60Hz 16:9 */
        { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 12 - 2880x240@60Hz */
+       /* 12 - 2880x240@60Hz 4:3 */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 13 - 2880x240@60Hz */
+       /* 13 - 2880x240@60Hz 16:9 */
        { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
                   3204, 3432, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 14 - 1440x480@60Hz */
+       /* 14 - 1440x480@60Hz 4:3 */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 15 - 1440x480@60Hz */
+       /* 15 - 1440x480@60Hz 16:9 */
        { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
                   1596, 1716, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 16 - 1920x1080@60Hz */
+       /* 16 - 1920x1080@60Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 17 - 720x576@50Hz */
+       /* 17 - 720x576@50Hz 4:3 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 18 - 720x576@50Hz */
+       /* 18 - 720x576@50Hz 16:9 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 19 - 1280x720@50Hz */
+       /* 19 - 1280x720@50Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 20 - 1920x1080i@50Hz */
+       /* 20 - 1920x1080i@50Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 21 - 720(1440)x576i@50Hz */
+       /* 21 - 720(1440)x576i@50Hz 4:3 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 22 - 720(1440)x576i@50Hz */
+       /* 22 - 720(1440)x576i@50Hz 16:9 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 23 - 720(1440)x288@50Hz */
+       /* 23 - 720(1440)x288@50Hz 4:3 */
        { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 24 - 720(1440)x288@50Hz */
+       /* 24 - 720(1440)x288@50Hz 16:9 */
        { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
                   795, 864, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 25 - 2880x576i@50Hz */
+       /* 25 - 2880x576i@50Hz 4:3 */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 26 - 2880x576i@50Hz */
+       /* 26 - 2880x576i@50Hz 16:9 */
        { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 27 - 2880x288@50Hz */
+       /* 27 - 2880x288@50Hz 4:3 */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 28 - 2880x288@50Hz */
+       /* 28 - 2880x288@50Hz 16:9 */
        { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
                   3180, 3456, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 29 - 1440x576@50Hz */
+       /* 29 - 1440x576@50Hz 4:3 */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 30 - 1440x576@50Hz */
+       /* 30 - 1440x576@50Hz 16:9 */
        { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
                   1592, 1728, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 31 - 1920x1080@50Hz */
+       /* 31 - 1920x1080@50Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 32 - 1920x1080@24Hz */
+       /* 32 - 1920x1080@24Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
                   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 33 - 1920x1080@25Hz */
+       /* 33 - 1920x1080@25Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 34 - 1920x1080@30Hz */
+       /* 34 - 1920x1080@30Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 35 - 2880x480@60Hz */
+       /* 35 - 2880x480@60Hz 4:3 */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 36 - 2880x480@60Hz */
+       /* 36 - 2880x480@60Hz 16:9 */
        { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
                   3192, 3432, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 37 - 2880x576@50Hz */
+       /* 37 - 2880x576@50Hz 4:3 */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 38 - 2880x576@50Hz */
+       /* 38 - 2880x576@50Hz 16:9 */
        { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
                   3184, 3456, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 39 - 1920x1080i@50Hz */
+       /* 39 - 1920x1080i@50Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
                   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 40 - 1920x1080i@100Hz */
+       /* 40 - 1920x1080i@100Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 41 - 1280x720@100Hz */
+       /* 41 - 1280x720@100Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 42 - 720x576@100Hz */
+       /* 42 - 720x576@100Hz 4:3 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 43 - 720x576@100Hz */
+       /* 43 - 720x576@100Hz 16:9 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 44 - 720(1440)x576i@100Hz */
+       /* 44 - 720(1440)x576i@100Hz 4:3 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 45 - 720(1440)x576i@100Hz */
+       /* 45 - 720(1440)x576i@100Hz 16:9 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 46 - 1920x1080i@120Hz */
+       /* 46 - 1920x1080i@120Hz 16:9 */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
-                       DRM_MODE_FLAG_INTERLACE),
+                  DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 47 - 1280x720@120Hz */
+       /* 47 - 1280x720@120Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 48 - 720x480@120Hz */
+       /* 48 - 720x480@120Hz 4:3 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 49 - 720x480@120Hz */
+       /* 49 - 720x480@120Hz 16:9 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 50 - 720(1440)x480i@120Hz */
+       /* 50 - 720(1440)x480i@120Hz 4:3 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 51 - 720(1440)x480i@120Hz */
+       /* 51 - 720(1440)x480i@120Hz 16:9 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 52 - 720x576@200Hz */
+       /* 52 - 720x576@200Hz 4:3 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 53 - 720x576@200Hz */
+       /* 53 - 720x576@200Hz 16:9 */
        { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 54 - 720(1440)x576i@200Hz */
+       /* 54 - 720(1440)x576i@200Hz 4:3 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 55 - 720(1440)x576i@200Hz */
+       /* 55 - 720(1440)x576i@200Hz 16:9 */
        { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
                   795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 56 - 720x480@240Hz */
+       /* 56 - 720x480@240Hz 4:3 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 57 - 720x480@240Hz */
+       /* 57 - 720x480@240Hz 16:9 */
        { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 58 - 720(1440)x480i@240Hz */
+       /* 58 - 720(1440)x480i@240Hz 4:3 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 59 - 720(1440)x480i@240Hz */
+       /* 59 - 720(1440)x480i@240Hz 16:9 */
        { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
                   801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
+                  DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 60 - 1280x720@24Hz */
+       /* 60 - 1280x720@24Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 61 - 1280x720@25Hz */
+       /* 61 - 1280x720@25Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
                   3740, 3960, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 62 - 1280x720@30Hz */
+       /* 62 - 1280x720@30Hz 16:9 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 63 - 1920x1080@120Hz */
+       /* 63 - 1920x1080@120Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-        .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 64 - 1920x1080@100Hz */
+         .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+       /* 64 - 1920x1080@100Hz 16:9 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
-        .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 65 - 1280x720@24Hz */
+         .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+       /* 65 - 1280x720@24Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 66 - 1280x720@25Hz */
+       /* 66 - 1280x720@25Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
                   3740, 3960, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 67 - 1280x720@30Hz */
+       /* 67 - 1280x720@30Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 68 - 1280x720@50Hz */
+       /* 68 - 1280x720@50Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 69 - 1280x720@60Hz */
+       /* 69 - 1280x720@60Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 70 - 1280x720@100Hz */
+       /* 70 - 1280x720@100Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
                   1760, 1980, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 71 - 1280x720@120Hz */
+       /* 71 - 1280x720@120Hz 64:27 */
        { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
                   1430, 1650, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 72 - 1920x1080@24Hz */
+       /* 72 - 1920x1080@24Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
                   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 73 - 1920x1080@25Hz */
+       /* 73 - 1920x1080@25Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 74 - 1920x1080@30Hz */
+       /* 74 - 1920x1080@30Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 75 - 1920x1080@50Hz */
+       /* 75 - 1920x1080@50Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 76 - 1920x1080@60Hz */
+       /* 76 - 1920x1080@60Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 77 - 1920x1080@100Hz */
+       /* 77 - 1920x1080@100Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
                   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 78 - 1920x1080@120Hz */
+       /* 78 - 1920x1080@120Hz 64:27 */
        { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
                   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 79 - 1680x720@24Hz */
+       /* 79 - 1680x720@24Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
                   3080, 3300, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 80 - 1680x720@25Hz */
+       /* 80 - 1680x720@25Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
                   2948, 3168, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 81 - 1680x720@30Hz */
+       /* 81 - 1680x720@30Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
                   2420, 2640, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 82 - 1680x720@50Hz */
+       /* 82 - 1680x720@50Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
                   1980, 2200, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 83 - 1680x720@60Hz */
+       /* 83 - 1680x720@60Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
                   1980, 2200, 0, 720, 725, 730, 750, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 84 - 1680x720@100Hz */
+       /* 84 - 1680x720@100Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
                   1780, 2000, 0, 720, 725, 730, 825, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 85 - 1680x720@120Hz */
+       /* 85 - 1680x720@120Hz 64:27 */
        { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
                   1780, 2000, 0, 720, 725, 730, 825, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 86 - 2560x1080@24Hz */
+       /* 86 - 2560x1080@24Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
                   3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 87 - 2560x1080@25Hz */
+       /* 87 - 2560x1080@25Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
                   3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 88 - 2560x1080@30Hz */
+       /* 88 - 2560x1080@30Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
                   3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 89 - 2560x1080@50Hz */
+       /* 89 - 2560x1080@50Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
                   3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 90 - 2560x1080@60Hz */
+       /* 90 - 2560x1080@60Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
                   2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 91 - 2560x1080@100Hz */
+       /* 91 - 2560x1080@100Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
                   2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 92 - 2560x1080@120Hz */
+       /* 92 - 2560x1080@120Hz 64:27 */
        { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
                   3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 93 - 3840x2160p@24Hz 16:9 */
+       /* 93 - 3840x2160@24Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
                   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 94 - 3840x2160p@25Hz 16:9 */
+       /* 94 - 3840x2160@25Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 95 - 3840x2160p@30Hz 16:9 */
+       /* 95 - 3840x2160@30Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 96 - 3840x2160p@50Hz 16:9 */
+       /* 96 - 3840x2160@50Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 97 - 3840x2160p@60Hz 16:9 */
+       /* 97 - 3840x2160@60Hz 16:9 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 98 - 4096x2160p@24Hz 256:135 */
+       /* 98 - 4096x2160@24Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
                   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 99 - 4096x2160p@25Hz 256:135 */
+       /* 99 - 4096x2160@25Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
                   5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 100 - 4096x2160p@30Hz 256:135 */
+       /* 100 - 4096x2160@30Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
                   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 101 - 4096x2160p@50Hz 256:135 */
+       /* 101 - 4096x2160@50Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
                   5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 102 - 4096x2160p@60Hz 256:135 */
+       /* 102 - 4096x2160@60Hz 256:135 */
        { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
                   4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
-       /* 103 - 3840x2160p@24Hz 64:27 */
+       /* 103 - 3840x2160@24Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
                   5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 104 - 3840x2160p@25Hz 64:27 */
+       /* 104 - 3840x2160@25Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 105 - 3840x2160p@30Hz 64:27 */
+       /* 105 - 3840x2160@30Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 106 - 3840x2160p@50Hz 64:27 */
+       /* 106 - 3840x2160@50Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
                   4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
-       /* 107 - 3840x2160p@60Hz 64:27 */
+       /* 107 - 3840x2160@60Hz 64:27 */
        { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
                   4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
@@ -4873,6 +4874,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 
        frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
 
+       /*
+        * As some drivers don't support atomic modesetting, we can't rely on
+        * the connector state here. Instead, initialize the frame with
+        * default values, the same way the other properties are handled
+        * here.
+        */
+       frame->content_type = HDMI_CONTENT_TYPE_GRAPHICS;
+       frame->itc = 0;
+
        /*
         * Populate picture aspect ratio from either
         * user input (if specified) or from the CEA mode list.
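
For reference, a minimal sketch of how an encoder driver of this era might
consume the helper after this change; the three-argument signature with an
is_hdmi2_sink flag is assumed from this series, and my_fill_avi() with its
buffer handling is illustrative, not part of the patch:

    #include <drm/drm_edid.h>
    #include <linux/hdmi.h>

    /* Illustrative: build and pack an AVI infoframe for a given mode. */
    static int my_fill_avi(u8 *buf, size_t len,
                           const struct drm_display_mode *mode)
    {
            struct hdmi_avi_infoframe frame;
            int ret;

            ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode,
                                                           false);
            if (ret < 0)
                    return ret;

            /* content_type and itc now default to GRAPHICS and 0 */
            return hdmi_avi_infoframe_pack(&frame, buf, len);
    }

The buffer should be at least HDMI_INFOFRAME_SIZE(AVI) bytes;
hdmi_avi_infoframe_pack() returns the packed length or a negative errno.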
index 186d00adfb5f86675bfd27a8dd9d812e1475d9fe..9da36a6271d3a24380e6a1221ae027701eba6e52 100644 (file)
@@ -18,6 +18,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_client.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_print.h>
 #include <linux/module.h>
 
-#define DEFAULT_FBDEFIO_DELAY_MS 50
-
 struct drm_fbdev_cma {
        struct drm_fb_helper    fb_helper;
-       const struct drm_framebuffer_funcs *fb_funcs;
 };
 
 /**
@@ -44,36 +42,6 @@ struct drm_fbdev_cma {
  *
  * An fbdev framebuffer backed by cma is also available by calling
  * drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
- * If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
- * set up automatically. &drm_framebuffer_funcs.dirty is called by
- * drm_fb_helper_deferred_io() in process context (&struct delayed_work).
- *
- * Example fbdev deferred io code::
- *
- *     static int driver_fb_dirty(struct drm_framebuffer *fb,
- *                                struct drm_file *file_priv,
- *                                unsigned flags, unsigned color,
- *                                struct drm_clip_rect *clips,
- *                                unsigned num_clips)
- *     {
- *         struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
- *         ... push changes ...
- *         return 0;
- *     }
- *
- *     static struct drm_framebuffer_funcs driver_fb_funcs = {
- *         .destroy       = drm_gem_fb_destroy,
- *         .create_handle = drm_gem_fb_create_handle,
- *         .dirty         = driver_fb_dirty,
- *     };
- *
- * Initialize::
- *
- *     fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
- *                                           dev->mode_config.num_crtc,
- *                                           dev->mode_config.num_connector,
- *                                           &driver_fb_funcs);
- *
  */
 
 static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
@@ -131,236 +99,28 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_addr);
 
-static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
-{
-       return dma_mmap_writecombine(info->device, vma, info->screen_base,
-                                    info->fix.smem_start, info->fix.smem_len);
-}
-
-static struct fb_ops drm_fbdev_cma_ops = {
-       .owner          = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_fillrect    = drm_fb_helper_sys_fillrect,
-       .fb_copyarea    = drm_fb_helper_sys_copyarea,
-       .fb_imageblit   = drm_fb_helper_sys_imageblit,
-       .fb_mmap        = drm_fb_cma_mmap,
-};
-
-static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
-                                         struct vm_area_struct *vma)
-{
-       fb_deferred_io_mmap(info, vma);
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-       return 0;
-}
-
-static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
-                                   struct drm_gem_cma_object *cma_obj)
-{
-       struct fb_deferred_io *fbdefio;
-       struct fb_ops *fbops;
-
-       /*
-        * Per device structures are needed because:
-        * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
-        * fbdefio: individual delays
-        */
-       fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
-       fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
-       if (!fbdefio || !fbops) {
-               kfree(fbdefio);
-               kfree(fbops);
-               return -ENOMEM;
-       }
-
-       /* can't be offset from vaddr since dirty() uses cma_obj */
-       fbi->screen_buffer = cma_obj->vaddr;
-       /* fb_deferred_io_fault() needs a physical address */
-       fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
-
-       *fbops = *fbi->fbops;
-       fbi->fbops = fbops;
-
-       fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
-       fbdefio->deferred_io = drm_fb_helper_deferred_io;
-       fbi->fbdefio = fbdefio;
-       fb_deferred_io_init(fbi);
-       fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
-
-       return 0;
-}
-
-static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
-{
-       if (!fbi->fbdefio)
-               return;
-
-       fb_deferred_io_cleanup(fbi);
-       kfree(fbi->fbdefio);
-       kfree(fbi->fbops);
-}
-
-static int
-drm_fbdev_cma_create(struct drm_fb_helper *helper,
-       struct drm_fb_helper_surface_size *sizes)
-{
-       struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
-       struct drm_device *dev = helper->dev;
-       struct drm_gem_cma_object *obj;
-       struct drm_framebuffer *fb;
-       unsigned int bytes_per_pixel;
-       unsigned long offset;
-       struct fb_info *fbi;
-       size_t size;
-       int ret;
-
-       DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
-                       sizes->surface_width, sizes->surface_height,
-                       sizes->surface_bpp);
-
-       bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
-       size = sizes->surface_width * sizes->surface_height * bytes_per_pixel;
-       obj = drm_gem_cma_create(dev, size);
-       if (IS_ERR(obj))
-               return -ENOMEM;
-
-       fbi = drm_fb_helper_alloc_fbi(helper);
-       if (IS_ERR(fbi)) {
-               ret = PTR_ERR(fbi);
-               goto err_gem_free_object;
-       }
-
-       fb = drm_gem_fbdev_fb_create(dev, sizes, 0, &obj->base,
-                                    fbdev_cma->fb_funcs);
-       if (IS_ERR(fb)) {
-               dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
-               ret = PTR_ERR(fb);
-               goto err_fb_info_destroy;
-       }
-
-       helper->fb = fb;
-
-       fbi->par = helper;
-       fbi->flags = FBINFO_FLAG_DEFAULT;
-       fbi->fbops = &drm_fbdev_cma_ops;
-
-       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
-       drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
-
-       offset = fbi->var.xoffset * bytes_per_pixel;
-       offset += fbi->var.yoffset * fb->pitches[0];
-
-       dev->mode_config.fb_base = (resource_size_t)obj->paddr;
-       fbi->screen_base = obj->vaddr + offset;
-       fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
-       fbi->screen_size = size;
-       fbi->fix.smem_len = size;
-
-       if (fb->funcs->dirty) {
-               ret = drm_fbdev_cma_defio_init(fbi, obj);
-               if (ret)
-                       goto err_cma_destroy;
-       }
-
-       return 0;
-
-err_cma_destroy:
-       drm_framebuffer_remove(fb);
-err_fb_info_destroy:
-       drm_fb_helper_fini(helper);
-err_gem_free_object:
-       drm_gem_object_put_unlocked(&obj->base);
-       return ret;
-}
-
-static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
-       .fb_probe = drm_fbdev_cma_create,
-};
-
 /**
- * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation
+ * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device.
  *                 @dev->mode_config.preferred_depth is used if this is zero.
  * @max_conn_count: Maximum number of connectors.
  *                  @dev->mode_config.num_connector is used if this is zero.
- * @funcs: Framebuffer functions, in particular a custom dirty() callback.
- *         Can be NULL.
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count,
-       const struct drm_framebuffer_funcs *funcs)
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+                         unsigned int max_conn_count)
 {
        struct drm_fbdev_cma *fbdev_cma;
-       struct drm_fb_helper *fb_helper;
-       int ret;
-
-       if (!preferred_bpp)
-               preferred_bpp = dev->mode_config.preferred_depth;
-       if (!preferred_bpp)
-               preferred_bpp = 32;
-
-       if (!max_conn_count)
-               max_conn_count = dev->mode_config.num_connector;
-
-       fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
-       if (!fbdev_cma)
-               return -ENOMEM;
 
-       fbdev_cma->fb_funcs = funcs;
-       fb_helper = &fbdev_cma->fb_helper;
-
-       drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs);
-
-       ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
-       if (ret < 0) {
-               DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n");
-               goto err_free;
-       }
-
-       ret = drm_fb_helper_single_add_all_connectors(fb_helper);
-       if (ret < 0) {
-               DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n");
-               goto err_drm_fb_helper_fini;
-       }
-
-       ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
-       if (ret < 0) {
-               DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n");
-               goto err_drm_fb_helper_fini;
-       }
+       /* dev->fb_helper will indirectly point to fbdev_cma after this call */
+       fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
+       if (IS_ERR(fbdev_cma))
+               return PTR_ERR(fbdev_cma);
 
        return 0;
-
-err_drm_fb_helper_fini:
-       drm_fb_helper_fini(fb_helper);
-err_free:
-       kfree(fbdev_cma);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs);
-
-/**
- * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device.
- *                 @dev->mode_config.preferred_depth is used if this is zero.
- * @max_conn_count: Maximum number of connectors.
- *                  @dev->mode_config.num_connector is used if this is zero.
- *
- * Returns:
- * Zero on success or negative error code on failure.
- */
-int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
-                         unsigned int max_conn_count)
-{
-       return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
-                                               max_conn_count, NULL);
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
 
@@ -370,104 +130,54 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
  */
 void drm_fb_cma_fbdev_fini(struct drm_device *dev)
 {
-       struct drm_fb_helper *fb_helper = dev->fb_helper;
-
-       if (!fb_helper)
-               return;
-
-       /* Unregister if it hasn't been done already */
-       if (fb_helper->fbdev && fb_helper->fbdev->dev)
-               drm_fb_helper_unregister_fbi(fb_helper);
-
-       if (fb_helper->fbdev)
-               drm_fbdev_cma_defio_fini(fb_helper->fbdev);
-
-       if (fb_helper->fb)
-               drm_framebuffer_remove(fb_helper->fb);
-
-       drm_fb_helper_fini(fb_helper);
-       kfree(to_fbdev_cma(fb_helper));
+       if (dev->fb_helper)
+               drm_fbdev_cma_fini(to_fbdev_cma(dev->fb_helper));
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
 
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+       .fb_probe = drm_fb_helper_generic_probe,
+};
+
 /**
- * drm_fbdev_cma_init_with_funcs() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
  * @dev: DRM device
  * @preferred_bpp: Preferred bits per pixel for the device
  * @max_conn_count: Maximum number of connectors
- * @funcs: fb helper functions, in particular a custom dirty() callback
  *
  * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
  */
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count,
-       const struct drm_framebuffer_funcs *funcs)
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+       unsigned int preferred_bpp, unsigned int max_conn_count)
 {
        struct drm_fbdev_cma *fbdev_cma;
-       struct drm_fb_helper *helper;
+       struct drm_fb_helper *fb_helper;
        int ret;
 
        fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
-       if (!fbdev_cma) {
-               dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
+       if (!fbdev_cma)
                return ERR_PTR(-ENOMEM);
-       }
-       fbdev_cma->fb_funcs = funcs;
 
-       helper = &fbdev_cma->fb_helper;
-
-       drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+       fb_helper = &fbdev_cma->fb_helper;
 
-       ret = drm_fb_helper_init(dev, helper, max_conn_count);
-       if (ret < 0) {
-               dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
+       ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+       if (ret)
                goto err_free;
-       }
-
-       ret = drm_fb_helper_single_add_all_connectors(helper);
-       if (ret < 0) {
-               dev_err(dev->dev, "Failed to add connectors.\n");
-               goto err_drm_fb_helper_fini;
-
-       }
 
-       ret = drm_fb_helper_initial_config(helper, preferred_bpp);
-       if (ret < 0) {
-               dev_err(dev->dev, "Failed to set initial hw configuration.\n");
-               goto err_drm_fb_helper_fini;
-       }
+       ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_cma_helper_funcs,
+                                       preferred_bpp, max_conn_count);
+       if (ret)
+               goto err_client_put;
 
        return fbdev_cma;
 
-err_drm_fb_helper_fini:
-       drm_fb_helper_fini(helper);
+err_client_put:
+       drm_client_release(&fb_helper->client);
 err_free:
        kfree(fbdev_cma);
 
        return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
-
-static const struct drm_framebuffer_funcs drm_fb_cma_funcs = {
-       .destroy        = drm_gem_fb_destroy,
-       .create_handle  = drm_gem_fb_create_handle,
-};
-
-/**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
- * @dev: DRM device
- * @preferred_bpp: Preferred bits per pixel for the device
- * @max_conn_count: Maximum number of connectors
- *
- * Returns a newly allocated drm_fbdev_cma struct or a ERR_PTR.
- */
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count)
-{
-       return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp,
-                                            max_conn_count,
-                                            &drm_fb_cma_funcs);
-}
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 
 /**
@@ -477,14 +187,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
 {
        drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
-       if (fbdev_cma->fb_helper.fbdev)
-               drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
-
-       if (fbdev_cma->fb_helper.fb)
-               drm_framebuffer_remove(fbdev_cma->fb_helper.fb);
-
-       drm_fb_helper_fini(&fbdev_cma->fb_helper);
-       kfree(fbdev_cma);
+       /* All resources have now been freed by drm_fbdev_fb_destroy() */
 }
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
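
With the heavy lifting delegated to the generic helpers, a CMA driver is
left with very little boilerplate. A hedged sketch of the expected call
sites, assuming a CMA-backed KMS driver; my_load() and my_unload() are
illustrative names:

    #include <drm/drm_fb_cma_helper.h>

    static int my_load(struct drm_device *drm)
    {
            /* 0, 0: fall back to preferred_depth and all connectors */
            return drm_fb_cma_fbdev_init(drm, 0, 0);
    }

    static void my_unload(struct drm_device *drm)
    {
            drm_fb_cma_fbdev_fini(drm);
    }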
 
index 2ee1eaa6618805308846e6a4629a2cb2b8804f76..4b0dd20bccb8f3cc2b7da96eb4c4203dabf2815f 100644 (file)
@@ -30,6 +30,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/console.h>
+#include <linux/dma-buf.h>
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/slab.h>
@@ -66,6 +67,9 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
  * helper functions used by many drivers to implement the kernel mode setting
  * interfaces.
  *
+ * Drivers that support dumb buffers with a virtual address and mmap support
+ * should try the generic fbdev emulation using drm_fbdev_generic_setup().
+ *
  * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
  * down by calling drm_fb_helper_fbdev_teardown().
  *
@@ -368,7 +372,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
        struct drm_plane *plane;
        struct drm_atomic_state *state;
        int i, ret;
-       unsigned int plane_mask;
        struct drm_modeset_acquire_ctx ctx;
 
        drm_modeset_acquire_init(&ctx, 0);
@@ -381,7 +384,6 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper, bool activ
 
        state->acquire_ctx = &ctx;
 retry:
-       plane_mask = 0;
        drm_for_each_plane(plane, dev) {
                plane_state = drm_atomic_get_plane_state(state, plane);
                if (IS_ERR(plane_state)) {
@@ -391,9 +393,6 @@ retry:
 
                plane_state->rotation = DRM_MODE_ROTATE_0;
 
-               plane->old_fb = plane->fb;
-               plane_mask |= 1 << drm_plane_index(plane);
-
                /* disable non-primary: */
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                        continue;
@@ -430,8 +429,6 @@ retry:
        ret = drm_atomic_commit(state);
 
 out_state:
-       drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
        if (ret == -EDEADLK)
                goto backoff;
 
@@ -745,6 +742,24 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
        console_unlock();
 }
 
+static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
+                                         struct drm_clip_rect *clip)
+{
+       struct drm_framebuffer *fb = fb_helper->fb;
+       unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+       size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp;
+       void *src = fb_helper->fbdev->screen_buffer + offset;
+       void *dst = fb_helper->buffer->vaddr + offset;
+       size_t len = (clip->x2 - clip->x1) * cpp;
+       unsigned int y;
+
+       for (y = clip->y1; y < clip->y2; y++) {
+               memcpy(dst, src, len);
+               src += fb->pitches[0];
+               dst += fb->pitches[0];
+       }
+}
+
 static void drm_fb_helper_dirty_work(struct work_struct *work)
 {
        struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
@@ -760,8 +775,12 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
        spin_unlock_irqrestore(&helper->dirty_lock, flags);
 
        /* call dirty callback only when it has been really touched */
-       if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
+       if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+               /* Generic fbdev uses a shadow buffer */
+               if (helper->buffer)
+                       drm_fb_helper_dirty_blit_real(helper, &clip_copy);
                helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+       }
 }
 
 /**
@@ -1164,7 +1183,7 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
  * @info: fbdev registered by the helper
  * @rect: info about rectangle to fill
  *
- * A wrapper around cfb_imageblit implemented by fbdev core
+ * A wrapper around cfb_fillrect implemented by fbdev core
  */
 void drm_fb_helper_cfb_fillrect(struct fb_info *info,
                                const struct fb_fillrect *rect)
@@ -2330,6 +2349,20 @@ retry:
        return true;
 }
 
+static bool connector_has_possible_crtc(struct drm_connector *connector,
+                                       struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+       int i;
+
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
+               if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+                       return true;
+       }
+
+       return false;
+}
+
 static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
                          struct drm_fb_helper_crtc **best_crtcs,
                          struct drm_display_mode **modes,
@@ -2338,7 +2371,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        int c, o;
        struct drm_connector *connector;
        const struct drm_connector_helper_funcs *connector_funcs;
-       struct drm_encoder *encoder;
        int my_score, best_score, score;
        struct drm_fb_helper_crtc **crtcs, *crtc;
        struct drm_fb_helper_connector *fb_helper_conn;
@@ -2369,20 +2401,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 
        connector_funcs = connector->helper_private;
 
-       /*
-        * If the DRM device implements atomic hooks and ->best_encoder() is
-        * NULL we fallback to the default drm_atomic_helper_best_encoder()
-        * helper.
-        */
-       if (drm_drv_uses_atomic_modeset(fb_helper->dev) &&
-           !connector_funcs->best_encoder)
-               encoder = drm_atomic_helper_best_encoder(connector);
-       else
-               encoder = connector_funcs->best_encoder(connector);
-
-       if (!encoder)
-               goto out;
-
        /*
         * select a crtc for this connector and then attempt to configure
         * remaining connectors
@@ -2390,7 +2408,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        for (c = 0; c < fb_helper->crtc_count; c++) {
                crtc = &fb_helper->crtc_info[c];
 
-               if ((encoder->possible_crtcs & (1 << c)) == 0)
+               if (!connector_has_possible_crtc(connector,
+                                                crtc->mode_set.crtc))
                        continue;
 
                for (o = 0; o < n; o++)
@@ -2417,7 +2436,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
                               sizeof(struct drm_fb_helper_crtc *));
                }
        }
-out:
+
        kfree(crtcs);
        return best_score;
 }
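
The removed logic asked ->best_encoder() for a single encoder and tested
its mask against the CRTC's array position; the new
connector_has_possible_crtc() accepts a CRTC if any encoder reachable from
the connector can drive it. A hedged sketch of the mask semantics, assuming
a hypothetical three-CRTC device:

    /* Illustrative: possible_crtcs is a bitmask over CRTC indices. An
     * encoder able to drive CRTC 0 and CRTC 2 has possible_crtcs == 0x5;
     * drm_crtc_mask() is 1 << drm_crtc_index(crtc), i.e. 0x1, 0x2 or 0x4
     * here, so the test matches CRTC 0 and CRTC 2 but not CRTC 1.
     */
    static bool encoder_can_drive(struct drm_encoder *encoder,
                                  struct drm_crtc *crtc)
    {
            return !!(encoder->possible_crtcs & drm_crtc_mask(crtc));
    }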
@@ -2928,6 +2947,294 @@ void drm_fb_helper_output_poll_changed(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_fb_helper_output_poll_changed);
 
+/* @user: 1=userspace, 0=fbcon */
+static int drm_fbdev_fb_open(struct fb_info *info, int user)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+
+       if (!try_module_get(fb_helper->dev->driver->fops->owner))
+               return -ENODEV;
+
+       return 0;
+}
+
+static int drm_fbdev_fb_release(struct fb_info *info, int user)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+
+       module_put(fb_helper->dev->driver->fops->owner);
+
+       return 0;
+}
+
+/*
+ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+ * unregister_framebuffer() or fb_release().
+ */
+static void drm_fbdev_fb_destroy(struct fb_info *info)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+       struct fb_info *fbi = fb_helper->fbdev;
+       struct fb_ops *fbops = NULL;
+       void *shadow = NULL;
+
+       if (fbi->fbdefio) {
+               fb_deferred_io_cleanup(fbi);
+               shadow = fbi->screen_buffer;
+               fbops = fbi->fbops;
+       }
+
+       drm_fb_helper_fini(fb_helper);
+
+       if (shadow) {
+               vfree(shadow);
+               kfree(fbops);
+       }
+
+       drm_client_framebuffer_delete(fb_helper->buffer);
+       /*
+        * FIXME:
+        * Remove conditional when all CMA drivers have been moved over to using
+        * drm_fbdev_generic_setup().
+        */
+       if (fb_helper->client.funcs) {
+               drm_client_release(&fb_helper->client);
+               kfree(fb_helper);
+       }
+}
+
+static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       struct drm_fb_helper *fb_helper = info->par;
+
+       if (fb_helper->dev->driver->gem_prime_mmap)
+               return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
+       else
+               return -ENODEV;
+}
+
+static struct fb_ops drm_fbdev_fb_ops = {
+       .owner          = THIS_MODULE,
+       DRM_FB_HELPER_DEFAULT_OPS,
+       .fb_open        = drm_fbdev_fb_open,
+       .fb_release     = drm_fbdev_fb_release,
+       .fb_destroy     = drm_fbdev_fb_destroy,
+       .fb_mmap        = drm_fbdev_fb_mmap,
+       .fb_read        = drm_fb_helper_sys_read,
+       .fb_write       = drm_fb_helper_sys_write,
+       .fb_fillrect    = drm_fb_helper_sys_fillrect,
+       .fb_copyarea    = drm_fb_helper_sys_copyarea,
+       .fb_imageblit   = drm_fb_helper_sys_imageblit,
+};
+
+static struct fb_deferred_io drm_fbdev_defio = {
+       .delay          = HZ / 20,
+       .deferred_io    = drm_fb_helper_deferred_io,
+};
+
+/**
+ * drm_fb_helper_generic_probe() - Generic fbdev emulation probe helper
+ * @fb_helper: fbdev helper structure
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * This function uses the client API to create a framebuffer backed by a dumb buffer.
+ *
+ * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect,
+ * fb_copyarea, fb_imageblit.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+                               struct drm_fb_helper_surface_size *sizes)
+{
+       struct drm_client_dev *client = &fb_helper->client;
+       struct drm_client_buffer *buffer;
+       struct drm_framebuffer *fb;
+       struct fb_info *fbi;
+       u32 format;
+       int ret;
+
+       DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+                     sizes->surface_width, sizes->surface_height,
+                     sizes->surface_bpp);
+
+       format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+       buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+                                              sizes->surface_height, format);
+       if (IS_ERR(buffer))
+               return PTR_ERR(buffer);
+
+       fb_helper->buffer = buffer;
+       fb_helper->fb = buffer->fb;
+       fb = buffer->fb;
+
+       fbi = drm_fb_helper_alloc_fbi(fb_helper);
+       if (IS_ERR(fbi)) {
+               ret = PTR_ERR(fbi);
+               goto err_free_buffer;
+       }
+
+       fbi->par = fb_helper;
+       fbi->fbops = &drm_fbdev_fb_ops;
+       fbi->screen_size = fb->height * fb->pitches[0];
+       fbi->fix.smem_len = fbi->screen_size;
+       fbi->screen_buffer = buffer->vaddr;
+       strcpy(fbi->fix.id, "DRM emulated");
+
+       drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
+       drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+
+       if (fb->funcs->dirty) {
+               struct fb_ops *fbops;
+               void *shadow;
+
+               /*
+                * fb_deferred_io_cleanup() clears &fbops->fb_mmap, so a
+                * per-instance copy is necessary.
+                */
+               fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+               shadow = vzalloc(fbi->screen_size);
+               if (!fbops || !shadow) {
+                       kfree(fbops);
+                       vfree(shadow);
+                       ret = -ENOMEM;
+                       goto err_fb_info_destroy;
+               }
+
+               *fbops = *fbi->fbops;
+               fbi->fbops = fbops;
+               fbi->screen_buffer = shadow;
+               fbi->fbdefio = &drm_fbdev_defio;
+
+               fb_deferred_io_init(fbi);
+       }
+
+       return 0;
+
+err_fb_info_destroy:
+       drm_fb_helper_fini(fb_helper);
+err_free_buffer:
+       drm_client_framebuffer_delete(buffer);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_generic_probe);
+
+static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
+       .fb_probe = drm_fb_helper_generic_probe,
+};
+
+static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+{
+       struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+       if (fb_helper->fbdev) {
+               drm_fb_helper_unregister_fbi(fb_helper);
+               /* drm_fbdev_fb_destroy() takes care of cleanup */
+               return;
+       }
+
+       /* Did drm_fb_helper_fbdev_setup() run? */
+       if (fb_helper->dev)
+               drm_fb_helper_fini(fb_helper);
+
+       drm_client_release(client);
+       kfree(fb_helper);
+}
+
+static int drm_fbdev_client_restore(struct drm_client_dev *client)
+{
+       struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+       drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+
+       return 0;
+}
+
+static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+{
+       struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+       struct drm_device *dev = client->dev;
+       int ret;
+
+       /* If drm_fb_helper_fbdev_setup() failed, we only try once */
+       if (!fb_helper->dev && fb_helper->funcs)
+               return 0;
+
+       if (dev->fb_helper)
+               return drm_fb_helper_hotplug_event(dev->fb_helper);
+
+       if (!dev->mode_config.num_connector)
+               return 0;
+
+       ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
+                                       fb_helper->preferred_bpp, 0);
+       if (ret) {
+               fb_helper->dev = NULL;
+               fb_helper->fbdev = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct drm_client_funcs drm_fbdev_client_funcs = {
+       .owner          = THIS_MODULE,
+       .unregister     = drm_fbdev_client_unregister,
+       .restore        = drm_fbdev_client_restore,
+       .hotplug        = drm_fbdev_client_hotplug,
+};
+
+/**
+ * drm_fbdev_generic_setup() - Setup generic fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ *                 @dev->mode_config.preferred_depth is used if this is zero.
+ *
+ * This function sets up generic fbdev emulation for drivers that support
+ * dumb buffers with a virtual address that can be mmap'ed.
+ *
+ * Restore, hotplug events and teardown are all taken care of. Drivers that do
+ * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
+ * Simple drivers might use drm_mode_config_helper_suspend().
+ *
+ * Drivers that set the dirty callback on their framebuffer will get a shadow
+ * fbdev buffer that is blitted onto the real buffer. This is done in order to
+ * make deferred I/O work with all kinds of buffers.
+ *
+ * This function is safe to call even when there are no connectors present.
+ * Setup will be retried on the next hotplug event.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+       struct drm_fb_helper *fb_helper;
+       int ret;
+
+       if (!drm_fbdev_emulation)
+               return 0;
+
+       fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
+       if (!fb_helper)
+               return -ENOMEM;
+
+       ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+       if (ret) {
+               kfree(fb_helper);
+               return ret;
+       }
+
+       fb_helper->preferred_bpp = preferred_bpp;
+
+       drm_fbdev_client_hotplug(&fb_helper->client);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_fbdev_generic_setup);
+
 /* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
  * but the module doesn't depend on any fb console symbols.  At least
  * attempt to load fbcon to avoid leaving the system without a usable console.
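
A sketch of the intended driver-side usage; my_probe() and my_device_init()
are placeholders, and the return value is deliberately ignored here since
setup is retried on the next hotplug event anyway:

    #include <drm/drm_drv.h>
    #include <drm/drm_fb_helper.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            struct drm_device *drm = my_device_init(pdev); /* placeholder */
            int ret;

            ret = drm_dev_register(drm, 0);
            if (ret)
                    return ret;

            /* Safe with zero connectors; retried on hotplug. */
            drm_fbdev_generic_setup(drm, 32);

            return 0;
    }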
index 6d9b9453707c5af4cbad984c790ce9538daa5d82..ffa8dc35515ffaddf0f87c56cc2108e0be4151f6 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 
+#include <drm/drm_client.h>
 #include <drm/drm_file.h>
 #include <drm/drmP.h>
 
@@ -101,6 +102,166 @@ DEFINE_MUTEX(drm_global_mutex);
 
 static int drm_open_helper(struct file *filp, struct drm_minor *minor);
 
+/**
+ * drm_file_alloc - allocate file context
+ * @minor: minor to allocate on
+ *
+ * This allocates a new DRM file context. It is not linked into any context and
+ * can be used by the caller freely. Note that the context keeps a pointer to
+ * @minor, so it must be freed before @minor is.
+ *
+ * RETURNS:
+ * Pointer to newly allocated context, ERR_PTR on failure.
+ */
+struct drm_file *drm_file_alloc(struct drm_minor *minor)
+{
+       struct drm_device *dev = minor->dev;
+       struct drm_file *file;
+       int ret;
+
+       file = kzalloc(sizeof(*file), GFP_KERNEL);
+       if (!file)
+               return ERR_PTR(-ENOMEM);
+
+       file->pid = get_pid(task_pid(current));
+       file->minor = minor;
+
+       /* for compatibility root is always authenticated */
+       file->authenticated = capable(CAP_SYS_ADMIN);
+       file->lock_count = 0;
+
+       INIT_LIST_HEAD(&file->lhead);
+       INIT_LIST_HEAD(&file->fbs);
+       mutex_init(&file->fbs_lock);
+       INIT_LIST_HEAD(&file->blobs);
+       INIT_LIST_HEAD(&file->pending_event_list);
+       INIT_LIST_HEAD(&file->event_list);
+       init_waitqueue_head(&file->event_wait);
+       file->event_space = 4096; /* set aside 4k for event buffer */
+
+       mutex_init(&file->event_read_lock);
+
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               drm_gem_open(dev, file);
+
+       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+               drm_syncobj_open(file);
+
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_prime_init_file_private(&file->prime);
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, file);
+               if (ret < 0)
+                       goto out_prime_destroy;
+       }
+
+       return file;
+
+out_prime_destroy:
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_prime_destroy_file_private(&file->prime);
+       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+               drm_syncobj_release(file);
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               drm_gem_release(dev, file);
+       put_pid(file->pid);
+       kfree(file);
+
+       return ERR_PTR(ret);
+}
+
+static void drm_events_release(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_pending_event *e, *et;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* Unlink pending events */
+       list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+                                pending_link) {
+               list_del(&e->pending_link);
+               e->file_priv = NULL;
+       }
+
+       /* Remove unconsumed events */
+       list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
+               list_del(&e->link);
+               kfree(e);
+       }
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/**
+ * drm_file_free - free file context
+ * @file: context to free, or NULL
+ *
+ * This destroys and deallocates a DRM file context previously allocated via
+ * drm_file_alloc(). The caller must make sure to unlink it from any contexts
+ * before calling this.
+ *
+ * If NULL is passed, this is a no-op.
+ *
+ */
+void drm_file_free(struct drm_file *file)
+{
+       struct drm_device *dev;
+
+       if (!file)
+               return;
+
+       dev = file->minor->dev;
+
+       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+                 task_pid_nr(current),
+                 (long)old_encode_dev(file->minor->kdev->devt),
+                 dev->open_count);
+
+       if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
+           dev->driver->preclose)
+               dev->driver->preclose(dev, file);
+
+       if (drm_core_check_feature(dev, DRIVER_LEGACY))
+               drm_legacy_lock_release(dev, file->filp);
+
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+               drm_legacy_reclaim_buffers(dev, file);
+
+       drm_events_release(file);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               drm_fb_release(file);
+               drm_property_destroy_user_blobs(dev, file);
+       }
+
+       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
+               drm_syncobj_release(file);
+
+       if (drm_core_check_feature(dev, DRIVER_GEM))
+               drm_gem_release(dev, file);
+
+       drm_legacy_ctxbitmap_flush(dev, file);
+
+       if (drm_is_primary_client(file))
+               drm_master_release(file);
+
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, file);
+
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_prime_destroy_file_private(&file->prime);
+
+       WARN_ON(!list_empty(&file->event_list));
+
+       put_pid(file->pid);
+       kfree(file);
+}
+
 static int drm_setup(struct drm_device * dev)
 {
        int ret;
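
Factoring allocation and teardown out of the fops paths is what allows
in-kernel clients (the drm_client work in this same series) to obtain a
file context without an open(2) call. A hedged sketch of such a caller;
my_client_open() and my_client_close() are illustrative and error handling
is trimmed:

    #include <drm/drm_file.h>

    static struct drm_file *my_client_open(struct drm_device *dev)
    {
            struct drm_file *file = drm_file_alloc(dev->primary);

            if (IS_ERR(file))
                    return file;

            /* the caller decides whether to link it into any lists */
            return file;
    }

    static void my_client_close(struct drm_file *file)
    {
            drm_file_free(file);
    }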
@@ -207,52 +368,22 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 
        DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       filp->private_data = priv;
-       filp->f_mode |= FMODE_UNSIGNED_OFFSET;
-       priv->filp = filp;
-       priv->pid = get_pid(task_pid(current));
-       priv->minor = minor;
-
-       /* for compatibility root is always authenticated */
-       priv->authenticated = capable(CAP_SYS_ADMIN);
-       priv->lock_count = 0;
-
-       INIT_LIST_HEAD(&priv->lhead);
-       INIT_LIST_HEAD(&priv->fbs);
-       mutex_init(&priv->fbs_lock);
-       INIT_LIST_HEAD(&priv->blobs);
-       INIT_LIST_HEAD(&priv->pending_event_list);
-       INIT_LIST_HEAD(&priv->event_list);
-       init_waitqueue_head(&priv->event_wait);
-       priv->event_space = 4096; /* set aside 4k for event buffer */
-
-       mutex_init(&priv->event_read_lock);
-
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_open(dev, priv);
-
-       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
-               drm_syncobj_open(priv);
-
-       if (drm_core_check_feature(dev, DRIVER_PRIME))
-               drm_prime_init_file_private(&priv->prime);
-
-       if (dev->driver->open) {
-               ret = dev->driver->open(dev, priv);
-               if (ret < 0)
-                       goto out_prime_destroy;
-       }
+       priv = drm_file_alloc(minor);
+       if (IS_ERR(priv))
+               return PTR_ERR(priv);
 
        if (drm_is_primary_client(priv)) {
                ret = drm_master_open(priv);
-               if (ret)
-                       goto out_close;
+               if (ret) {
+                       drm_file_free(priv);
+                       return ret;
+               }
        }
 
+       filp->private_data = priv;
+       filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+       priv->filp = filp;
+
        mutex_lock(&dev->filelist_mutex);
        list_add(&priv->lhead, &dev->filelist);
        mutex_unlock(&dev->filelist_mutex);
@@ -278,45 +409,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
 #endif
 
        return 0;
-
-out_close:
-       if (dev->driver->postclose)
-               dev->driver->postclose(dev, priv);
-out_prime_destroy:
-       if (drm_core_check_feature(dev, DRIVER_PRIME))
-               drm_prime_destroy_file_private(&priv->prime);
-       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
-               drm_syncobj_release(priv);
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_release(dev, priv);
-       put_pid(priv->pid);
-       kfree(priv);
-       filp->private_data = NULL;
-       return ret;
-}
-
-static void drm_events_release(struct drm_file *file_priv)
-{
-       struct drm_device *dev = file_priv->minor->dev;
-       struct drm_pending_event *e, *et;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-
-       /* Unlink pending events */
-       list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
-                                pending_link) {
-               list_del(&e->pending_link);
-               e->file_priv = NULL;
-       }
-
-       /* Remove unconsumed events */
-       list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
-               list_del(&e->link);
-               kfree(e);
-       }
-
-       spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
 static void drm_legacy_dev_reinit(struct drm_device *dev)
@@ -353,6 +445,8 @@ void drm_lastclose(struct drm_device * dev)
 
        if (drm_core_check_feature(dev, DRIVER_LEGACY))
                drm_legacy_dev_reinit(dev);
+
+       drm_client_dev_restore(dev);
 }
 
 /**
@@ -383,57 +477,7 @@ int drm_release(struct inode *inode, struct file *filp)
        list_del(&file_priv->lhead);
        mutex_unlock(&dev->filelist_mutex);
 
-       if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
-           dev->driver->preclose)
-               dev->driver->preclose(dev, file_priv);
-
-       /* ========================================================
-        * Begin inline drm_release
-        */
-
-       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
-                 task_pid_nr(current),
-                 (long)old_encode_dev(file_priv->minor->kdev->devt),
-                 dev->open_count);
-
-       if (drm_core_check_feature(dev, DRIVER_LEGACY))
-               drm_legacy_lock_release(dev, filp);
-
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
-               drm_legacy_reclaim_buffers(dev, file_priv);
-
-       drm_events_release(file_priv);
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               drm_fb_release(file_priv);
-               drm_property_destroy_user_blobs(dev, file_priv);
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
-               drm_syncobj_release(file_priv);
-
-       if (drm_core_check_feature(dev, DRIVER_GEM))
-               drm_gem_release(dev, file_priv);
-
-       drm_legacy_ctxbitmap_flush(dev, file_priv);
-
-       if (drm_is_primary_client(file_priv))
-               drm_master_release(file_priv);
-
-       if (dev->driver->postclose)
-               dev->driver->postclose(dev, file_priv);
-
-       if (drm_core_check_feature(dev, DRIVER_PRIME))
-               drm_prime_destroy_file_private(&file_priv->prime);
-
-       WARN_ON(!list_empty(&file_priv->event_list));
-
-       put_pid(file_priv->pid);
-       kfree(file_priv);
-
-       /* ========================================================
-        * End inline drm_release
-        */
+       drm_file_free(file_priv);
 
        if (!--dev->open_count) {
                drm_lastclose(dev);
index 5ca6395cd4d39c8f749738c0506fd815e818fcc9..35c1e2742c27751dc9de3a036334664c87b17cde 100644 (file)
@@ -152,27 +152,27 @@ const struct drm_format_info *__drm_format_info(u32 format)
                { .format = DRM_FORMAT_XBGR8888_A8,     .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_RGBX8888_A8,     .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_BGRX8888_A8,     .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
-               { .format = DRM_FORMAT_YUV410,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
-               { .format = DRM_FORMAT_YVU410,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
-               { .format = DRM_FORMAT_YUV411,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
-               { .format = DRM_FORMAT_YVU411,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
-               { .format = DRM_FORMAT_YUV420,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
-               { .format = DRM_FORMAT_YVU420,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
-               { .format = DRM_FORMAT_YUV422,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_YVU422,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_YUV444,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
-               { .format = DRM_FORMAT_YVU444,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
-               { .format = DRM_FORMAT_NV12,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
-               { .format = DRM_FORMAT_NV21,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
-               { .format = DRM_FORMAT_NV16,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_NV61,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_NV24,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
-               { .format = DRM_FORMAT_NV42,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
-               { .format = DRM_FORMAT_YUYV,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_YVYU,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_UYVY,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_VYUY,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
-               { .format = DRM_FORMAT_AYUV,            .depth = 0,  .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+               { .format = DRM_FORMAT_YUV410,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+               { .format = DRM_FORMAT_YVU410,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4, .is_yuv = true },
+               { .format = DRM_FORMAT_YUV411,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YVU411,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YUV420,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+               { .format = DRM_FORMAT_YVU420,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+               { .format = DRM_FORMAT_YUV422,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YVU422,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YUV444,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YVU444,          .depth = 0,  .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_NV12,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+               { .format = DRM_FORMAT_NV21,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true },
+               { .format = DRM_FORMAT_NV16,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_NV61,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_NV24,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_NV42,            .depth = 0,  .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YUYV,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_YVYU,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_UYVY,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_VYUY,            .depth = 0,  .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+               { .format = DRM_FORMAT_AYUV,            .depth = 0,  .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
        };
 
        unsigned int i;
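The new .is_yuv flag gives drivers a direct way to test for YUV formats instead of switching over fourcc codes, alongside the existing hsub/vsub subsampling factors. A minimal sketch of consuming these descriptors through the drm_format_info() lookup; the helper and its debug output are illustrative only:

#include <drm/drm_fourcc.h>

/* Sketch: derive per-plane dimensions for a format such as NV12.
 * hsub/vsub apply only to the chroma planes (plane index > 0). */
static void describe_format(u32 fourcc, unsigned int width, unsigned int height)
{
	const struct drm_format_info *info = drm_format_info(fourcc);
	unsigned int i;

	if (!info)
		return;

	for (i = 0; i < info->num_planes; i++) {
		unsigned int w = i ? width / info->hsub : width;
		unsigned int h = i ? height / info->vsub : height;

		DRM_DEBUG("plane %u: %ux%u, %u byte(s)/px, yuv=%d\n",
			  i, w, h, info->cpp[i], info->is_yuv);
	}
}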
index bfedceff87bba8101ff51d18e68e4e0c9d107182..781af1d42d766bf63db12801ace4703132db84fa 100644 (file)
@@ -95,21 +95,20 @@ int drm_framebuffer_check_src_coords(uint32_t src_x, uint32_t src_y,
 /**
  * drm_mode_addfb - add an FB to the graphics configuration
  * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @or: pointer to request structure
+ * @file_priv: drm file
  *
  * Add a new FB to the specified CRTC, given a user request. This is the
  * original addfb ioctl which only supported RGB formats.
  *
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_addfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
+int drm_mode_addfb(struct drm_device *dev, struct drm_mode_fb_cmd *or,
+                  struct drm_file *file_priv)
 {
-       struct drm_mode_fb_cmd *or = data;
        struct drm_mode_fb_cmd2 r = {};
        int ret;
 
@@ -134,6 +133,12 @@ int drm_mode_addfb(struct drm_device *dev,
        return 0;
 }
 
+int drm_mode_addfb_ioctl(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv)
+{
+       return drm_mode_addfb(dev, data, file_priv);
+}
+
 static int fb_plane_width(int width,
                          const struct drm_format_info *format, int plane)
 {
@@ -367,29 +372,28 @@ static void drm_mode_rmfb_work_fn(struct work_struct *w)
 
 /**
  * drm_mode_rmfb - remove an FB from the configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
+ * @dev: drm device
+ * @fb_id: id of framebuffer to remove
+ * @file_priv: drm file
  *
- * Remove the FB specified by the user.
+ * Remove the specified FB.
  *
- * Called by the user via ioctl.
+ * Called by the user via ioctl, or by an in-kernel client.
  *
  * Returns:
  * Zero on success, negative errno on failure.
  */
-int drm_mode_rmfb(struct drm_device *dev,
-                  void *data, struct drm_file *file_priv)
+int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
+                 struct drm_file *file_priv)
 {
        struct drm_framebuffer *fb = NULL;
        struct drm_framebuffer *fbl = NULL;
-       uint32_t *id = data;
        int found = 0;
 
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       fb = drm_framebuffer_lookup(dev, file_priv, *id);
+       fb = drm_framebuffer_lookup(dev, file_priv, fb_id);
        if (!fb)
                return -ENOENT;
 
@@ -435,6 +439,14 @@ fail_unref:
        return -ENOENT;
 }
 
+int drm_mode_rmfb_ioctl(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv)
+{
+       uint32_t *fb_id = data;
+
+       return drm_mode_rmfb(dev, *fb_id, file_priv);
+}
+
 /**
  * drm_mode_getfb - get FB info
  * @dev: drm device for the ioctl
@@ -835,9 +847,7 @@ retry:
                if (ret)
                        goto unlock;
 
-               plane_mask |= BIT(drm_plane_index(plane));
-
-               plane->old_fb = plane->fb;
+               plane_mask |= drm_plane_mask(plane);
        }
 
        /* This list is only filled when disable_crtcs is set. */
@@ -852,9 +862,6 @@ retry:
                ret = drm_atomic_commit(state);
 
 unlock:
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
        if (ret == -EDEADLK) {
                drm_atomic_state_clear(state);
                drm_modeset_backoff(&ctx);
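Splitting the ioctl wrappers out means the core entry points now take typed arguments, so an in-kernel client can call them without marshalling a user request. A rough sketch of an addfb/rmfb round trip under the new signatures; the GEM handle is assumed to already be valid in @file_priv:

/* Sketch of an in-kernel framebuffer round trip; error handling trimmed. */
static int demo_fb_roundtrip(struct drm_device *dev,
			     struct drm_file *file_priv, u32 handle)
{
	struct drm_mode_fb_cmd cmd = {
		.width  = 1024,
		.height = 768,
		.bpp    = 32,
		.depth  = 24,
		.pitch  = 1024 * 4,
		.handle = handle,
	};
	int ret;

	ret = drm_mode_addfb(dev, &cmd, file_priv);
	if (ret)
		return ret;

	/* cmd.fb_id now holds the id of the new framebuffer */
	return drm_mode_rmfb(dev, cmd.fb_id, file_priv);
}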
index 4a16d7b26c89c1a350e5720c46d917f29532e416..bf90625df3c5bf4f34f1a1d187064f46c3599aa1 100644 (file)
@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                return -EACCES;
        }
 
+       if (node->readonly) {
+               if (vma->vm_flags & VM_WRITE) {
+                       drm_gem_object_put_unlocked(obj);
+                       return -EINVAL;
+               }
+
+               vma->vm_flags &= ~VM_MAYWRITE;
+       }
+
        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);
 
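For userspace, the effect of this check is that a writable shared mapping of a read-only object fails outright with EINVAL, and a read-only mapping can never be upgraded later because VM_MAYWRITE is cleared. A hedged illustration; the fd and fake mmap offset are assumed to come from the driver's usual GEM mmap path:

#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>

static void try_map(int fd, off_t offset, size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, offset);
	if (p == MAP_FAILED && errno == EINVAL)
		printf("writable mapping rejected, as expected\n");

	p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, offset);
	if (p != MAP_FAILED) {
		/* mprotect(p, size, PROT_WRITE) would now fail too */
		munmap(p, size);
	}
}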
index acfbc0641a06a1bd2f1a63f53ce01d0e2ea2da03..2810d413141182e4b6298524612670aa34a54757 100644 (file)
@@ -253,7 +253,7 @@ int drm_gem_fb_prepare_fb(struct drm_plane *plane,
        struct dma_buf *dma_buf;
        struct dma_fence *fence;
 
-       if (plane->state->fb == state->fb || !state->fb)
+       if (!state->fb)
                return 0;
 
        dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
index b2dc21e33ae0db819fe73850e70c1140435f9464..5799e2782dd1329e49369b0b4f186ba009c3688f 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
  * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index b72242e93ea471114ec738609e70321cbb5233c9..40179c5fc6b87981353181ce41529a85a1cc8100 100644 (file)
@@ -26,6 +26,8 @@
 
 /* drm_file.c */
 extern struct mutex drm_global_mutex;
+struct drm_file *drm_file_alloc(struct drm_minor *minor);
+void drm_file_free(struct drm_file *file);
 void drm_lastclose(struct drm_device *dev);
 
 /* drm_pci.c */
index 0d4cfb232576fc1b2d325cfa185a1477070e30c8..ea10e9a26aadd5e8598e837ccd4375a15bfbe3dc 100644 (file)
@@ -334,6 +334,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
                        return -EINVAL;
                file_priv->aspect_ratio_allowed = req->value;
                break;
+       case DRM_CLIENT_CAP_WRITEBACK_CONNECTORS:
+               if (!file_priv->atomic)
+                       return -EINVAL;
+               if (req->value > 1)
+                       return -EINVAL;
+               file_priv->writeback_connectors = req->value;
+               break;
        default:
                return -EINVAL;
        }
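Userspace opts in to seeing writeback connectors through the standard client-cap handshake; because the new cap is refused unless the file is already atomic, the two calls pair up. A libdrm sketch:

#include <xf86drm.h>

static int enable_writeback(int fd)
{
	/* the writeback cap is only accepted on an atomic-enabled fd */
	int ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
	if (ret)
		return ret;

	return drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
}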
@@ -634,12 +641,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED),
-       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED),
+       DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
index 50c73c0a20b92e808d1ab511d1d3336ab9a094df..d638c0fb3418a4404b390fd12c668b4f02157a23 100644 (file)
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
        /* Clone the lessor file to create a new file for us */
        DRM_DEBUG_LEASE("Allocating lease file\n");
-       path_get(&lessor_file->f_path);
-       lessee_file = alloc_file(&lessor_file->f_path,
-                                lessor_file->f_mode,
-                                fops_get(lessor_file->f_inode->i_fop));
-
+       lessee_file = filp_clone_open(lessor_file);
        if (IS_ERR(lessee_file)) {
                ret = PTR_ERR(lessee_file);
                goto out_lessee;
        }
 
-       /* Initialize the new file for DRM */
-       DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
-       ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
-       if (ret)
-               goto out_lessee_file;
-
        lessee_priv = lessee_file->private_data;
-
        /* Change the file to a master one */
        drm_master_put(&lessee_priv->master);
        lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
        return 0;
 
-out_lessee_file:
-       fput(lessee_file);
-
 out_lessee:
        drm_master_put(&lessee);
 
index bc73b7f5b9fcd9184b3909f0b1c45cc73377cc4e..80b75501f5c6a203d7967fffdb94378b89039dab 100644 (file)
@@ -392,6 +392,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
        case MIPI_DSI_DCS_SHORT_WRITE:
        case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
        case MIPI_DSI_DCS_READ:
+       case MIPI_DSI_DCS_COMPRESSION_MODE:
        case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
                return true;
        }
@@ -410,6 +411,7 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
 bool mipi_dsi_packet_format_is_long(u8 type)
 {
        switch (type) {
+       case MIPI_DSI_PPS_LONG_WRITE:
        case MIPI_DSI_NULL_PACKET:
        case MIPI_DSI_BLANKING_PACKET:
        case MIPI_DSI_GENERIC_LONG_WRITE:
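Both classifiers remain exhaustive over the spec's packet types, with the DSC additions (compression mode, PPS) slotted into the existing switches. A small validation sketch a host driver might run before queuing a transfer:

#include <linux/errno.h>
#include <drm/drm_mipi_dsi.h>

/* Reject packet types that are neither short nor long;
 * MIPI_DSI_DCS_COMPRESSION_MODE now classifies as short and
 * MIPI_DSI_PPS_LONG_WRITE as long. */
static int validate_dsi_type(u8 type)
{
	if (!mipi_dsi_packet_format_is_short(type) &&
	    !mipi_dsi_packet_format_is_long(type))
		return -EINVAL;

	return 0;
}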
index 3166026a1874d7a9ffba190273dd1ab270fb83db..3cc5fbd78ee20b44eb2b722e96a158ac518c2e00 100644 (file)
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+       return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+                            struct drm_mm_node *node)
+{
+       struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+       u64 x = node->hole_size;
+       bool first = true;
+
+       while (*link) {
+               rb = *link;
+               if (x > rb_to_hole_size(rb)) {
+                       link = &rb->rb_left;
+               } else {
+                       link = &rb->rb_right;
+                       first = false;
+               }
+       }
+
+       rb_link_node(&node->rb_hole_size, rb, link);
+       rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
 static void add_hole(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
                __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-       RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+       insert_hole_size(&mm->holes_size, node);
        RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
        list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
        DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
        list_del(&node->hole_stack);
-       rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+       rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
        rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
        node->hole_size = 0;
 
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
 
 static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 {
-       struct rb_node *best = NULL;
-       struct rb_node **link = &mm->holes_size.rb_node;
+       struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+       struct drm_mm_node *best = NULL;
 
-       while (*link) {
-               struct rb_node *rb = *link;
+       do {
+               struct drm_mm_node *node =
+                       rb_entry(rb, struct drm_mm_node, rb_hole_size);
 
-               if (size <= rb_hole_size(rb)) {
-                       link = &rb->rb_left;
-                       best = rb;
+               if (size <= node->hole_size) {
+                       best = node;
+                       rb = rb->rb_right;
                } else {
-                       link = &rb->rb_right;
+                       rb = rb->rb_left;
                }
-       }
+       } while (rb);
 
-       return rb_hole_size_to_node(best);
+       return best;
 }
 
 static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
 {
+       struct rb_node *rb = mm->holes_addr.rb_node;
        struct drm_mm_node *node = NULL;
-       struct rb_node **link = &mm->holes_addr.rb_node;
 
-       while (*link) {
+       while (rb) {
                u64 hole_start;
 
-               node = rb_hole_addr_to_node(*link);
+               node = rb_hole_addr_to_node(rb);
                hole_start = __drm_mm_hole_node_start(node);
 
                if (addr < hole_start)
-                       link = &node->rb_hole_addr.rb_left;
+                       rb = node->rb_hole_addr.rb_left;
                else if (addr > hole_start + node->hole_size)
-                       link = &node->rb_hole_addr.rb_right;
+                       rb = node->rb_hole_addr.rb_right;
                else
                        break;
        }
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
           u64 start, u64 end, u64 size,
           enum drm_mm_insert_mode mode)
 {
-       if (RB_EMPTY_ROOT(&mm->holes_size))
-               return NULL;
-
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
        switch (mode) {
        default:
        case DRM_MM_INSERT_BEST:
-               return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+               return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
        case DRM_MM_INSERT_LOW:
                return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+       return rb ? rb_to_hole_size(rb) : 0;
+}
+
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 {
        struct drm_mm_node *hole;
        u64 remainder_mask;
+       bool once;
 
        DRM_MM_BUG_ON(range_start >= range_end);
 
        if (unlikely(size == 0 || range_end - range_start < size))
                return -ENOSPC;
 
+       if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+               return -ENOSPC;
+
        if (alignment <= 1)
                alignment = 0;
 
+       once = mode & DRM_MM_INSERT_ONCE;
+       mode &= ~DRM_MM_INSERT_ONCE;
+
        remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
-       for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
-            hole = next_hole(mm, hole, mode)) {
+       for (hole = first_hole(mm, range_start, range_end, size, mode);
+            hole;
+            hole = once ? NULL : next_hole(mm, hole, mode)) {
                u64 hole_start = __drm_mm_hole_node_start(hole);
                u64 hole_end = hole_start + hole->hole_size;
                u64 adj_start, adj_end;
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 
        if (drm_mm_hole_follows(old)) {
                list_replace(&old->hole_stack, &new->hole_stack);
-               rb_replace_node(&old->rb_hole_size,
-                               &new->rb_hole_size,
-                               &mm->holes_size);
+               rb_replace_node_cached(&old->rb_hole_size,
+                                      &new->rb_hole_size,
+                                      &mm->holes_size);
                rb_replace_node(&old->rb_hole_addr,
                                &new->rb_hole_addr,
                                &mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 
        INIT_LIST_HEAD(&mm->hole_stack);
        mm->interval_tree = RB_ROOT_CACHED;
-       mm->holes_size = RB_ROOT;
+       mm->holes_size = RB_ROOT_CACHED;
        mm->holes_addr = RB_ROOT;
 
        /* Clever trick to avoid a special case in the free hole tracking. */
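With holes_size now an rb_root_cached ordered largest-first, rb_first_cached() yields the biggest hole in O(1) (which the early -ENOSPC bail-out above relies on), and DRM_MM_INSERT_BEST walks from large to small via rb_prev(). A hedged sketch of the new DRM_MM_INSERT_ONCE mode, which gives up after probing the single best candidate:

#include <drm/drm_mm.h>

/* Best-fit allocation that tries only the first candidate hole. */
static int alloc_once(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
{
	return drm_mm_insert_node_in_range(mm, node, size,
					   0, 0,	/* alignment, color */
					   0, U64_MAX,	/* full range */
					   DRM_MM_INSERT_BEST |
					   DRM_MM_INSERT_ONCE);
}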
index e5c653357024dc5ea522706df1a03be1f12d094d..21e353bd3948ece25235c844da7fd4e3689abe35 100644 (file)
@@ -145,6 +145,11 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
        count = 0;
        connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
        drm_for_each_connector_iter(connector, &conn_iter) {
+               /* only expose writeback connectors if userspace understands them */
+               if (!file_priv->writeback_connectors &&
+                   (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK))
+                       continue;
+
                if (drm_lease_held(file_priv, connector->base.id)) {
                        if (count < card_res->count_connectors &&
                            put_user(connector->base.id, connector_id + count)) {
index ce4d2fb3281055be81e858f1b9a2707fdb7c31df..fcb0ab0abb75572a165341d00de8f83b01393aa9 100644 (file)
@@ -433,8 +433,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
        drm_modeset_lock_all(dev);
        switch (obj->type) {
        case DRM_MODE_OBJECT_CONNECTOR:
-               ret = drm_mode_connector_set_obj_prop(obj, prop,
-                                                     prop_value);
+               ret = drm_connector_set_obj_prop(obj, prop, prop_value);
                break;
        case DRM_MODE_OBJECT_CRTC:
                ret = drm_mode_crtc_set_obj_prop(obj, prop, prop_value);
index c78ca0e84ffdc312860dc49805cc3b8e95bacce7..02db9ac82d7a91a9b7de054a71a92e377d27a3d5 100644 (file)
@@ -659,10 +659,12 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
  * drm_bus_flags_from_videomode - extract information about pixelclk and
  * DE polarity from videomode and store it in a separate variable
  * @vm: videomode structure to use
- * @bus_flags: information about pixelclk and DE polarity will be stored here
+ * @bus_flags: information about pixelclk, sync and DE polarity will be stored
+ * here
  *
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH) and DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE
- * in @bus_flags according to DISPLAY_FLAGS found in @vm
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
+ * DRM_BUS_FLAG_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * found in @vm
  */
 void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
 {
@@ -672,6 +674,11 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
        if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
                *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
 
+       if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
+               *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+       if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
+               *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+
        if (vm->flags & DISPLAY_FLAGS_DE_LOW)
                *bus_flags |= DRM_BUS_FLAG_DE_LOW;
        if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
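Callers typically feed the result straight into connector bus configuration. A short usage sketch, assuming a struct videomode already parsed from DT timings:

#include <video/videomode.h>
#include <drm/drm_modes.h>

/* Translate DT display timing flags into DRM bus flags; the sync
 * edge flags are new with this change. */
static u32 bus_flags_for(const struct videomode *vm)
{
	u32 flags = 0;

	drm_bus_flags_from_videomode(vm, &flags);
	return flags;
}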
@@ -684,7 +691,7 @@ EXPORT_SYMBOL_GPL(drm_bus_flags_from_videomode);
  * of_get_drm_display_mode - get a drm_display_mode from devicetree
  * @np: device_node with the timing specification
  * @dmode: will be set to the return value
- * @bus_flags: information about pixelclk and DE polarity
+ * @bus_flags: information about pixelclk, sync and DE polarity
  * @index: index into the list of display timings in devicetree
  *
  * This function is expensive and should only be used, if only one mode is to be
@@ -1257,7 +1264,7 @@ static const char * const drm_mode_status_names[] = {
 
 #undef MODE_STATUS
 
-static const char *drm_get_mode_status_name(enum drm_mode_status status)
+const char *drm_get_mode_status_name(enum drm_mode_status status)
 {
        int index = status + 3;
 
@@ -1346,7 +1353,7 @@ void drm_mode_sort(struct list_head *mode_list)
 EXPORT_SYMBOL(drm_mode_sort);
 
 /**
- * drm_mode_connector_list_update - update the mode list for the connector
+ * drm_connector_list_update - update the mode list for the connector
  * @connector: the connector to update
  *
  * This moves the modes from the @connector probed_modes list
@@ -1356,7 +1363,7 @@ EXPORT_SYMBOL(drm_mode_sort);
  * This is just a helper function; it doesn't validate any modes itself and also
  * doesn't prune any invalid modes. Callers need to do that themselves.
  */
-void drm_mode_connector_list_update(struct drm_connector *connector)
+void drm_connector_list_update(struct drm_connector *connector)
 {
        struct drm_display_mode *pmode, *pt;
 
@@ -1405,7 +1412,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
                }
        }
 }
-EXPORT_SYMBOL(drm_mode_connector_list_update);
+EXPORT_SYMBOL(drm_connector_list_update);
 
 /**
  * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
index 1fe122461298e967997218292610e60d3eb0e999..2763a5ec845b0c14bff58b8aed4d8cb58d80d0a9 100644 (file)
@@ -9,21 +9,28 @@
 #include <drm/drm_panel.h>
 #include <drm/drm_of.h>
 
+/**
+ * DOC: overview
+ *
+ * A set of helper functions to aid DRM drivers in parsing standard DT
+ * properties.
+ */
+
 static void drm_release_of(struct device *dev, void *data)
 {
        of_node_put(data);
 }
 
 /**
- * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * drm_of_crtc_port_mask - find the mask of a registered CRTC by port OF node
  * @dev: DRM device
  * @port: port OF node
  *
  * Given a port OF node, return the possible mask of the corresponding
  * CRTC within a device's list of CRTCs.  Returns zero if not found.
  */
-static uint32_t drm_crtc_port_mask(struct drm_device *dev,
-                                  struct device_node *port)
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+                           struct device_node *port)
 {
        unsigned int index = 0;
        struct drm_crtc *tmp;
@@ -37,6 +44,7 @@ static uint32_t drm_crtc_port_mask(struct drm_device *dev,
 
        return 0;
 }
+EXPORT_SYMBOL(drm_of_crtc_port_mask);
 
 /**
  * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
@@ -62,7 +70,7 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
                        return 0;
                }
 
-               possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+               possible_crtcs |= drm_of_crtc_port_mask(dev, remote_port);
 
                of_node_put(remote_port);
        }
@@ -93,7 +101,7 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
  * drm_of_component_probe - Generic probe function for a component based master
  * @dev: master device containing the OF node
  * @compare_of: compare function used for matching components
- * @master_ops: component master ops to be used
+ * @m_ops: component master ops to be used
  *
  * Parse the platform device OF node and bind all the components associated
  * with the master. Interface ports are added before the encoders in order to
@@ -238,10 +246,17 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
        if (!remote)
                return -ENODEV;
 
+       if (!of_device_is_available(remote)) {
+               of_node_put(remote);
+               return -ENODEV;
+       }
+
        if (panel) {
                *panel = of_drm_find_panel(remote);
-               if (*panel)
+               if (!IS_ERR(*panel))
                        ret = 0;
+               else
+                       *panel = NULL;
        }
 
        /* No panel found yet, check for a bridge next. */
index 308d442a531b2dda2145dd296ee309ccda88e214..b902361dee6e1db300c10ce5798de7bada296b65 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 
+#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
 
@@ -94,6 +95,9 @@ EXPORT_SYMBOL(drm_panel_remove);
  *
  * An error is returned if the panel is already attached to another connector.
  *
+ * When unloading, the driver should detach from the panel by calling
+ * drm_panel_detach().
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
@@ -101,6 +105,13 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
        if (panel->connector)
                return -EBUSY;
 
+       panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
+       if (!panel->link) {
+               dev_err(panel->dev, "failed to link panel to %s\n",
+                       dev_name(connector->dev->dev));
+               return -EINVAL;
+       }
+
        panel->connector = connector;
        panel->drm = connector->dev;
 
@@ -115,10 +126,15 @@ EXPORT_SYMBOL(drm_panel_attach);
  * Detaches a panel from the connector it is attached to. If a panel is not
  * attached to any connector this is effectively a no-op.
  *
+ * This function should not be called by the panel device itself. It
+ * is only for the drm device that called drm_panel_attach().
+ *
  * Return: 0 on success or a negative error code on failure.
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
+       device_link_del(panel->link);
+
        panel->connector = NULL;
        panel->drm = NULL;
 
@@ -135,12 +151,19 @@ EXPORT_SYMBOL(drm_panel_detach);
  * tree node. If a matching panel is found, return a pointer to it.
  *
  * Return: A pointer to the panel registered for the specified device tree
- * node or NULL if no panel matching the device tree node can be found.
+ * node or an ERR_PTR() if no panel matching the device tree node can be found.
+ * Possible error codes returned by this function:
+ * - EPROBE_DEFER: the panel device has not been probed yet, and the caller
+ *   should retry later
+ * - ENODEV: the device is not available (status != "okay" or "ok")
  */
 struct drm_panel *of_drm_find_panel(const struct device_node *np)
 {
        struct drm_panel *panel;
 
+       if (!of_device_is_available(np))
+               return ERR_PTR(-ENODEV);
+
        mutex_lock(&panel_lock);
 
        list_for_each_entry(panel, &panel_list, list) {
@@ -151,7 +174,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np)
        }
 
        mutex_unlock(&panel_lock);
-       return NULL;
+       return ERR_PTR(-EPROBE_DEFER);
 }
 EXPORT_SYMBOL(of_drm_find_panel);
 #endif
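Every caller of of_drm_find_panel() has to move from NULL checks to IS_ERR(), and -EPROBE_DEFER in particular should be propagated so the probe retries once the panel driver binds. A typical conversion sketch:

#include <linux/err.h>
#include <linux/of.h>
#include <drm/drm_panel.h>

static int bind_panel(struct device_node *np, struct drm_panel **out)
{
	struct drm_panel *panel = of_drm_find_panel(np);

	if (IS_ERR(panel))
		return PTR_ERR(panel);	/* -EPROBE_DEFER or -ENODEV */

	*out = panel;
	return 0;
}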
index 4db9c515b74f7713f3ffad8b22ca2d9f57cd8222..896e42a34895da226585977d512a89f0677234da 100644 (file)
@@ -326,64 +326,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
 }
 EXPORT_SYMBOL(drm_legacy_pci_init);
 
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
-{
-       struct pci_dev *root;
-       u32 lnkcap, lnkcap2;
-
-       *mask = 0;
-       if (!dev->pdev)
-               return -EINVAL;
-
-       root = dev->pdev->bus->self;
-
-       /* we've been informed via and serverworks don't make the cut */
-       if (root->vendor == PCI_VENDOR_ID_VIA ||
-           root->vendor == PCI_VENDOR_ID_SERVERWORKS)
-               return -EINVAL;
-
-       pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
-       pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
-
-       if (lnkcap2) {  /* PCIe r3.0-compliant */
-               if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
-                       *mask |= DRM_PCIE_SPEED_25;
-               if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
-                       *mask |= DRM_PCIE_SPEED_50;
-               if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
-                       *mask |= DRM_PCIE_SPEED_80;
-       } else {        /* pre-r3.0 */
-               if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
-                       *mask |= DRM_PCIE_SPEED_25;
-               if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
-                       *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
-       }
-
-       DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
-       return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
-
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw)
-{
-       struct pci_dev *root;
-       u32 lnkcap;
-
-       *mlw = 0;
-       if (!dev->pdev)
-               return -EINVAL;
-
-       root = dev->pdev->bus->self;
-
-       pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
-
-       *mlw = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
-
-       DRM_INFO("probing mlw for device %x:%x = %x\n", root->vendor, root->device, lnkcap);
-       return 0;
-}
-EXPORT_SYMBOL(drm_pcie_get_max_link_width);
-
 #else
 
 void drm_pci_agp_destroy(struct drm_device *dev) {}
index 0350544553010c374c122af5093d7d93aac95e4e..6153cbda239fe6e6d6506503946c4119d77b7c46 100644 (file)
@@ -177,6 +177,10 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
        if (WARN_ON(config->num_total_plane >= 32))
                return -EINVAL;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(dev) &&
+               (!funcs->atomic_destroy_state ||
+                !funcs->atomic_duplicate_state));
+
        ret = drm_mode_object_add(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
        if (ret)
                return ret;
@@ -561,19 +565,66 @@ int drm_plane_check_pixel_format(struct drm_plane *plane,
        if (i == plane->format_count)
                return -EINVAL;
 
-       if (!plane->modifier_count)
-               return 0;
+       if (plane->funcs->format_mod_supported) {
+               if (!plane->funcs->format_mod_supported(plane, format, modifier))
+                       return -EINVAL;
+       } else {
+               if (!plane->modifier_count)
+                       return 0;
 
-       for (i = 0; i < plane->modifier_count; i++) {
-               if (modifier == plane->modifiers[i])
-                       break;
+               for (i = 0; i < plane->modifier_count; i++) {
+                       if (modifier == plane->modifiers[i])
+                               break;
+               }
+               if (i == plane->modifier_count)
+                       return -EINVAL;
        }
-       if (i == plane->modifier_count)
-               return -EINVAL;
 
-       if (plane->funcs->format_mod_supported &&
-           !plane->funcs->format_mod_supported(plane, format, modifier))
+       return 0;
+}
+
+static int __setplane_check(struct drm_plane *plane,
+                           struct drm_crtc *crtc,
+                           struct drm_framebuffer *fb,
+                           int32_t crtc_x, int32_t crtc_y,
+                           uint32_t crtc_w, uint32_t crtc_h,
+                           uint32_t src_x, uint32_t src_y,
+                           uint32_t src_w, uint32_t src_h)
+{
+       int ret;
+
+       /* Check whether this plane is usable on this CRTC */
+       if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
+               DRM_DEBUG_KMS("Invalid crtc for plane\n");
                return -EINVAL;
+       }
+
+       /* Check whether this plane supports the fb pixel format. */
+       ret = drm_plane_check_pixel_format(plane, fb->format->format,
+                                          fb->modifier);
+       if (ret) {
+               struct drm_format_name_buf format_name;
+
+               DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
+                             drm_get_format_name(fb->format->format,
+                                                 &format_name),
+                             fb->modifier);
+               return ret;
+       }
+
+       /* Give drivers some help against integer overflows */
+       if (crtc_w > INT_MAX ||
+           crtc_x > INT_MAX - (int32_t) crtc_w ||
+           crtc_h > INT_MAX ||
+           crtc_y > INT_MAX - (int32_t) crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             crtc_w, crtc_h, crtc_x, crtc_y);
+               return -ERANGE;
+       }
+
+       ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+       if (ret)
+               return ret;
 
        return 0;
 }
@@ -598,6 +649,8 @@ static int __setplane_internal(struct drm_plane *plane,
 {
        int ret = 0;
 
+       WARN_ON(drm_drv_uses_atomic_modeset(plane->dev));
+
        /* No fb means shut it down */
        if (!fb) {
                plane->old_fb = plane->fb;
@@ -611,37 +664,9 @@ static int __setplane_internal(struct drm_plane *plane,
                goto out;
        }
 
-       /* Check whether this plane is usable on this CRTC */
-       if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
-               DRM_DEBUG_KMS("Invalid crtc for plane\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* Check whether this plane supports the fb pixel format. */
-       ret = drm_plane_check_pixel_format(plane, fb->format->format,
-                                          fb->modifier);
-       if (ret) {
-               struct drm_format_name_buf format_name;
-               DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
-                             drm_get_format_name(fb->format->format,
-                                                 &format_name),
-                             fb->modifier);
-               goto out;
-       }
-
-       /* Give drivers some help against integer overflows */
-       if (crtc_w > INT_MAX ||
-           crtc_x > INT_MAX - (int32_t) crtc_w ||
-           crtc_h > INT_MAX ||
-           crtc_y > INT_MAX - (int32_t) crtc_h) {
-               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                             crtc_w, crtc_h, crtc_x, crtc_y);
-               ret = -ERANGE;
-               goto out;
-       }
-
-       ret = drm_framebuffer_check_src_coords(src_x, src_y, src_w, src_h, fb);
+       ret = __setplane_check(plane, crtc, fb,
+                              crtc_x, crtc_y, crtc_w, crtc_h,
+                              src_x, src_y, src_w, src_h);
        if (ret)
                goto out;
 
@@ -665,6 +690,41 @@ out:
        return ret;
 }
 
+static int __setplane_atomic(struct drm_plane *plane,
+                            struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            int32_t crtc_x, int32_t crtc_y,
+                            uint32_t crtc_w, uint32_t crtc_h,
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h,
+                            struct drm_modeset_acquire_ctx *ctx)
+{
+       int ret;
+
+       WARN_ON(!drm_drv_uses_atomic_modeset(plane->dev));
+
+       /* No fb means shut it down */
+       if (!fb)
+               return plane->funcs->disable_plane(plane, ctx);
+
+       /*
+        * FIXME: This is redundant with drm_atomic_plane_check(),
+        * but the legacy cursor/"async" .update_plane() tricks
+        * don't call that so we still need this here. Should remove
+        * this when all .update_plane() implementations have been
+        * fixed to call drm_atomic_plane_check().
+        */
+       ret = __setplane_check(plane, crtc, fb,
+                              crtc_x, crtc_y, crtc_w, crtc_h,
+                              src_x, src_y, src_w, src_h);
+       if (ret)
+               return ret;
+
+       return plane->funcs->update_plane(plane, crtc, fb,
+                                         crtc_x, crtc_y, crtc_w, crtc_h,
+                                         src_x, src_y, src_w, src_h, ctx);
+}
+
 static int setplane_internal(struct drm_plane *plane,
                             struct drm_crtc *crtc,
                             struct drm_framebuffer *fb,
@@ -682,9 +742,15 @@ retry:
        ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
        if (ret)
                goto fail;
-       ret = __setplane_internal(plane, crtc, fb,
-                                 crtc_x, crtc_y, crtc_w, crtc_h,
-                                 src_x, src_y, src_w, src_h, &ctx);
+
+       if (drm_drv_uses_atomic_modeset(plane->dev))
+               ret = __setplane_atomic(plane, crtc, fb,
+                                       crtc_x, crtc_y, crtc_w, crtc_h,
+                                       src_x, src_y, src_w, src_h, &ctx);
+       else
+               ret = __setplane_internal(plane, crtc, fb,
+                                         crtc_x, crtc_y, crtc_w, crtc_h,
+                                         src_x, src_y, src_w, src_h, &ctx);
 
 fail:
        if (ret == -EDEADLK) {
@@ -816,9 +882,14 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                src_h = fb->height << 16;
        }
 
-       ret = __setplane_internal(plane, crtc, fb,
-                                 crtc_x, crtc_y, crtc_w, crtc_h,
-                                 0, 0, src_w, src_h, ctx);
+       if (drm_drv_uses_atomic_modeset(dev))
+               ret = __setplane_atomic(plane, crtc, fb,
+                                       crtc_x, crtc_y, crtc_w, crtc_h,
+                                       0, 0, src_w, src_h, ctx);
+       else
+               ret = __setplane_internal(plane, crtc, fb,
+                                         crtc_x, crtc_y, crtc_w, crtc_h,
+                                         0, 0, src_w, src_h, ctx);
 
        if (fb)
                drm_framebuffer_put(fb);
@@ -1092,8 +1163,10 @@ retry:
                /* Keep the old fb, don't unref it. */
                plane->old_fb = NULL;
        } else {
-               plane->fb = fb;
-               drm_framebuffer_get(fb);
+               if (!plane->state) {
+                       plane->fb = fb;
+                       drm_framebuffer_get(fb);
+               }
        }
 
 out:
index f88f681615193040e5375ecaca9c05c4590267d0..621f17643bb07c69dae274c7c477f934a9c7e7fc 100644 (file)
@@ -440,6 +440,7 @@ out:
  * @src_y: y offset of @fb for panning
  * @src_w: width of source rectangle in @fb
  * @src_h: height of source rectangle in @fb
+ * @ctx: lock acquire context, not used here
  *
  * Provides a default plane update handler using the atomic plane update
  * functions. It is fully left to the driver to check plane constraints and
@@ -455,7 +456,8 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
                            int crtc_x, int crtc_y,
                            unsigned int crtc_w, unsigned int crtc_h,
                            uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h)
+                           uint32_t src_w, uint32_t src_h,
+                           struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_plane_state *plane_state;
 
@@ -489,6 +491,7 @@ EXPORT_SYMBOL(drm_plane_helper_update);
 /**
  * drm_plane_helper_disable() - Transitional helper for plane disable
  * @plane: plane to disable
+ * @ctx: lock acquire context, not used here
  *
  * Provides a default plane disable handler using the atomic plane update
  * functions. It is fully left to the driver to check plane constraints and
@@ -499,9 +502,11 @@ EXPORT_SYMBOL(drm_plane_helper_update);
  * RETURNS:
  * Zero on success, error code on failure
  */
-int drm_plane_helper_disable(struct drm_plane *plane)
+int drm_plane_helper_disable(struct drm_plane *plane,
+                            struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_plane_state *plane_state;
+       struct drm_framebuffer *old_fb;
 
        /* crtc helpers love to call disable functions for already disabled hw
         * functions. So cope with that. */
@@ -521,8 +526,9 @@ int drm_plane_helper_disable(struct drm_plane *plane)
        plane_state->plane = plane;
 
        plane_state->crtc = NULL;
+       old_fb = plane_state->fb;
        drm_atomic_set_fb_for_plane(plane_state, NULL);
 
-       return drm_plane_helper_commit(plane, plane_state, plane->fb);
+       return drm_plane_helper_commit(plane, plane_state, old_fb);
 }
 EXPORT_SYMBOL(drm_plane_helper_disable);
index 397b46b337399952fb2a143146294cc818deb49d..186db2e4c57a11d60dbb4b3b7912e1de7fc03626 100644 (file)
@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
 /**
  * drm_gem_map_attach - dma_buf attach implementation for GEM
  * @dma_buf: buffer to attach device to
- * @target_dev: not used
  * @attach: buffer attachment data
  *
  * Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
  *
  * Returns 0 on success, negative error code on failure.
  */
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach)
 {
        struct drm_prime_attachment *prime_attach;
@@ -434,35 +433,6 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
 
-/**
- * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback.
- */
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-                                unsigned long page_num)
-{
-       return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic);
-
-/**
- * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback.
- */
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-                                 unsigned long page_num, void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic);
-
 /**
  * drm_gem_dmabuf_kmap - map implementation for GEM
  * @dma_buf: buffer to be mapped
@@ -520,9 +490,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = drm_gem_dmabuf_kmap,
-       .map_atomic = drm_gem_dmabuf_kmap_atomic,
        .unmap = drm_gem_dmabuf_kunmap,
-       .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
index b25f98f33f6cb55e311b237b3c31d26f554edbc3..0e7fc3e7dfb48878671cbf709063e1489485186b 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_print.h>
 
+void __drm_puts_coredump(struct drm_printer *p, const char *str)
+{
+       struct drm_print_iterator *iterator = p->arg;
+       ssize_t len;
+
+       if (!iterator->remain)
+               return;
+
+       if (iterator->offset < iterator->start) {
+               ssize_t copy;
+
+               len = strlen(str);
+
+               if (iterator->offset + len <= iterator->start) {
+                       iterator->offset += len;
+                       return;
+               }
+
+               copy = len - (iterator->start - iterator->offset);
+
+               if (copy > iterator->remain)
+                       copy = iterator->remain;
+
+               /* Copy out the bit of the string that we need */
+               memcpy(iterator->data,
+                       str + (iterator->start - iterator->offset), copy);
+
+               iterator->offset = iterator->start + copy;
+               iterator->remain -= copy;
+       } else {
+               ssize_t pos = iterator->offset - iterator->start;
+
+               len = min_t(ssize_t, strlen(str), iterator->remain);
+
+               memcpy(iterator->data + pos, str, len);
+
+               iterator->offset += len;
+               iterator->remain -= len;
+       }
+}
+EXPORT_SYMBOL(__drm_puts_coredump);
+
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
+{
+       struct drm_print_iterator *iterator = p->arg;
+       size_t len;
+       char *buf;
+
+       if (!iterator->remain)
+               return;
+
+       /* Figure out how big the string will be */
+       len = snprintf(NULL, 0, "%pV", vaf);
+
+       /* This is the easiest path, we've already advanced beyond the offset */
+       if (iterator->offset + len <= iterator->start) {
+               iterator->offset += len;
+               return;
+       }
+
+       /* Then check if we can directly copy into the target buffer */
+       if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
+               ssize_t pos = iterator->offset - iterator->start;
+
+               snprintf(((char *) iterator->data) + pos,
+                       iterator->remain, "%pV", vaf);
+
+               iterator->offset += len;
+               iterator->remain -= len;
+
+               return;
+       }
+
+       /*
+        * Finally, hit the slow path and make a temporary string to copy over
+        * using _drm_puts_coredump
+        * using __drm_puts_coredump()
+       buf = kmalloc(len + 1, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+       if (!buf)
+               return;
+
+       snprintf(buf, len + 1, "%pV", vaf);
+       __drm_puts_coredump(p, (const char *) buf);
+
+       kfree(buf);
+}
+EXPORT_SYMBOL(__drm_printfn_coredump);
+
+void __drm_puts_seq_file(struct drm_printer *p, const char *str)
+{
+       seq_puts(p->arg, str);
+}
+EXPORT_SYMBOL(__drm_puts_seq_file);
+
 void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
 {
        seq_printf(p->arg, "%pV", vaf);
@@ -48,6 +142,23 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
 }
 EXPORT_SYMBOL(__drm_printfn_debug);
 
+/**
+ * drm_puts - print a const string to a &drm_printer stream
+ * @p: the &drm printer
+ * @str: const string
+ *
+ * Allows &drm_printer types that provide a dedicated constant-string
+ * output hook to use it instead of routing through format processing.
+ */
+void drm_puts(struct drm_printer *p, const char *str)
+{
+       if (p->puts)
+               p->puts(p, str);
+       else
+               drm_printf(p, "%s", str);
+}
+EXPORT_SYMBOL(drm_puts);
+
 /**
  * drm_printf - print to a &drm_printer stream
  * @p: the &drm_printer
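The coredump printer streams an arbitrarily large dump through a caller-supplied window described by start/remain, so a driver can fill a small buffer per read call. A sketch of driving it from a read handler, assuming the drm_coredump_printer() constructor that accompanies these callbacks in the header:

#include <linux/ktime.h>
#include <drm/drm_print.h>

/* Copy a window of the (virtual) dump into "data"; returns bytes written. */
static ssize_t dump_state(void *data, size_t count, size_t offset)
{
	struct drm_print_iterator iter = {
		.data   = data,
		.start  = offset,
		.remain = count,
		.offset = 0,
	};
	struct drm_printer p = drm_coredump_printer(&iter);

	drm_puts(&p, "---\n");
	drm_printf(&p, "time: %llu\n",
		   (unsigned long long)ktime_get_ns());

	return count - iter.remain;
}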
index 527743394150a83b684aba3b5c647ec10a0581a2..a1bb157bfdfaeb9bad32c5f39b200970547ec039 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/moduleparam.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_client.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_crtc_helper.h>
@@ -88,9 +89,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                            struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       uint32_t *ids = connector->encoder_ids;
        enum drm_mode_status ret = MODE_OK;
-       unsigned int i;
+       struct drm_encoder *encoder;
+       int i;
 
        /* Step 1: Validate against connector */
        ret = drm_connector_mode_valid(connector, mode);
@@ -98,13 +99,9 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
                return ret;
 
        /* Step 2: Validate against encoders and crtcs */
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct drm_encoder *encoder = drm_encoder_find(dev, NULL, ids[i]);
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                struct drm_crtc *crtc;
 
-               if (!encoder)
-                       continue;
-
                ret = drm_encoder_mode_valid(encoder, mode);
                if (ret != MODE_OK) {
                        /* No point in continuing for crtc check as this encoder
@@ -363,7 +360,7 @@ EXPORT_SYMBOL(drm_helper_probe_detect);
  *    using the VESA GTF/CVT formulas.
  *
  * 3. Modes are moved from the probed_modes list to the modes list. Potential
- *    duplicates are merged together (see drm_mode_connector_list_update()).
+ *    duplicates are merged together (see drm_connector_list_update()).
  *    After this step the probed_modes list will be empty again.
  *
  * 4. Any non-stale mode on the modes list then undergoes validation
@@ -475,7 +472,7 @@ retry:
        if (connector->status == connector_status_disconnected) {
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
                        connector->base.id, connector->name);
-               drm_mode_connector_update_edid_property(connector, NULL);
+               drm_connector_update_edid_property(connector, NULL);
                verbose_prune = false;
                goto prune;
        }
@@ -488,7 +485,7 @@ retry:
        if (count == 0)
                goto prune;
 
-       drm_mode_connector_list_update(connector);
+       drm_connector_list_update(connector);
 
        if (connector->interlace_allowed)
                mode_flags |= DRM_MODE_FLAG_INTERLACE;
@@ -563,6 +560,8 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev)
        drm_sysfs_hotplug_event(dev);
        if (dev->mode_config.funcs->output_poll_changed)
                dev->mode_config.funcs->output_poll_changed(dev);
+
+       drm_client_dev_hotplug(dev);
 }
 EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
index 1f8031e30f5397bf97054b3fcffc11408c35759e..cdb10f885a4febea85fc5272e22f1378d770da8b 100644 (file)
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
 
        drm_mode_object_unregister(blob->dev, &blob->base);
 
-       kfree(blob);
+       kvfree(blob);
 }
 
 /**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
                return ERR_PTR(-EINVAL);
 
-       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
        if (!blob)
                return ERR_PTR(-ENOMEM);
 
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
                                    true, drm_property_free_blob);
        if (ret) {
-               kfree(blob);
+               kvfree(blob);
                return ERR_PTR(-EINVAL);
        }
 
index 7a00455ca568b0e82f35f6a6a5d972f26d41b204..51fa978f0d236821ac020b16144f50e2fbb854bb 100644 (file)
@@ -52,7 +52,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
                                     struct drm_crtc_state *state)
 {
        bool has_primary = state->plane_mask &
-                          BIT(drm_plane_index(crtc->primary));
+                          drm_plane_mask(crtc->primary);
 
        /* We always want to have an active plane with an active CRTC */
        if (has_primary != state->enable)
@@ -281,13 +281,13 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
        if (ret)
                return ret;
 
-       encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+       encoder->possible_crtcs = drm_crtc_mask(crtc);
        ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
                               DRM_MODE_ENCODER_NONE, NULL);
        if (ret || !connector)
                return ret;
 
-       return drm_mode_connector_attach_encoder(connector, encoder);
+       return drm_connector_attach_encoder(connector, encoder);
 }
 EXPORT_SYMBOL(drm_simple_display_pipe_init);
 
index d4f4ce4845296aca38936902f2dc4698c2720902..adb3cb27d31e6fa6aa1f0c3102c7d8ede5599169 100644 (file)
@@ -207,7 +207,6 @@ static const struct dma_fence_ops drm_syncobj_null_fence_ops = {
        .get_driver_name = drm_syncobj_null_fence_get_name,
        .get_timeline_name = drm_syncobj_null_fence_get_name,
        .enable_signaling = drm_syncobj_null_fence_enable_signaling,
-       .wait = dma_fence_default_wait,
        .release = NULL,
 };
 
index 2660543ad86a571dab759a45370369d43fbb16c0..c3301046dfaa54d898198056dfb0631a9afb1b86 100644 (file)
@@ -100,7 +100,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if IS_ENABLED(CONFIG_AGP)
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
@@ -173,7 +173,7 @@ vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
 }
 #else
-static int drm_vm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
 {
        return VM_FAULT_SIGBUS;
 }
@@ -189,7 +189,7 @@ static int drm_vm_fault(struct vm_fault *vmf)
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static int drm_vm_shm_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
@@ -291,7 +291,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
-static int drm_vm_dma_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_file *priv = vma->vm_file->private_data;
@@ -326,7 +326,7 @@ static int drm_vm_dma_fault(struct vm_fault *vmf)
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
-static int drm_vm_sg_fault(struct vm_fault *vmf)
+static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_local_map *map = vma->vm_private_data;
index 23c749c05b5aa1fa1a579088294aebed9e934e00..a6b2fe36b025228cbcd0be546ce110e80bba7e39 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  * Copyright (c) 2012 David Airlie <airlied@linux.ie>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
new file mode 100644 (file)
index 0000000..c20e6fe
--- /dev/null
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_property.h>
+#include <drm/drm_writeback.h>
+#include <drm/drmP.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: overview
+ *
+ * Writeback connectors are used to expose hardware which can write the output
+ * from a CRTC to a memory buffer. They are used and act similarly to other
+ * types of connectors, with some important differences:
+ *
+ * * Writeback connectors don't provide a way to output visually to the user.
+ *
+ * * Writeback connectors are visible to userspace only when the client sets
+ *   DRM_CLIENT_CAP_WRITEBACK_CONNECTORS.
+ *
+ * * Writeback connectors don't have EDID.
+ *
+ * A framebuffer may only be attached to a writeback connector when the
+ * connector is attached to a CRTC. The WRITEBACK_FB_ID property which sets the
+ * framebuffer applies only to a single commit (see below). A framebuffer may
+ * not be attached while the CRTC is off.
+ *
+ * Unlike with planes, when a writeback framebuffer is removed by userspace,
+ * DRM makes no attempt to remove it from active use by the connector. This is
+ * because no method is provided to abort a writeback operation, and in any
+ * case making a new commit whilst a writeback is ongoing is undefined (see
+ * WRITEBACK_OUT_FENCE_PTR below). As soon as the current writeback is finished,
+ * the framebuffer will automatically no longer be in active use. As it will
+ * also have already been removed from the framebuffer list, there will be no
+ * way for any userspace application to retrieve a reference to it in the
+ * intervening period.
+ *
+ * Writeback connectors have some additional properties, which userspace
+ * can use to query and control them:
+ *
+ *  "WRITEBACK_FB_ID":
+ *     Write-only object property storing a DRM_MODE_OBJECT_FB: it stores the
+ *     framebuffer to be written by the writeback connector. This property is
+ *     similar to the FB_ID property on planes, but will always read as zero
+ *     and is not preserved across commits.
+ *     Userspace must set this property to an output buffer every time it
+ *     wishes the buffer to get filled.
+ *
+ *  "WRITEBACK_PIXEL_FORMATS":
+ *     Immutable blob property to store the supported pixel formats table. The
+ *     data is an array of u32 DRM_FORMAT_* fourcc values.
+ *     Userspace can use this blob to find out what pixel formats are supported
+ *     by the connector's writeback engine.
+ *
+ *  "WRITEBACK_OUT_FENCE_PTR":
+ *     Userspace can use this property to provide a pointer for the kernel to
+ *     fill with a sync_file file descriptor, which will signal once the
+ *     writeback is finished. The value should be the address of a 32-bit
+ *     signed integer, cast to a u64.
+ *     Userspace should wait for this fence to signal before making another
+ *     commit affecting any of the same CRTCs, Planes or Connectors.
+ *     **Failure to do so will result in undefined behaviour.**
+ *     For this reason it is strongly recommended that all userspace
+ *     applications making use of writeback connectors *always* retrieve an
+ *     out-fence for the commit and use it appropriately.
+ *     From userspace, this property will always read as zero.
+ */
+
+#define fence_to_wb_connector(x) container_of(x->lock, \
+                                             struct drm_writeback_connector, \
+                                             fence_lock)
+
+static const char *drm_writeback_fence_get_driver_name(struct dma_fence *fence)
+{
+       struct drm_writeback_connector *wb_connector =
+               fence_to_wb_connector(fence);
+
+       return wb_connector->base.dev->driver->name;
+}
+
+static const char *
+drm_writeback_fence_get_timeline_name(struct dma_fence *fence)
+{
+       struct drm_writeback_connector *wb_connector =
+               fence_to_wb_connector(fence);
+
+       return wb_connector->timeline_name;
+}
+
+static bool drm_writeback_fence_enable_signaling(struct dma_fence *fence)
+{
+       return true;
+}
+
+static const struct dma_fence_ops drm_writeback_fence_ops = {
+       .get_driver_name = drm_writeback_fence_get_driver_name,
+       .get_timeline_name = drm_writeback_fence_get_timeline_name,
+       .enable_signaling = drm_writeback_fence_enable_signaling,
+       .wait = dma_fence_default_wait,
+};
+
+static int create_writeback_properties(struct drm_device *dev)
+{
+       struct drm_property *prop;
+
+       if (!dev->mode_config.writeback_fb_id_property) {
+               prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
+                                                 "WRITEBACK_FB_ID",
+                                                 DRM_MODE_OBJECT_FB);
+               if (!prop)
+                       return -ENOMEM;
+               dev->mode_config.writeback_fb_id_property = prop;
+       }
+
+       if (!dev->mode_config.writeback_pixel_formats_property) {
+               prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+                                          DRM_MODE_PROP_ATOMIC |
+                                          DRM_MODE_PROP_IMMUTABLE,
+                                          "WRITEBACK_PIXEL_FORMATS", 0);
+               if (!prop)
+                       return -ENOMEM;
+               dev->mode_config.writeback_pixel_formats_property = prop;
+       }
+
+       if (!dev->mode_config.writeback_out_fence_ptr_property) {
+               prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+                                                "WRITEBACK_OUT_FENCE_PTR", 0,
+                                                U64_MAX);
+               if (!prop)
+                       return -ENOMEM;
+               dev->mode_config.writeback_out_fence_ptr_property = prop;
+       }
+
+       return 0;
+}
+
+static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+/**
+ * drm_writeback_connector_init - Initialize a writeback connector and its properties
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @con_funcs: Connector funcs vtable
+ * @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values. It will also create an internal encoder associated with the
+ * drm_writeback_connector and set it to use the @enc_helper_funcs vtable for
+ * the encoder helper.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init(struct drm_device *dev,
+                                struct drm_writeback_connector *wb_connector,
+                                const struct drm_connector_funcs *con_funcs,
+                                const struct drm_encoder_helper_funcs *enc_helper_funcs,
+                                const u32 *formats, int n_formats)
+{
+       struct drm_property_blob *blob;
+       struct drm_connector *connector = &wb_connector->base;
+       struct drm_mode_config *config = &dev->mode_config;
+       int ret = create_writeback_properties(dev);
+
+       if (ret != 0)
+               return ret;
+
+       blob = drm_property_create_blob(dev, n_formats * sizeof(*formats),
+                                       formats);
+       if (IS_ERR(blob))
+               return PTR_ERR(blob);
+
+       drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+       ret = drm_encoder_init(dev, &wb_connector->encoder,
+                              &drm_writeback_encoder_funcs,
+                              DRM_MODE_ENCODER_VIRTUAL, NULL);
+       if (ret)
+               goto fail;
+
+       connector->interlace_allowed = 0;
+
+       ret = drm_connector_init(dev, connector, con_funcs,
+                                DRM_MODE_CONNECTOR_WRITEBACK);
+       if (ret)
+               goto connector_fail;
+
+       ret = drm_connector_attach_encoder(connector,
+                                               &wb_connector->encoder);
+       if (ret)
+               goto attach_fail;
+
+       INIT_LIST_HEAD(&wb_connector->job_queue);
+       spin_lock_init(&wb_connector->job_lock);
+
+       wb_connector->fence_context = dma_fence_context_alloc(1);
+       spin_lock_init(&wb_connector->fence_lock);
+       snprintf(wb_connector->timeline_name,
+                sizeof(wb_connector->timeline_name),
+                "CONNECTOR:%d-%s", connector->base.id, connector->name);
+
+       drm_object_attach_property(&connector->base,
+                                  config->writeback_out_fence_ptr_property, 0);
+
+       drm_object_attach_property(&connector->base,
+                                  config->writeback_fb_id_property, 0);
+
+       drm_object_attach_property(&connector->base,
+                                  config->writeback_pixel_formats_property,
+                                  blob->base.id);
+       wb_connector->pixel_formats_blob_ptr = blob;
+
+       return 0;
+
+attach_fail:
+       drm_connector_cleanup(connector);
+connector_fail:
+       drm_encoder_cleanup(&wb_connector->encoder);
+fail:
+       drm_property_blob_put(blob);
+       return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
+
+/**
+ * drm_writeback_queue_job - Queue a writeback job for later signalling
+ * @wb_connector: The writeback connector to queue a job on
+ * @job: The job to queue
+ *
+ * This function adds a job to the job_queue for a writeback connector. It
+ * takes ownership of the writeback job, so any other references to the job
+ * must be cleared after calling this function.
+ *
+ * Drivers must ensure that for a given writeback connector, jobs are queued in
+ * exactly the same order as they will be completed by the hardware (and
+ * signaled via drm_writeback_signal_completion).
+ *
+ * For every call to drm_writeback_queue_job() there must be exactly one call to
+ * drm_writeback_signal_completion().
+ *
+ * See also: drm_writeback_signal_completion()
+ */
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+                            struct drm_writeback_job *job)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&wb_connector->job_lock, flags);
+       list_add_tail(&job->list_entry, &wb_connector->job_queue);
+       spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+}
+EXPORT_SYMBOL(drm_writeback_queue_job);
+
+/*
+ * @cleanup_work: deferred cleanup of a writeback job
+ *
+ * The job cannot be cleaned up directly in drm_writeback_signal_completion,
+ * because it may be called in interrupt context. Dropping the framebuffer
+ * reference can sleep, and so the cleanup is deferred to a workqueue.
+ */
+static void cleanup_work(struct work_struct *work)
+{
+       struct drm_writeback_job *job = container_of(work,
+                                                    struct drm_writeback_job,
+                                                    cleanup_work);
+       drm_framebuffer_put(job->fb);
+       kfree(job);
+}
+
+
+/**
+ * drm_writeback_signal_completion - Signal the completion of a writeback job
+ * @wb_connector: The writeback connector whose job is complete
+ * @status: Status code to set in the writeback out_fence (0 for success)
+ *
+ * Drivers should call this to signal the completion of a previously queued
+ * writeback job. It should be called as soon as possible after the hardware
+ * has finished writing, and may be called from interrupt context.
+ * It is the driver's responsibility to ensure that for a given connector, the
+ * hardware completes writeback jobs in the same order as they are queued.
+ *
+ * Unless the driver is holding its own reference to the framebuffer, it must
+ * not be accessed after calling this function.
+ *
+ * See also: drm_writeback_queue_job()
+ */
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+                               int status)
+{
+       unsigned long flags;
+       struct drm_writeback_job *job;
+
+       spin_lock_irqsave(&wb_connector->job_lock, flags);
+       job = list_first_entry_or_null(&wb_connector->job_queue,
+                                      struct drm_writeback_job,
+                                      list_entry);
+       if (job) {
+               list_del(&job->list_entry);
+               if (job->out_fence) {
+                       if (status)
+                               dma_fence_set_error(job->out_fence, status);
+                       dma_fence_signal(job->out_fence);
+                       dma_fence_put(job->out_fence);
+               }
+       }
+       spin_unlock_irqrestore(&wb_connector->job_lock, flags);
+
+       if (WARN_ON(!job))
+               return;
+
+       INIT_WORK(&job->cleanup_work, cleanup_work);
+       queue_work(system_long_wq, &job->cleanup_work);
+}
+EXPORT_SYMBOL(drm_writeback_signal_completion);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector)
+{
+       struct dma_fence *fence;
+
+       if (WARN_ON(wb_connector->base.connector_type !=
+                   DRM_MODE_CONNECTOR_WRITEBACK))
+               return NULL;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               return NULL;
+
+       dma_fence_init(fence, &drm_writeback_fence_ops,
+                      &wb_connector->fence_lock, wb_connector->fence_context,
+                      ++wb_connector->fence_seqno);
+
+       return fence;
+}
+EXPORT_SYMBOL(drm_writeback_get_out_fence);
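To make the property semantics above concrete, here is a minimal userspace sketch (not part of this patch) that commits one writeback job and waits on the out-fence. The libdrm atomic calls are real; everything else — the function name, the ID parameters, and the assumption that the CRTC is already active with a mode — is illustrative, and a real client would resolve the IDs via drmModeObjectGetProperties().

#include <poll.h>
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* All IDs are placeholders the caller is assumed to have looked up. */
static int commit_one_writeback(int fd, uint32_t wb_conn_id, uint32_t crtc_id,
                                uint32_t fb_id, uint32_t prop_crtc_id,
                                uint32_t prop_wb_fb_id,
                                uint32_t prop_out_fence_ptr)
{
        int32_t out_fence_fd = -1;
        drmModeAtomicReqPtr req;
        struct pollfd pfd;
        int ret;

        /* Writeback connectors stay hidden until this cap is set. */
        drmSetClientCap(fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);

        req = drmModeAtomicAlloc();
        if (!req)
                return -1;

        /* Attach the connector to an active CRTC and hand it one buffer;
         * WRITEBACK_FB_ID applies to this single commit only. */
        drmModeAtomicAddProperty(req, wb_conn_id, prop_crtc_id, crtc_id);
        drmModeAtomicAddProperty(req, wb_conn_id, prop_wb_fb_id, fb_id);

        /* "Address of a 32-bit signed integer, cast to a u64", exactly as
         * the DOC comment requires. */
        drmModeAtomicAddProperty(req, wb_conn_id, prop_out_fence_ptr,
                                 (uint64_t)(uintptr_t)&out_fence_fd);

        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET,
                                  NULL);
        drmModeAtomicFree(req);
        if (ret)
                return ret;

        /* Wait for the writeback before reusing fb_id or committing again
         * to the same CRTC/connector; skipping this is the undefined
         * behaviour called out above. */
        pfd.fd = out_fence_fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);
        close(out_fence_fd);
        return 0;
}

On the driver side the contract is symmetric: each buffer handed over through WRITEBACK_FB_ID becomes one drm_writeback_queue_job() call in the atomic commit path, matched later by exactly one drm_writeback_signal_completion() from the hardware-done path, in queue order.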
index e5013a9991477eda57913a80f7c978a199727a62..9b2720b41571f245a1ba5ad677bb0566d95ca207 100644 (file)
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];
+               struct drm_sched_rq *rq;
 
                if (gpu) {
-                       drm_sched_entity_init(&gpu->sched,
-                               &ctx->sched_entity[i],
-                               &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-                               NULL);
+                       rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+                       drm_sched_entity_init(&ctx->sched_entity[i],
+                                             &rq, 1, NULL);
                        }
        }
 
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
                                gpu->lastctx = NULL;
                        mutex_unlock(&gpu->lock);
 
-                       drm_sched_entity_fini(&gpu->sched,
-                                             &ctx->sched_entity[i]);
+                       drm_sched_entity_destroy(&ctx->sched_entity[i]);
                }
        }
 
@@ -631,8 +630,11 @@ static struct platform_driver etnaviv_platform_driver = {
        },
 };
 
+static struct platform_device *etnaviv_drm;
+
 static int __init etnaviv_init(void)
 {
+       struct platform_device *pdev;
        int ret;
        struct device_node *np;
 
@@ -644,7 +646,7 @@ static int __init etnaviv_init(void)
 
        ret = platform_driver_register(&etnaviv_platform_driver);
        if (ret != 0)
-               platform_driver_unregister(&etnaviv_gpu_driver);
+               goto unregister_gpu_driver;
 
        /*
         * If the DT contains at least one available GPU device, instantiate
@@ -653,20 +655,33 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-
-               platform_device_register_simple("etnaviv", -1, NULL, 0);
+               pdev = platform_device_register_simple("etnaviv", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev)) {
+                       ret = PTR_ERR(pdev);
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               etnaviv_drm = pdev;
                of_node_put(np);
                break;
        }
 
+       return 0;
+
+unregister_platform_driver:
+       platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+       platform_driver_unregister(&etnaviv_gpu_driver);
        return ret;
 }
 module_init(etnaviv_init);
 
 static void __exit etnaviv_exit(void)
 {
-       platform_driver_unregister(&etnaviv_gpu_driver);
+       platform_device_unregister(etnaviv_drm);
        platform_driver_unregister(&etnaviv_platform_driver);
+       platform_driver_unregister(&etnaviv_gpu_driver);
 }
 module_exit(etnaviv_exit);
 
index d36c7bbe66db4d3b3e2b7fec541be70104caecb2..8d02d1b7dcf5a54b5bc1623b847f73dad1b493d0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/time64.h>
 #include <linux/types.h>
 #include <linux/sizes.h>
+#include <linux/mm_types.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -53,7 +54,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_fault *vmf);
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
index 209ef1274b8063aae26b79e9f650cc160292c17a..1fa74226db91f6f1ed42cdc57f6d10bffe2604de 100644 (file)
@@ -169,31 +169,30 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return obj->ops->mmap(obj, vma);
 }
 
-int etnaviv_gem_fault(struct vm_fault *vmf)
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
-       int ret;
+       int err;
 
        /*
         * Make sure we don't parallel update on a fault, nor move or remove
-        * something from beneath our feet.  Note that vm_insert_page() is
+        * something from beneath our feet.  Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
-       ret = mutex_lock_interruptible(&etnaviv_obj->lock);
-       if (ret)
-               goto out;
-
+       err = mutex_lock_interruptible(&etnaviv_obj->lock);
+       if (err)
+               return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);
 
        if (IS_ERR(pages)) {
-               ret = PTR_ERR(pages);
-               goto out;
+               err = PTR_ERR(pages);
+               return vmf_error(err);
        }
 
        /* We don't use vmf->pgoff since that has the fake offset: */
@@ -204,25 +203,7 @@ int etnaviv_gem_fault(struct vm_fault *vmf)
        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
 
-       ret = vm_insert_page(vma, vmf->address, page);
-
-out:
-       switch (ret) {
-       case -EAGAIN:
-       case 0:
-       case -ERESTARTSYS:
-       case -EINTR:
-       case -EBUSY:
-               /*
-                * EBUSY is ok: this just means that another thread
-                * already did the job.
-                */
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
+       return vmf_insert_page(vma, vmf->address, page);
 }
 
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
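The conversion above follows the generic vm_fault_t pattern now used throughout these drivers: vmf_error() replaces the hand-rolled errno switch (mapping -ENOMEM to VM_FAULT_OOM and everything else to VM_FAULT_SIGBUS), and vmf_insert_page() hands back a vm_fault_t directly. A minimal sketch of the idiom; my_gem_fault() and my_lookup_page() are hypothetical names:

static vm_fault_t my_gem_fault(struct vm_fault *vmf)
{
        struct page *page;
        int err;

        err = my_lookup_page(vmf, &page);       /* hypothetical helper */
        if (err)
                return vmf_error(err);          /* errno -> VM_FAULT_* */

        /* vmf_insert_page() already returns a vm_fault_t, so the old
         * switch over -EAGAIN/-EBUSY/-ENOMEM/... disappears entirely. */
        return vmf_insert_page(vmf->vma, vmf->address, page);
}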
index 46ecd3e66ac9889c9f08fdf2dba8e9cc2c3673bc..983e67f19e4508b5023cb5efba221981f4a9b4b0 100644 (file)
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
                dma_fence_put(submit->in_fence);
        if (submit->out_fence) {
                /* first remove from IDR, so fence can not be found anymore */
-               mutex_lock(&submit->gpu->fence_idr_lock);
+               mutex_lock(&submit->gpu->fence_lock);
                idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-               mutex_unlock(&submit->gpu->fence_idr_lock);
+               mutex_unlock(&submit->gpu->fence_lock);
                dma_fence_put(submit->out_fence);
        }
        kfree(submit->pmrs);
index 686f6552db48d927f4b00f82576dc6c0e037837e..f225fbc6edd2d94c7b82a37b56c401a66966fd95 100644 (file)
@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 
 free_buffer:
        etnaviv_cmdbuf_free(&gpu->buffer);
+       gpu->buffer.suballoc = NULL;
 destroy_iommu:
        etnaviv_iommu_destroy(gpu->mmu);
        gpu->mmu = NULL;
@@ -1027,11 +1028,6 @@ static const char *etnaviv_fence_get_timeline_name(struct dma_fence *fence)
        return dev_name(f->gpu->dev);
 }
 
-static bool etnaviv_fence_enable_signaling(struct dma_fence *fence)
-{
-       return true;
-}
-
 static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
        struct etnaviv_fence *f = to_etnaviv_fence(fence);
@@ -1049,9 +1045,7 @@ static void etnaviv_fence_release(struct dma_fence *fence)
 static const struct dma_fence_ops etnaviv_fence_ops = {
        .get_driver_name = etnaviv_fence_get_driver_name,
        .get_timeline_name = etnaviv_fence_get_timeline_name,
-       .enable_signaling = etnaviv_fence_enable_signaling,
        .signaled = etnaviv_fence_signaled,
-       .wait = dma_fence_default_wait,
        .release = etnaviv_fence_release,
 };
 
@@ -1733,7 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 
        gpu->dev = &pdev->dev;
        mutex_init(&gpu->lock);
-       mutex_init(&gpu->fence_idr_lock);
+       mutex_init(&gpu->fence_lock);
 
        /* Map registers: */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index dd430f0f8ff5158975e21f26ad97fc3aa5bae2cb..9a75a6937268eebff62e04a13b3669fb3d5934b1 100644 (file)
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
        u32 idle_mask;
 
        /* Fencing support */
-       struct mutex fence_idr_lock;
+       struct mutex fence_lock;
        struct idr fence_idr;
        u32 next_fence;
        u32 active_fence;
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
        struct work_struct sync_point_work;
        int sync_point_event;
 
+       /* hang detection */
+       u32 hangcheck_dma_addr;
+
        void __iomem *mmio;
        int irq;
 
index 71fbc1f96cb63d6563284f26e7ec72cbf2cea9b6..f1c88d8ad5ba880fefbd123c74986ce6154713af 100644 (file)
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
 
 static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
 {
-       u32 *p;
-       int ret, i;
+       int ret;
 
        /* allocate scratch page */
        etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
                ret = -ENOMEM;
                goto fail_mem;
        }
-       p = etnaviv_domain->base.bad_page_cpu;
-       for (i = 0; i < SZ_4K / 4; i++)
-               *p++ = 0xdead55aa;
+
+       memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+                SZ_4K / sizeof(u32));
 
        etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
                                               SZ_4K, &etnaviv_domain->pta_dma,
index a74eb57af15bc65ba2ff4a2ed3906da29afe959b..69e9b431bf1f02ec7c87e5f740f80595ad98dd53 100644 (file)
@@ -10,6 +10,7 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_sched.h"
+#include "state.xml.h"
 
 static int etnaviv_job_hang_limit = 0;
 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 {
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
+       u32 dma_addr;
+       int change;
+
+       /*
+        * If the GPU managed to complete this job's fence, the timeout is
+        * spurious. Bail out.
+        */
+       if (fence_completed(gpu, submit->out_fence->seqno))
+               return;
+
+       /*
+        * If the GPU is still making forward progress on the front-end (which
+        * should never loop) we shift out the timeout to give it a chance to
+        * finish the job.
+        */
+       dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+       change = dma_addr - gpu->hangcheck_dma_addr;
+       if (change < 0 || change > 16) {
+               gpu->hangcheck_dma_addr = dma_addr;
+               schedule_delayed_work(&sched_job->work_tdr,
+                                     sched_job->sched->timeout);
+               return;
+       }
 
        /* block scheduler */
        kthread_park(gpu->sched.thread);
@@ -116,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                           struct etnaviv_gem_submit *submit)
 {
-       int ret;
+       int ret = 0;
 
-       ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-                                sched_entity, submit->cmdbuf.ctx);
+       /*
+        * Hold the fence lock across the whole operation to avoid jobs being
+        * pushed out of order with regard to their sched fence seqnos as
+        * allocated in drm_sched_job_init.
+        */
+       mutex_lock(&submit->gpu->fence_lock);
+
+       ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+                                submit->cmdbuf.ctx);
        if (ret)
-               return ret;
+               goto out_unlock;
 
        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-       mutex_lock(&submit->gpu->fence_idr_lock);
        submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
-       mutex_unlock(&submit->gpu->fence_idr_lock);
-       if (submit->out_fence_id < 0)
-               return -ENOMEM;
+       if (submit->out_fence_id < 0) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
 
        /* the scheduler holds on to the job now */
        kref_get(&submit->refcount);
 
        drm_sched_entity_push_job(&submit->sched_job, sched_entity);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&submit->gpu->fence_lock);
+
+       return ret;
 }
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)
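With made-up numbers, the progress heuristic above works like this: if hangcheck_dma_addr was 0x1000 at the previous timeout and VIVS_FE_DMA_ADDRESS now reads 0x1008, change is 8 and falls inside 0..16, so the front-end is considered stuck in place and recovery proceeds; a read of 0x1020 gives change = 32, and any backwards jump gives a negative change, both of which count as forward progress and simply re-arm the timer. Restated as a predicate (the helper name is illustrative):

/* More than 16 bytes of movement, or any backwards jump, counts as
 * front-end progress; 0..16 bytes means it is looping in place. */
static bool fe_made_progress(u32 prev, u32 now)
{
        int change = now - prev;

        return change < 0 || change > 16;
}

The widened gpu->fence_lock (renamed from fence_idr_lock) serves the push path in the same file: holding it across drm_sched_job_init() and the IDR insertion keeps scheduler-fence seqnos in push order.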
index 3b323f1e04754cf5461e84b03356ac3d54d15c13..2ad146bbf4f5488e8b6293dabc9238c03753c044 100644 (file)
@@ -4,7 +4,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
-               exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+               exynos_drm_gem.o exynos_drm_plane.o
 
 exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
index 82c95c34447fe19d34018b15a9b6a92e43ca917a..94529aa8233922b71cc36011fff305280651be53 100644 (file)
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
        unsigned long val;
 
        val = readl(ctx->addr + DECON_WINCONx(win));
-       val &= ~WINCONx_BPPMODE_MASK;
+       val &= WINCONx_ENWIN_F;
 
        switch (fb->format->format) {
        case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
                writel(val, ctx->addr + DECON_VIDOSDxB(win));
        }
 
-       val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
-               VIDOSD_Wx_ALPHA_B_F(0x0);
+       val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+               VIDOSD_Wx_ALPHA_B_F(0xff);
        writel(val, ctx->addr + DECON_VIDOSDxC(win));
 
        val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -673,6 +673,8 @@ err:
 static const struct dev_pm_ops exynos5433_decon_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
                           NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                                    pm_runtime_force_resume)
 };
 
 static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
index 3931d5e33fe07aa77ec5a39c30875f9ac0b8edd6..88cbd000eb094cb480d85d77a7cf40f664f1bd1f 100644 (file)
@@ -832,6 +832,8 @@ static int exynos7_decon_resume(struct device *dev)
 static const struct dev_pm_ops exynos7_decon_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
                           NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 struct platform_driver decon_driver = {
index 86330f396784ddb8ecc0ba277ebefd0fe3ec50d7..c8449ae4f4feda409f29c700f361b0560eaac981 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/clk.h>
 #include <linux/of_graph.h>
 #include <linux/component.h>
+#include <linux/pm_runtime.h>
 #include <video/of_display_timing.h>
 #include <video/of_videomode.h>
 #include <video/videomode.h>
@@ -232,9 +233,11 @@ static int exynos_dp_probe(struct platform_device *pdev)
        np = of_parse_phandle(dev->of_node, "panel", 0);
        if (np) {
                dp->plat_data.panel = of_drm_find_panel(np);
+
                of_node_put(np);
-               if (!dp->plat_data.panel)
-                       return -EPROBE_DEFER;
+               if (IS_ERR(dp->plat_data.panel))
+                       return PTR_ERR(dp->plat_data.panel);
+
                goto out;
        }
 
@@ -276,6 +279,8 @@ static int exynos_dp_resume(struct device *dev)
 
 static const struct dev_pm_ops exynos_dp_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static const struct of_device_id exynos_dp_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
deleted file mode 100644 (file)
index b0c0621..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-/* exynos_drm_core.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author:
- *     Inki Dae <inki.dae@samsung.com>
- *     Joonyoung Shim <jy0922.shim@samsung.com>
- *     Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
-
-static LIST_HEAD(exynos_drm_subdrv_list);
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
-       if (!subdrv)
-               return -EINVAL;
-
-       list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
-       return 0;
-}
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
-       if (!subdrv)
-               return -EINVAL;
-
-       list_del(&subdrv->list);
-
-       return 0;
-}
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev)
-{
-       struct exynos_drm_subdrv *subdrv, *n;
-       int err;
-
-       if (!dev)
-               return -EINVAL;
-
-       list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
-               if (subdrv->probe) {
-                       subdrv->drm_dev = dev;
-
-                       /*
-                        * this probe callback would be called by sub driver
-                        * after setting of all resources to this sub driver,
-                        * such as clock, irq and register map are done.
-                        */
-                       err = subdrv->probe(dev, subdrv->dev);
-                       if (err) {
-                               DRM_DEBUG("exynos drm subdrv probe failed.\n");
-                               list_del(&subdrv->list);
-                               continue;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-int exynos_drm_device_subdrv_remove(struct drm_device *dev)
-{
-       struct exynos_drm_subdrv *subdrv;
-
-       if (!dev) {
-               WARN(1, "Unexpected drm device unregister!\n");
-               return -EINVAL;
-       }
-
-       list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
-               if (subdrv->remove)
-                       subdrv->remove(dev, subdrv->dev);
-       }
-
-       return 0;
-}
-
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
-{
-       struct exynos_drm_subdrv *subdrv;
-       int ret;
-
-       list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
-               if (subdrv->open) {
-                       ret = subdrv->open(dev, subdrv->dev, file);
-                       if (ret)
-                               goto err;
-               }
-       }
-
-       return 0;
-
-err:
-       list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
-               if (subdrv->close)
-                       subdrv->close(dev, subdrv->dev, file);
-       }
-       return ret;
-}
-
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
-{
-       struct exynos_drm_subdrv *subdrv;
-
-       list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
-               if (subdrv->close)
-                       subdrv->close(dev, subdrv->dev, file);
-       }
-}
index 66945e0dc57fc6c723fe8e891a0fa655d9358354..2f0babb67c5104be6f1e617d5baa57a64d74499c 100644 (file)
@@ -113,7 +113,7 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
        }
 
        drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
@@ -240,8 +240,8 @@ struct drm_encoder *exynos_dpi_probe(struct device *dev)
 
        if (ctx->panel_node) {
                ctx->panel = of_drm_find_panel(ctx->panel_node);
-               if (!ctx->panel)
-                       return ERR_PTR(-EPROBE_DEFER);
+               if (IS_ERR(ctx->panel))
+                       return ERR_CAST(ctx->panel);
        }
 
        return &ctx->encoder;
index a81b4a5e24a77397e4748a914357424cca60642d..b599f74692e5c60b79f933fc52f8201ff276e7ea 100644 (file)
@@ -55,8 +55,7 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
                return -ENOMEM;
 
        file->driver_priv = file_priv;
-
-       ret = exynos_drm_subdrv_open(dev, file);
+       ret = g2d_open(dev, file);
        if (ret)
                goto err_file_priv_free;
 
@@ -70,7 +69,7 @@ err_file_priv_free:
 
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
-       exynos_drm_subdrv_close(dev, file);
+       g2d_close(dev, file);
        kfree(file->driver_priv);
        file->driver_priv = NULL;
 }
@@ -147,13 +146,12 @@ static struct drm_driver exynos_drm_driver = {
        .minor  = DRIVER_MINOR,
 };
 
-#ifdef CONFIG_PM_SLEEP
 static int exynos_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct exynos_drm_private *private;
 
-       if (pm_runtime_suspended(dev) || !drm_dev)
+       if (!drm_dev)
                return 0;
 
        private = drm_dev->dev_private;
@@ -170,25 +168,23 @@ static int exynos_drm_suspend(struct device *dev)
        return 0;
 }
 
-static int exynos_drm_resume(struct device *dev)
+static void exynos_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct exynos_drm_private *private;
 
-       if (pm_runtime_suspended(dev) || !drm_dev)
-               return 0;
+       if (!drm_dev)
+               return;
 
        private = drm_dev->dev_private;
        drm_atomic_helper_resume(drm_dev, private->suspend_state);
        exynos_drm_fbdev_resume(drm_dev);
        drm_kms_helper_poll_enable(drm_dev);
-
-       return 0;
 }
-#endif
 
 static const struct dev_pm_ops exynos_drm_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
+       .prepare = exynos_drm_suspend,
+       .complete = exynos_drm_resume,
 };
 
 /* forward declaration */
@@ -240,6 +236,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
                DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
        }, {
                DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+               DRM_COMPONENT_DRIVER
        }, {
                DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
                DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
@@ -376,11 +373,6 @@ static int exynos_drm_bind(struct device *dev)
        if (ret)
                goto err_unbind_all;
 
-       /* Probe non kms sub drivers and virtual display driver. */
-       ret = exynos_drm_device_subdrv_probe(drm);
-       if (ret)
-               goto err_unbind_all;
-
        drm_mode_config_reset(drm);
 
        /*
@@ -411,7 +403,6 @@ err_cleanup_fbdev:
        exynos_drm_fbdev_fini(drm);
 err_cleanup_poll:
        drm_kms_helper_poll_fini(drm);
-       exynos_drm_device_subdrv_remove(drm);
 err_unbind_all:
        component_unbind_all(drm->dev, drm);
 err_mode_config_cleanup:
@@ -420,7 +411,7 @@ err_mode_config_cleanup:
 err_free_private:
        kfree(private);
 err_free_drm:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -431,8 +422,6 @@ static void exynos_drm_unbind(struct device *dev)
 
        drm_dev_unregister(drm);
 
-       exynos_drm_device_subdrv_remove(drm);
-
        exynos_drm_fbdev_fini(drm);
        drm_kms_helper_poll_fini(drm);
 
@@ -444,7 +433,7 @@ static void exynos_drm_unbind(struct device *dev)
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
 
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops exynos_drm_ops = {
index 0f6d079a55c92a024aaaac2f75089934d814712e..c737c4bd2c19b3f2cf141fadc7e1b1d4344b88db 100644 (file)
@@ -179,17 +179,13 @@ static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
                crtc->pipe_clk->enable(crtc->pipe_clk, enable);
 }
 
-struct exynos_drm_g2d_private {
-       struct device           *dev;
+struct drm_exynos_file_private {
+       /* for g2d api */
        struct list_head        inuse_cmdlist;
        struct list_head        event_list;
        struct list_head        userptr_list;
 };
 
-struct drm_exynos_file_private {
-       struct exynos_drm_g2d_private   *g2d_priv;
-};
-
 /*
  * Exynos drm private structure.
  *
@@ -201,6 +197,7 @@ struct exynos_drm_private {
        struct drm_fb_helper *fb_helper;
        struct drm_atomic_state *suspend_state;
 
+       struct device *g2d_dev;
        struct device *dma_dev;
        void *mapping;
 
@@ -217,44 +214,6 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
        return priv->dma_dev;
 }
 
-/*
- * Exynos drm sub driver structure.
- *
- * @list: sub driver has its own list object to register to exynos drm driver.
- * @dev: pointer to device object for subdrv device driver.
- * @drm_dev: pointer to drm_device and this pointer would be set
- *     when sub driver calls exynos_drm_subdrv_register().
- * @probe: this callback would be called by exynos drm driver after
- *     subdrv is registered to it.
- * @remove: this callback is used to release resources created
- *     by probe callback.
- * @open: this would be called with drm device file open.
- * @close: this would be called with drm device file close.
- */
-struct exynos_drm_subdrv {
-       struct list_head list;
-       struct device *dev;
-       struct drm_device *drm_dev;
-
-       int (*probe)(struct drm_device *drm_dev, struct device *dev);
-       void (*remove)(struct drm_device *drm_dev, struct device *dev);
-       int (*open)(struct drm_device *drm_dev, struct device *dev,
-                       struct drm_file *file);
-       void (*close)(struct drm_device *drm_dev, struct device *dev,
-                       struct drm_file *file);
-};
-
- /* This function would be called by non kms drivers such as g2d and ipp. */
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
-
-/* this function removes subdrv list from exynos drm driver */
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev);
-int exynos_drm_device_subdrv_remove(struct drm_device *dev);
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-
 #ifdef CONFIG_DRM_EXYNOS_DPI
 struct drm_encoder *exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct drm_encoder *encoder);
index 6d29777884f931eab3cde84986823b5341f0df76..781b82c2c579b2707947c0c732001f6e794d9713 100644 (file)
@@ -1479,7 +1479,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 
        connector->status = connector_status_disconnected;
        drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
@@ -1519,6 +1519,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;
        dsi->panel = of_drm_find_panel(device->dev.of_node);
+       if (IS_ERR(dsi->panel))
+               dsi->panel = NULL;
+
        if (dsi->panel) {
                drm_panel_attach(dsi->panel, &dsi->connector);
                dsi->connector.status = connector_status_connected;
@@ -1860,6 +1863,8 @@ err_clk:
 
 static const struct dev_pm_ops exynos_dsi_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 struct platform_driver dsi_driver = {
index 7fcc1a7ab1a079fe63bfa6d45687bae146ac2920..9f52382e19ee338e18dbe8212b0d1a69c604d3e9 100644 (file)
@@ -101,7 +101,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 {
        const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
        struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
-       struct drm_gem_object *obj;
        struct drm_framebuffer *fb;
        int i;
        int ret;
@@ -112,15 +111,14 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                unsigned long size = height * mode_cmd->pitches[i] +
                                     mode_cmd->offsets[i];
 
-               obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
-               if (!obj) {
+               exynos_gem[i] = exynos_drm_gem_get(file_priv,
+                                                  mode_cmd->handles[i]);
+               if (!exynos_gem[i]) {
                        DRM_ERROR("failed to lookup gem object\n");
                        ret = -ENOENT;
                        goto err;
                }
 
-               exynos_gem[i] = to_exynos_gem(obj);
-
                if (size > exynos_gem[i]->size) {
                        i++;
                        ret = -EINVAL;
@@ -138,7 +136,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
 err:
        while (i--)
-               drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+               exynos_drm_gem_put(exynos_gem[i]);
 
        return ERR_PTR(ret);
 }
index 6127ef25acd60ec5ec6db92655d364220963fc0b..e8d0670bb5f8d280a9e17a1e4270c0464b1e4abc 100644 (file)
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
 static void fimc_set_window(struct fimc_context *ctx,
                            struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg, h1, h2, v1, v2;
 
        /* cropped image */
        h1 = buf->rect.x;
-       h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+       h2 = real_width - buf->rect.w - buf->rect.x;
        v1 = buf->rect.y;
        v2 = buf->buf.height - buf->rect.h - buf->rect.y;
 
        DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
                buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
-               buf->buf.width, buf->buf.height);
+               real_width, buf->buf.height);
        DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
 
        /*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
 static void fimc_src_set_size(struct fimc_context *ctx,
                              struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg;
 
-       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
 
        /* original size */
-       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
                EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
 
        fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
         * for now, we support only ITU601 8 bit mode
         */
        cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
-               EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+               EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
                EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
        fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
 
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
 static void fimc_dst_set_size(struct fimc_context *ctx,
                             struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg, cfg_ext;
 
-       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
 
        /* original size */
-       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
                EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
 
        fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
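The pitch-derived width matters whenever the allocator pads scanlines; with illustrative numbers: an XRGB8888 buffer (cpp[0] = 4) allocated with a 5120-byte pitch has real_width = 5120 / 4 = 1280 pixels even if buf.width says only 1224 are visible, and programming ORGISIZE/ORGOSIZE with the visible width instead would land every row after the first at the wrong offset.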
index 01b1570d0c3ab4b775a3b6871c074978df653155..b7f56935a46bc2a33fec03c18854774cb331f70d 100644 (file)
@@ -1192,6 +1192,8 @@ static int exynos_fimd_resume(struct device *dev)
 
 static const struct dev_pm_ops exynos_fimd_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 struct platform_driver fimd_driver = {
index f68ef1b3a28c7fac6c427d975d8df015cd09b915..f2481a2014bb3c91ac1c8d60711a5fea89f43636 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/component.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -190,7 +191,7 @@ struct g2d_buf_desc {
 struct g2d_buf_info {
        unsigned int            map_nr;
        enum g2d_reg_type       reg_types[MAX_REG_TYPE_NR];
-       unsigned long           handles[MAX_REG_TYPE_NR];
+       void                    *obj[MAX_REG_TYPE_NR];
        unsigned int            types[MAX_REG_TYPE_NR];
        struct g2d_buf_desc     descs[MAX_REG_TYPE_NR];
 };
@@ -237,7 +238,7 @@ struct g2d_data {
        int                             irq;
        struct workqueue_struct         *g2d_workq;
        struct work_struct              runqueue_work;
-       struct exynos_drm_subdrv        subdrv;
+       struct drm_device               *drm_dev;
        unsigned long                   flags;
 
        /* cmdlist */
@@ -268,14 +269,13 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
        struct device *dev = g2d->dev;
        struct g2d_cmdlist_node *node = g2d->cmdlist_node;
-       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
        int nr;
        int ret;
        struct g2d_buf_info *buf_info;
 
        g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
 
-       g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
+       g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
                                                G2D_CMDLIST_POOL_SIZE,
                                                &g2d->cmdlist_pool, GFP_KERNEL,
                                                g2d->cmdlist_dma_attrs);
@@ -308,7 +308,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
        return 0;
 
 err:
-       dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
+       dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
                        g2d->cmdlist_pool_virt,
                        g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
        return ret;
@@ -316,12 +316,10 @@ err:
 
 static void g2d_fini_cmdlist(struct g2d_data *g2d)
 {
-       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
-
        kfree(g2d->cmdlist_node);
 
        if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
-               dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+               dma_free_attrs(to_dma_dev(g2d->drm_dev),
                                G2D_CMDLIST_POOL_SIZE,
                                g2d->cmdlist_pool_virt,
                                g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
@@ -355,32 +353,31 @@ static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
        mutex_unlock(&g2d->cmdlist_mutex);
 }
 
-static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
                                     struct g2d_cmdlist_node *node)
 {
        struct g2d_cmdlist_node *lnode;
 
-       if (list_empty(&g2d_priv->inuse_cmdlist))
+       if (list_empty(&file_priv->inuse_cmdlist))
                goto add_to_list;
 
        /* this links to base address of new cmdlist */
-       lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+       lnode = list_entry(file_priv->inuse_cmdlist.prev,
                                struct g2d_cmdlist_node, list);
        lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
 
 add_to_list:
-       list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+       list_add_tail(&node->list, &file_priv->inuse_cmdlist);
 
        if (node->event)
-               list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+               list_add_tail(&node->event->base.link, &file_priv->event_list);
 }
 
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
-                                       unsigned long obj,
+static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+                                       void *obj,
                                        bool force)
 {
-       struct g2d_cmdlist_userptr *g2d_userptr =
-                                       (struct g2d_cmdlist_userptr *)obj;
+       struct g2d_cmdlist_userptr *g2d_userptr = obj;
        struct page **pages;
 
        if (!obj)
@@ -398,7 +395,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
                return;
 
 out:
-       dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+       dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
                        g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
 
        pages = frame_vector_pages(g2d_userptr->vec);
@@ -419,16 +416,14 @@ out:
        kfree(g2d_userptr);
 }
 
-static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
                                        unsigned long userptr,
                                        unsigned long size,
                                        struct drm_file *filp,
-                                       unsigned long *obj)
+                                       void **obj)
 {
        struct drm_exynos_file_private *file_priv = filp->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
        struct g2d_cmdlist_userptr *g2d_userptr;
-       struct g2d_data *g2d;
        struct sg_table *sgt;
        unsigned long start, end;
        unsigned int npages, offset;
@@ -439,10 +434,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                return ERR_PTR(-EINVAL);
        }
 
-       g2d = dev_get_drvdata(g2d_priv->dev);
-
        /* check if userptr already exists in userptr_list. */
-       list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+       list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
                if (g2d_userptr->userptr == userptr) {
                        /*
                         * also check size because there could be same address
@@ -450,7 +443,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                         */
                        if (g2d_userptr->size == size) {
                                atomic_inc(&g2d_userptr->refcount);
-                               *obj = (unsigned long)g2d_userptr;
+                               *obj = g2d_userptr;
 
                                return &g2d_userptr->dma_addr;
                        }
@@ -517,7 +510,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 
        g2d_userptr->sgt = sgt;
 
-       if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+       if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
                                DMA_BIDIRECTIONAL)) {
                DRM_ERROR("failed to map sgt with dma region.\n");
                ret = -ENOMEM;
@@ -527,14 +520,14 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
        g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
        g2d_userptr->userptr = userptr;
 
-       list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+       list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);
 
        if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
                g2d->current_pool += npages << PAGE_SHIFT;
                g2d_userptr->in_pool = true;
        }
 
-       *obj = (unsigned long)g2d_userptr;
+       *obj = g2d_userptr;
 
        return &g2d_userptr->dma_addr;
 
@@ -556,19 +549,14 @@ err_free:
        return ERR_PTR(ret);
 }
 
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
-                                       struct g2d_data *g2d,
-                                       struct drm_file *filp)
+static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
 {
        struct drm_exynos_file_private *file_priv = filp->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
        struct g2d_cmdlist_userptr *g2d_userptr, *n;
 
-       list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+       list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
                if (g2d_userptr->in_pool)
-                       g2d_userptr_put_dma_addr(drm_dev,
-                                               (unsigned long)g2d_userptr,
-                                               true);
+                       g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
 
        g2d->current_pool = 0;
 }
@@ -723,26 +711,23 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
                buf_desc = &buf_info->descs[reg_type];
 
                if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
-                       unsigned long size;
+                       struct exynos_drm_gem *exynos_gem;
 
-                       size = exynos_drm_gem_get_size(drm_dev, handle, file);
-                       if (!size) {
+                       exynos_gem = exynos_drm_gem_get(file, handle);
+                       if (!exynos_gem) {
                                ret = -EFAULT;
                                goto err;
                        }
 
-                       if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
-                                                                       size)) {
+                       if (!g2d_check_buf_desc_is_valid(buf_desc,
+                                                        reg_type, exynos_gem->size)) {
+                               exynos_drm_gem_put(exynos_gem);
                                ret = -EFAULT;
                                goto err;
                        }
 
-                       addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
-                                                               file);
-                       if (IS_ERR(addr)) {
-                               ret = -EFAULT;
-                               goto err;
-                       }
+                       addr = &exynos_gem->dma_addr;
+                       buf_info->obj[reg_type] = exynos_gem;
                } else {
                        struct drm_exynos_g2d_userptr g2d_userptr;
 
@@ -758,11 +743,11 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
                                goto err;
                        }
 
-                       addr = g2d_userptr_get_dma_addr(drm_dev,
+                       addr = g2d_userptr_get_dma_addr(g2d,
                                                        g2d_userptr.userptr,
                                                        g2d_userptr.size,
                                                        file,
-                                                       &handle);
+                                                       &buf_info->obj[reg_type]);
                        if (IS_ERR(addr)) {
                                ret = -EFAULT;
                                goto err;
@@ -771,7 +756,6 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 
                cmdlist->data[reg_pos + 1] = *addr;
                buf_info->reg_types[i] = reg_type;
-               buf_info->handles[reg_type] = handle;
        }
 
        return 0;
@@ -785,29 +769,26 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
                                  struct g2d_cmdlist_node *node,
                                  struct drm_file *filp)
 {
-       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
        struct g2d_buf_info *buf_info = &node->buf_info;
        int i;
 
        for (i = 0; i < buf_info->map_nr; i++) {
                struct g2d_buf_desc *buf_desc;
                enum g2d_reg_type reg_type;
-               unsigned long handle;
+               void *obj;
 
                reg_type = buf_info->reg_types[i];
 
                buf_desc = &buf_info->descs[reg_type];
-               handle = buf_info->handles[reg_type];
+               obj = buf_info->obj[reg_type];
 
                if (buf_info->types[reg_type] == BUF_TYPE_GEM)
-                       exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
-                                                       filp);
+                       exynos_drm_gem_put(obj);
                else
-                       g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
-                                                       false);
+                       g2d_userptr_put_dma_addr(g2d, obj, false);
 
                buf_info->reg_types[i] = REG_TYPE_NONE;
-               buf_info->handles[reg_type] = 0;
+               buf_info->obj[reg_type] = NULL;
                buf_info->types[reg_type] = 0;
                memset(buf_desc, 0x00, sizeof(*buf_desc));
        }
@@ -922,7 +903,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
 
 static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
 {
-       struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+       struct drm_device *drm_dev = g2d->drm_dev;
        struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
        struct drm_exynos_pending_g2d_event *e;
        struct timespec64 now;
@@ -1031,7 +1012,7 @@ out:
        mutex_unlock(&g2d->runqueue_mutex);
 }
 
-static int g2d_check_reg_offset(struct device *dev,
+static int g2d_check_reg_offset(struct g2d_data *g2d,
                                struct g2d_cmdlist_node *node,
                                int nr, bool for_addr)
 {
@@ -1131,7 +1112,7 @@ static int g2d_check_reg_offset(struct device *dev,
        return 0;
 
 err:
-       dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+       dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
        return -EINVAL;
 }
 
@@ -1139,23 +1120,8 @@ err:
 int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
                             struct drm_file *file)
 {
-       struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-       struct device *dev;
-       struct g2d_data *g2d;
        struct drm_exynos_g2d_get_ver *ver = data;
 
-       if (!g2d_priv)
-               return -ENODEV;
-
-       dev = g2d_priv->dev;
-       if (!dev)
-               return -ENODEV;
-
-       g2d = dev_get_drvdata(dev);
-       if (!g2d)
-               return -EFAULT;
-
        ver->major = G2D_HW_MAJOR_VER;
        ver->minor = G2D_HW_MINOR_VER;
 
@@ -1166,9 +1132,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                                 struct drm_file *file)
 {
        struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-       struct device *dev;
-       struct g2d_data *g2d;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+       struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
        struct drm_exynos_g2d_set_cmdlist *req = data;
        struct drm_exynos_g2d_cmd *cmd;
        struct drm_exynos_pending_g2d_event *e;
@@ -1177,17 +1142,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        int size;
        int ret;
 
-       if (!g2d_priv)
-               return -ENODEV;
-
-       dev = g2d_priv->dev;
-       if (!dev)
-               return -ENODEV;
-
-       g2d = dev_get_drvdata(dev);
-       if (!g2d)
-               return -EFAULT;
-
        node = g2d_get_cmdlist(g2d);
        if (!node)
                return -ENOMEM;
@@ -1199,7 +1153,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
         */
        if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
            req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
-               dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+               dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
                return -EINVAL;
        }
 
@@ -1267,7 +1221,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
         */
        size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
        if (size > G2D_CMDLIST_DATA_NUM) {
-               dev_err(dev, "cmdlist size is too big\n");
+               dev_err(g2d->dev, "cmdlist size is too big\n");
                ret = -EINVAL;
                goto err_free_event;
        }
@@ -1282,7 +1236,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        }
        cmdlist->last += req->cmd_nr * 2;
 
-       ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+       ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
        if (ret < 0)
                goto err_free_event;
 
@@ -1301,7 +1255,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                }
                cmdlist->last += req->cmd_buf_nr * 2;
 
-               ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+               ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
                if (ret < 0)
                        goto err_free_event;
 
@@ -1319,7 +1273,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        /* tail */
        cmdlist->data[cmdlist->last] = 0;
 
-       g2d_add_cmdlist_to_inuse(g2d_priv, node);
+       g2d_add_cmdlist_to_inuse(file_priv, node);
 
        return 0;
 
@@ -1337,25 +1291,13 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
                          struct drm_file *file)
 {
        struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-       struct device *dev;
-       struct g2d_data *g2d;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+       struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
        struct drm_exynos_g2d_exec *req = data;
        struct g2d_runqueue_node *runqueue_node;
        struct list_head *run_cmdlist;
        struct list_head *event_list;
 
-       if (!g2d_priv)
-               return -ENODEV;
-
-       dev = g2d_priv->dev;
-       if (!dev)
-               return -ENODEV;
-
-       g2d = dev_get_drvdata(dev);
-       if (!g2d)
-               return -EFAULT;
-
        runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
        if (!runqueue_node)
                return -ENOMEM;
@@ -1367,11 +1309,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
        init_completion(&runqueue_node->complete);
        runqueue_node->async = req->async;
 
-       list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
-       list_splice_init(&g2d_priv->event_list, event_list);
+       list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
+       list_splice_init(&file_priv->event_list, event_list);
 
        if (list_empty(run_cmdlist)) {
-               dev_err(dev, "there is no inuse cmdlist\n");
+               dev_err(g2d->dev, "there is no inuse cmdlist\n");
                kmem_cache_free(g2d->runqueue_slab, runqueue_node);
                return -EPERM;
        }
@@ -1395,71 +1337,28 @@ out:
        return 0;
 }
 
-static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
-       struct g2d_data *g2d;
-       int ret;
-
-       g2d = dev_get_drvdata(dev);
-       if (!g2d)
-               return -EFAULT;
-
-       /* allocate dma-aware cmdlist buffer. */
-       ret = g2d_init_cmdlist(g2d);
-       if (ret < 0) {
-               dev_err(dev, "cmdlist init failed\n");
-               return ret;
-       }
-
-       ret = drm_iommu_attach_device(drm_dev, dev);
-       if (ret < 0) {
-               dev_err(dev, "failed to enable iommu.\n");
-               g2d_fini_cmdlist(g2d);
-       }
-
-       return ret;
-
-}
-
-static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
-       drm_iommu_detach_device(drm_dev, dev);
-}
-
-static int g2d_open(struct drm_device *drm_dev, struct device *dev,
-                       struct drm_file *file)
+int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
 {
        struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv;
-
-       g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
-       if (!g2d_priv)
-               return -ENOMEM;
 
-       g2d_priv->dev = dev;
-       file_priv->g2d_priv = g2d_priv;
-
-       INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
-       INIT_LIST_HEAD(&g2d_priv->event_list);
-       INIT_LIST_HEAD(&g2d_priv->userptr_list);
+       INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
+       INIT_LIST_HEAD(&file_priv->event_list);
+       INIT_LIST_HEAD(&file_priv->userptr_list);
 
        return 0;
 }
 
-static void g2d_close(struct drm_device *drm_dev, struct device *dev,
-                       struct drm_file *file)
+void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
 {
        struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
        struct g2d_data *g2d;
        struct g2d_cmdlist_node *node, *n;
 
-       if (!dev)
+       if (!priv->g2d_dev)
                return;
 
-       g2d = dev_get_drvdata(dev);
-       if (!g2d)
-               return;
+       g2d = dev_get_drvdata(priv->g2d_dev);
 
        /* Remove the runqueue nodes that belong to us. */
        mutex_lock(&g2d->runqueue_mutex);
@@ -1480,24 +1379,70 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
         * Properly unmap these buffers here.
         */
        mutex_lock(&g2d->cmdlist_mutex);
-       list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+       list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
                g2d_unmap_cmdlist_gem(g2d, node, file);
                list_move_tail(&node->list, &g2d->free_cmdlist);
        }
        mutex_unlock(&g2d->cmdlist_mutex);
 
        /* release all g2d_userptr in pool. */
-       g2d_userptr_free_all(drm_dev, g2d, file);
+       g2d_userptr_free_all(g2d, file);
+}
+
+static int g2d_bind(struct device *dev, struct device *master, void *data)
+{
+       struct g2d_data *g2d = dev_get_drvdata(dev);
+       struct drm_device *drm_dev = data;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+       int ret;
+
+       g2d->drm_dev = drm_dev;
 
-       kfree(file_priv->g2d_priv);
+       /* allocate dma-aware cmdlist buffer. */
+       ret = g2d_init_cmdlist(g2d);
+       if (ret < 0) {
+               dev_err(dev, "cmdlist init failed\n");
+               return ret;
+       }
+
+       ret = drm_iommu_attach_device(drm_dev, dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable iommu.\n");
+               g2d_fini_cmdlist(g2d);
+               return ret;
+       }
+       priv->g2d_dev = dev;
+
+       dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
+                       G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+       return 0;
+}
+
+static void g2d_unbind(struct device *dev, struct device *master, void *data)
+{
+       struct g2d_data *g2d = dev_get_drvdata(dev);
+       struct drm_device *drm_dev = data;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+
+       /* Suspend operation and wait for engine idle. */
+       set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+       g2d_wait_finish(g2d, NULL);
+       priv->g2d_dev = NULL;
+
+       cancel_work_sync(&g2d->runqueue_work);
+       drm_iommu_detach_device(g2d->drm_dev, dev);
 }
 
+static const struct component_ops g2d_component_ops = {
+       .bind   = g2d_bind,
+       .unbind = g2d_unbind,
+};
+
 static int g2d_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct g2d_data *g2d;
-       struct exynos_drm_subdrv *subdrv;
        int ret;
 
        g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
@@ -1564,22 +1509,12 @@ static int g2d_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, g2d);
 
-       subdrv = &g2d->subdrv;
-       subdrv->dev = dev;
-       subdrv->probe = g2d_subdrv_probe;
-       subdrv->remove = g2d_subdrv_remove;
-       subdrv->open = g2d_open;
-       subdrv->close = g2d_close;
-
-       ret = exynos_drm_subdrv_register(subdrv);
+       ret = component_add(dev, &g2d_component_ops);
        if (ret < 0) {
                dev_err(dev, "failed to register drm g2d device\n");
                goto err_put_clk;
        }
 
-       dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
-                       G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
-
        return 0;
 
 err_put_clk:
@@ -1595,12 +1530,7 @@ static int g2d_remove(struct platform_device *pdev)
 {
        struct g2d_data *g2d = platform_get_drvdata(pdev);
 
-       /* Suspend operation and wait for engine idle. */
-       set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
-       g2d_wait_finish(g2d, NULL);
-
-       cancel_work_sync(&g2d->runqueue_work);
-       exynos_drm_subdrv_unregister(&g2d->subdrv);
+       component_del(&pdev->dev, &g2d_component_ops);
 
        /* There should be no locking needed here. */
        g2d_remove_runqueue_nodes(g2d, NULL);
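
For context on the block above: the driver drops the Exynos-specific
subdrv registration in favour of the generic component framework. Per-file
state moves into drm_exynos_file_private, and the ioctls now reach the
device through priv->g2d_dev plus dev_get_drvdata() instead of a per-open
g2d_priv. A minimal sketch of the component pattern; the master side is
illustrative only (compare_dev, master_ops and g2d_dev are placeholder
names, not part of this patch):

        /* component side: register bind/unbind on the platform device */
        static const struct component_ops g2d_component_ops = {
                .bind   = g2d_bind,     /* receives the drm_device as "data" */
                .unbind = g2d_unbind,
        };

        /* in probe: */
        ret = component_add(dev, &g2d_component_ops);

        /* master side (illustrative): collect components, then bind all */
        struct component_match *match = NULL;

        component_match_add(dev, &match, compare_dev, g2d_dev);
        ret = component_master_add_with_match(dev, &master_ops, match);
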
index 1a9c7ca8c15bb9952c7d2fbc3f432228cb3c8192..287b2ed8f1782104e8acf3b71ca3e93c691f71ab 100644 (file)
@@ -14,6 +14,9 @@ extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
                                        struct drm_file *file_priv);
 extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
+
+extern int g2d_open(struct drm_device *drm_dev, struct drm_file *file);
+extern void g2d_close(struct drm_device *drm_dev, struct drm_file *file);
 #else
 static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
                                           struct drm_file *file_priv)
@@ -33,4 +36,12 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
 {
        return -ENODEV;
 }
+
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+       return 0;
+}
+
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+{ }
 #endif
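
One subtlety in the header change above: the no-op fallbacks live in a
header, so they must be static inline, otherwise every translation unit
including the header would emit a duplicate symbol at link time. The
pattern, sketched generically:

        #ifdef CONFIG_DRM_EXYNOS_G2D
        int g2d_open(struct drm_device *drm_dev, struct drm_file *file);
        #else
        /* feature compiled out: header-local no-op, hence static inline */
        static inline int g2d_open(struct drm_device *drm_dev,
                                   struct drm_file *file)
        {
                return 0;
        }
        #endif
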
index 6e1494fa71b40d70a59b768a1ac2caf6d9799dfe..34ace85feb6883e3dcf9793d5057a5c1de2ebb21 100644 (file)
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
 
        /* drop reference from allocate - handle holds it now. */
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
@@ -171,26 +171,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
        kfree(exynos_gem);
 }
 
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
-                                               unsigned int gem_handle,
-                                               struct drm_file *file_priv)
-{
-       struct exynos_drm_gem *exynos_gem;
-       struct drm_gem_object *obj;
-
-       obj = drm_gem_object_lookup(file_priv, gem_handle);
-       if (!obj) {
-               DRM_ERROR("failed to lookup gem object.\n");
-               return 0;
-       }
-
-       exynos_gem = to_exynos_gem(obj);
-
-       drm_gem_object_unreference_unlocked(obj);
-
-       return exynos_gem->size;
-}
-
 static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
                                                  unsigned long size)
 {
@@ -299,43 +279,15 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
                                       &args->offset);
 }
 
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
-                                       unsigned int gem_handle,
-                                       struct drm_file *filp)
-{
-       struct exynos_drm_gem *exynos_gem;
-       struct drm_gem_object *obj;
-
-       obj = drm_gem_object_lookup(filp, gem_handle);
-       if (!obj) {
-               DRM_ERROR("failed to lookup gem object.\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       exynos_gem = to_exynos_gem(obj);
-
-       return &exynos_gem->dma_addr;
-}
-
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
-                                       unsigned int gem_handle,
-                                       struct drm_file *filp)
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+                                         unsigned int gem_handle)
 {
        struct drm_gem_object *obj;
 
        obj = drm_gem_object_lookup(filp, gem_handle);
-       if (!obj) {
-               DRM_ERROR("failed to lookup gem object.\n");
-               return;
-       }
-
-       drm_gem_object_unreference_unlocked(obj);
-
-       /*
-        * decrease obj->refcount one more time because we have already
-        * increased it at exynos_drm_gem_get_dma_addr().
-        */
-       drm_gem_object_unreference_unlocked(obj);
+       if (!obj)
+               return NULL;
+       return to_exynos_gem(obj);
 }
 
 static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +335,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
        args->flags = exynos_gem->flags;
        args->size = exynos_gem->size;
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
index 9057d7f1d6ed437f309101b540c47bf97fc428b2..d46a62c30812ce7b067fd131e5ef12039a638af8 100644 (file)
@@ -77,32 +77,26 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 
 /*
- * get dma address from gem handle and this function could be used for
+ * get exynos drm object from gem handle, this function could be used for
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
-                                       unsigned int gem_handle,
-                                       struct drm_file *filp);
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+                                         unsigned int gem_handle);
 
 /*
- * put dma address from gem handle and this function could be used for
- * other drivers such as 2d/3d acceleration drivers.
- * with this function call, gem object reference count would be decreased.
+ * put exynos drm object acquired from exynos_drm_gem_get(),
+ * gem object reference count would be decreased.
  */
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
-                                       unsigned int gem_handle,
-                                       struct drm_file *filp);
+static inline void exynos_drm_gem_put(struct exynos_drm_gem *exynos_gem)
+{
+       drm_gem_object_put_unlocked(&exynos_gem->base);
+}
 
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
 
-/* get buffer size to gem handle. */
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
-                                               unsigned int gem_handle,
-                                               struct drm_file *file_priv);
-
 /* free gem object. */
 void exynos_drm_gem_free_object(struct drm_gem_object *obj);
 
index 35ac66730563944e83dcb1a6be7ec39999ead086..7ba414b52faa940595a028db7fa3e959bc7a58cd 100644 (file)
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
                        GSC_IN_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+               break;
        case DRM_FORMAT_NV61:
-               cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
-                       GSC_IN_YUV420_2P);
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
                break;
        case DRM_FORMAT_YUV422:
                cfg |= GSC_IN_YUV422_3P;
                break;
        case DRM_FORMAT_YUV420:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+               break;
        case DRM_FORMAT_YVU420:
-               cfg |= GSC_IN_YUV420_3P;
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+               break;
        case DRM_FORMAT_NV16:
-               cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
-                       GSC_IN_YUV420_2P);
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
                break;
        }
 
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
 
        switch (degree) {
        case DRM_MODE_ROTATE_0:
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg |= GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg |= GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_90:
                cfg |= GSC_IN_ROT_90;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg |= GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg |= GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_180:
                cfg |= GSC_IN_ROT_180;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg &= ~GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg &= ~GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg &= ~GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_270:
                cfg |= GSC_IN_ROT_270;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg &= ~GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg &= ~GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg &= ~GSC_IN_ROT_YFLIP;
                break;
        }
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
        cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
                GSC_SRCIMG_WIDTH_MASK);
 
-       cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+       cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
                GSC_SRCIMG_HEIGHT(buf->buf.height));
 
        gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
                        GSC_OUT_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV61:
                cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
                break;
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+               break;
        case DRM_FORMAT_YUV422:
+               cfg |= GSC_OUT_YUV422_3P;
+               break;
        case DRM_FORMAT_YUV420:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+               break;
        case DRM_FORMAT_YVU420:
-               cfg |= GSC_OUT_YUV420_3P;
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+               break;
        case DRM_FORMAT_NV16:
-               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
-                       GSC_OUT_YUV420_2P);
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
                break;
        }
 
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
        /* original size */
        cfg = gsc_read(GSC_DSTIMG_SIZE);
        cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
-       cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+       cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
               GSC_DSTIMG_HEIGHT(buf->buf.height);
        gsc_write(cfg, GSC_DSTIMG_SIZE);
 
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
 };
 
 static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
-       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
        { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
        { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
        { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
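
Two independent fixes are folded into the gsc hunks above: NV16/NV61 are
4:2:2 two-plane formats and were wrongly programmed with the 4:2:0
setting (similarly, YVU420 needs the CrCb chroma order and three-plane
YUV422 output gains the new GSC_OUT_YUV422_3P value), and the image width
written to the hardware is now derived from the buffer pitch rather than
the crop width, since the hardware needs the real line stride:

        /* pitch is in bytes; dividing by plane 0's bytes-per-pixel gives
         * the true buffer width, which may exceed the visible crop width */
        int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
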
index 26374e58c5578dc326356e19520b522e1d45412c..23226a0212e8fd2d05d362c633ffe15815388ffe 100644 (file)
@@ -345,39 +345,18 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
        int ret = 0;
        int i;
 
-       /* basic checks */
-       if (buf->buf.width == 0 || buf->buf.height == 0)
-               return -EINVAL;
-       buf->format = drm_format_info(buf->buf.fourcc);
-       for (i = 0; i < buf->format->num_planes; i++) {
-               unsigned int width = (i == 0) ? buf->buf.width :
-                            DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
-               if (buf->buf.pitch[i] == 0)
-                       buf->buf.pitch[i] = width * buf->format->cpp[i];
-               if (buf->buf.pitch[i] < width * buf->format->cpp[i])
-                       return -EINVAL;
-               if (!buf->buf.gem_id[i])
-                       return -ENOENT;
-       }
-
-       /* pitch for additional planes must match */
-       if (buf->format->num_planes > 2 &&
-           buf->buf.pitch[1] != buf->buf.pitch[2])
-               return -EINVAL;
-
        /* get GEM buffers and check their size */
        for (i = 0; i < buf->format->num_planes; i++) {
                unsigned int height = (i == 0) ? buf->buf.height :
                             DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
                unsigned long size = height * buf->buf.pitch[i];
-               struct drm_gem_object *obj = drm_gem_object_lookup(filp,
+               struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
                                                            buf->buf.gem_id[i]);
-               if (!obj) {
+               if (!gem) {
                        ret = -ENOENT;
                        goto gem_free;
                }
-               buf->exynos_gem[i] = to_exynos_gem(obj);
+               buf->exynos_gem[i] = gem;
 
                if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
                        i++;
@@ -391,7 +370,7 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
        return 0;
 gem_free:
        while (i--) {
-               drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+               exynos_drm_gem_put(buf->exynos_gem[i]);
                buf->exynos_gem[i] = NULL;
        }
        return ret;
@@ -404,7 +383,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
        if (!buf->exynos_gem[0])
                return;
        for (i = 0; i < buf->format->num_planes; i++)
-               drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+               exynos_drm_gem_put(buf->exynos_gem[i]);
 }
 
 static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
        IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
 };
 
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
        [IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
        [IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
                                DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
        enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
        struct drm_ipp_limit l;
        struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+       int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
 
        if (!limits)
                return 0;
 
        __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
-       if (!__size_limit_check(buf->buf.width, &l.h) ||
+       if (!__size_limit_check(real_width, &l.h) ||
            !__size_limit_check(buf->buf.height, &l.v))
                return -EINVAL;
 
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
        return 0;
 }
 
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+                                      struct exynos_drm_ipp_buffer *buf,
+                                      struct exynos_drm_ipp_buffer *src,
+                                      struct exynos_drm_ipp_buffer *dst,
+                                      bool rotate, bool swap)
+{
+       const struct exynos_drm_ipp_formats *fmt;
+       int ret, i;
+
+       fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+                              buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+                                           DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+       if (!fmt) {
+               DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+                                buf == src ? "src" : "dst");
+               return -EINVAL;
+       }
+
+       /* basic checks */
+       if (buf->buf.width == 0 || buf->buf.height == 0)
+               return -EINVAL;
+
+       buf->format = drm_format_info(buf->buf.fourcc);
+       for (i = 0; i < buf->format->num_planes; i++) {
+               unsigned int width = (i == 0) ? buf->buf.width :
+                            DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+               if (buf->buf.pitch[i] == 0)
+                       buf->buf.pitch[i] = width * buf->format->cpp[i];
+               if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+                       return -EINVAL;
+               if (!buf->buf.gem_id[i])
+                       return -ENOENT;
+       }
+
+       /* pitch for additional planes must match */
+       if (buf->format->num_planes > 2 &&
+           buf->buf.pitch[1] != buf->buf.pitch[2])
+               return -EINVAL;
+
+       /* check driver limits */
+       ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+                                              fmt->num_limits,
+                                              rotate,
+                                              buf == dst ? swap : false);
+       if (ret)
+               return ret;
+       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+                                               fmt->limits,
+                                               fmt->num_limits, swap);
+       return ret;
+}
+
 static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
 {
        struct exynos_drm_ipp *ipp = task->ipp;
-       const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
        struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
        unsigned int rotation = task->transform.rotation;
        int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
                return -EINVAL;
        }
 
-       src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
-                                  DRM_EXYNOS_IPP_FORMAT_SOURCE);
-       if (!src_fmt) {
-               DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
-               return -EINVAL;
-       }
-       ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
-                                              src_fmt->num_limits,
-                                              rotate, false);
-       if (ret)
-               return ret;
-       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
-                                               src_fmt->limits,
-                                               src_fmt->num_limits, swap);
+       ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
        if (ret)
                return ret;
 
-       dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
-                                  DRM_EXYNOS_IPP_FORMAT_DESTINATION);
-       if (!dst_fmt) {
-               DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
-               return -EINVAL;
-       }
-       ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
-                                              dst_fmt->num_limits,
-                                              false, swap);
-       if (ret)
-               return ret;
-       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
-                                               dst_fmt->limits,
-                                               dst_fmt->num_limits, swap);
+       ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
        if (ret)
                return ret;
 
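
The ipp change is a pure refactor: the duplicated source/destination
validation collapses into exynos_drm_ipp_check_format(), which tells the
two cases apart by pointer identity (buf == src selects the SOURCE format
list, buf == dst applies the swap flag). The resulting call pattern:

        /* same helper for both buffers; only the rotate/swap flags differ */
        ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
        if (ret)
                return ret;
        ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
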
index 2174814273e2071934c817e180848498627f6e14..2fd299a58297edd559b2b5928a63833b3c2622df 100644 (file)
@@ -367,6 +367,8 @@ static int exynos_mic_resume(struct device *dev)
 
 static const struct dev_pm_ops exynos_mic_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 static int exynos_mic_probe(struct platform_device *pdev)
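
This dev_pm_ops pattern (repeated for hdmi and mixer further down) routes
system sleep through the runtime-PM callbacks: pm_runtime_force_suspend()
invokes the device's runtime-suspend handler during system suspend if the
device is still active, so the suspend logic exists only once. Sketched
generically, with foo_runtime_* standing in for a driver's callbacks:

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
                                   NULL)
                SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                        pm_runtime_force_resume)
        };
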
index 38a2a7f1204be7a9ef416556db031cf8eaa07770..dba29aec59b44d873df58555ece705b0d4de6d74 100644 (file)
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
        if (plane->state) {
                exynos_state = to_exynos_plane_state(plane->state);
                if (exynos_state->base.fb)
-                       drm_framebuffer_unreference(exynos_state->base.fb);
+                       drm_framebuffer_put(exynos_state->base.fb);
                kfree(exynos_state);
                plane->state = NULL;
        }
@@ -263,8 +263,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
        if (!state->crtc)
                return;
 
-       plane->crtc = state->crtc;
-
        if (exynos_crtc->ops->update_plane)
                exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
 }
index 1a76dd3d52e1dc5b63d81bd19b49e4120e23891d..a820a68429b9a8f56be132d251b2f4ea81f6c1b4 100644 (file)
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
        val &= ~ROT_CONTROL_FLIP_MASK;
 
        if (rotation & DRM_MODE_REFLECT_X)
-               val |= ROT_CONTROL_FLIP_HORIZONTAL;
-       if (rotation & DRM_MODE_REFLECT_Y)
                val |= ROT_CONTROL_FLIP_VERTICAL;
+       if (rotation & DRM_MODE_REFLECT_Y)
+               val |= ROT_CONTROL_FLIP_HORIZONTAL;
 
        val &= ~ROT_CONTROL_ROT_MASK;
 
index 91d4382343d080abd4607fd78a58729878eda844..0ddb6eec7b113ea306fea4bde563e8ecb9945495 100644 (file)
@@ -30,6 +30,7 @@
 #define scaler_write(cfg, offset)      writel(cfg, scaler->regs + (offset))
 #define SCALER_MAX_CLK                 4
 #define SCALER_AUTOSUSPEND_DELAY       2000
+#define SCALER_RESET_WAIT_RETRIES      100
 
 struct scaler_data {
        const char      *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
 static u32 scaler_get_format(u32 drm_fmt)
 {
        switch (drm_fmt) {
-       case DRM_FORMAT_NV21:
-               return SCALER_YUV420_2P_UV;
        case DRM_FORMAT_NV12:
+               return SCALER_YUV420_2P_UV;
+       case DRM_FORMAT_NV21:
                return SCALER_YUV420_2P_VU;
        case DRM_FORMAT_YUV420:
                return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
                return SCALER_YUV422_1P_UYVY;
        case DRM_FORMAT_YVYU:
                return SCALER_YUV422_1P_YVYU;
-       case DRM_FORMAT_NV61:
-               return SCALER_YUV422_2P_UV;
        case DRM_FORMAT_NV16:
+               return SCALER_YUV422_2P_UV;
+       case DRM_FORMAT_NV61:
                return SCALER_YUV422_2P_VU;
        case DRM_FORMAT_YUV422:
                return SCALER_YUV422_3P;
-       case DRM_FORMAT_NV42:
-               return SCALER_YUV444_2P_UV;
        case DRM_FORMAT_NV24:
+               return SCALER_YUV444_2P_UV;
+       case DRM_FORMAT_NV42:
                return SCALER_YUV444_2P_VU;
        case DRM_FORMAT_YUV444:
                return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
        return 0;
 }
 
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+       int retry = SCALER_RESET_WAIT_RETRIES;
+
+       scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+       do {
+               cpu_relax();
+       } while (--retry > 1 &&
+                scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+       do {
+               cpu_relax();
+               scaler_write(1, SCALER_INT_EN);
+       } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+       return retry ? 0 : -EIO;
+}
+
 static inline void scaler_enable_int(struct scaler_context *scaler)
 {
        u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
        u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
        struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
 
-       scaler->task = task;
-
        pm_runtime_get_sync(scaler->dev);
+       if (scaler_reset(scaler)) {
+               pm_runtime_put(scaler->dev);
+               return -EIO;
+       }
+
+       scaler->task = task;
 
        scaler_set_src_fmt(scaler, src_fmt);
        scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
 
 static inline u32 scaler_get_int_status(struct scaler_context *scaler)
 {
-       return scaler_read(SCALER_INT_STATUS);
+       u32 val = scaler_read(SCALER_INT_STATUS);
+
+       scaler_write(val, SCALER_INT_STATUS);
+
+       return val;
 }
 
 static inline int scaler_task_done(u32 val)
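
Two behavioural fixes in the scaler hunks: the engine is soft-reset before
each commit (with a bounded retry budget shared by both poll loops, and
-EIO once it is exhausted) instead of trusting leftover hardware state,
and the interrupt status register is now acknowledged write-to-clear
style, by writing back exactly the bits that were read:

        /* write-to-clear: writing the value just read acks those bits */
        u32 val = scaler_read(SCALER_INT_STATUS);

        scaler_write(val, SCALER_INT_STATUS);
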
index e6b0940b1ac273f95a12c9a747fd4097b1e3cd1c..19697c1362d8facf536a55485c45b2b2aaae6a85 100644 (file)
@@ -319,7 +319,7 @@ static int vidi_get_modes(struct drm_connector *connector)
                return -ENOMEM;
        }
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
 
        return drm_add_edid_modes(connector, edid);
 }
@@ -344,7 +344,7 @@ static int vidi_create_connector(struct drm_encoder *encoder)
        }
 
        drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index db91932550cf77d6459efab1b5540e7c8501dc38..2092a650df7d56b1262fc2f09036eb88ec62ce38 100644 (file)
@@ -888,7 +888,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
                (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
                edid->width_cm, edid->height_cm);
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
 
        ret = drm_add_edid_modes(connector, edid);
@@ -951,7 +951,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
        }
 
        drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        if (hdata->bridge) {
                ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
@@ -2093,6 +2093,8 @@ static int __maybe_unused exynos_hdmi_resume(struct device *dev)
 
 static const struct dev_pm_ops exynos_hdmi_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 struct platform_driver hdmi_driver = {
index 272c79f5f5bff856dfa4a29a2574a70ee72366cf..ffbf4a950f696d13476b0bf641b9586ce8cc44d7 100644 (file)
@@ -837,8 +837,6 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
                        struct drm_device *drm_dev)
 {
        int ret;
-       struct exynos_drm_private *priv;
-       priv = drm_dev->dev_private;
 
        mixer_ctx->drm_dev = drm_dev;
 
@@ -1271,6 +1269,8 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
 
 static const struct dev_pm_ops exynos_mixer_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
 };
 
 struct platform_driver mixer_driver = {
index 4704a993cbb7f003a51b901ae9b291a9adc9fe09..16b39734115c93855d82a5de94e16cc5e2570940 100644 (file)
 #define GSC_OUT_YUV420_3P              (3 << 4)
 #define GSC_OUT_YUV422_1P              (4 << 4)
 #define GSC_OUT_YUV422_2P              (5 << 4)
+#define GSC_OUT_YUV422_3P              (6 << 4)
 #define GSC_OUT_YUV444                 (7 << 4)
 #define GSC_OUT_TILE_TYPE_MASK         (1 << 2)
 #define GSC_OUT_TILE_C_16x8            (0 << 2)
index c54806d08dd78d0080ef42a314fe5ac2db40d7b6..2298ed2a9e1c02f8e3c37d7f6761a23f0a470ca3 100644 (file)
@@ -117,7 +117,7 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
        if (ret < 0)
                goto err_cleanup;
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret < 0)
                goto err_sysfs;
 
@@ -148,8 +148,9 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
        if (panel_node) {
                fsl_dev->connector.panel = of_drm_find_panel(panel_node);
                of_node_put(panel_node);
-               if (!fsl_dev->connector.panel)
-                       return -EPROBE_DEFER;
+               if (IS_ERR(fsl_dev->connector.panel))
+                       return PTR_ERR(fsl_dev->connector.panel);
+
                return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
        }
 
index c51d9259c7a7c3f8713fd8ca46710b0c28a89671..204c8e452eb77efd3535fab25d12888837bc3464 100644 (file)
@@ -251,7 +251,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
        if (!fb)
                return;
 
-       offset = psbfb->gtt->offset;
+       offset = to_gtt_range(fb->obj[0])->offset;
        stride = fb->pitches[0];
 
        switch (fb->format->depth) {
index 5ea785f07ba8eb9f0e7345356483c17547eea4d0..90ed20083009fb9bf6f3115f671da4edf26d559b 100644 (file)
@@ -1770,7 +1770,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
 
        edid = drm_get_edid(connector, &intel_dp->adapter);
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
index f0878998526ac3c20b22827a21343f551c5072d2..4e4e4a66eaee3c7b2d3eca73632c9383f9a9a539 100644 (file)
@@ -216,7 +216,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
 
        edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
index cb0a2ae916e094b4e1e52d123a320d5212c776aa..2f00a37684a22b22042027adfbdf6358d55ff81f 100644 (file)
@@ -33,6 +33,7 @@
 #include <drm/drm.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "psb_drv.h"
 #include "psb_intel_reg.h"
 #include "framebuffer.h"
 #include "gtt.h"
 
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                             struct drm_file *file_priv,
-                                             unsigned int *handle);
-
 static const struct drm_framebuffer_funcs psb_fb_funcs = {
-       .destroy = psb_user_framebuffer_destroy,
-       .create_handle = psb_user_framebuffer_create_handle,
+       .destroy = drm_gem_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
 };
 
 #define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
@@ -96,17 +92,18 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
        struct psb_fbdev *fbdev = info->par;
        struct psb_framebuffer *psbfb = &fbdev->pfb;
        struct drm_device *dev = psbfb->base.dev;
+       struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
 
        /*
         *      We have to poke our nose in here. The core fb code assumes
         *      panning is part of the hardware that can be invoked before
         *      the actual fb is mapped. In our case that isn't quite true.
         */
-       if (psbfb->gtt->npage) {
+       if (gtt->npage) {
                /* GTT roll shifts in 4K pages, we need to shift the right
                   number of pages */
                int pages = info->fix.line_length >> 12;
-               psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+               psb_gtt_roll(dev, gtt, var->yoffset * pages);
        }
         return 0;
 }
@@ -117,13 +114,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
        struct psb_framebuffer *psbfb = vma->vm_private_data;
        struct drm_device *dev = psbfb->base.dev;
        struct drm_psb_private *dev_priv = dev->dev_private;
+       struct gtt_range *gtt = to_gtt_range(psbfb->base.obj[0]);
        int page_num;
        int i;
        unsigned long address;
        int ret;
        unsigned long pfn;
        unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
-                                 psbfb->gtt->offset;
+                                 gtt->offset;
 
        page_num = vma_pages(vma);
        address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
@@ -246,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
                return -EINVAL;
 
        drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
-       fb->gtt = gt;
+       fb->base.obj[0] = &gt->gem;
        ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
        if (ret) {
                dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
@@ -518,8 +516,8 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
        drm_framebuffer_unregister_private(&psbfb->base);
        drm_framebuffer_cleanup(&psbfb->base);
 
-       if (psbfb->gtt)
-               drm_gem_object_unreference_unlocked(&psbfb->gtt->gem);
+       if (psbfb->base.obj[0])
+               drm_gem_object_put_unlocked(psbfb->base.obj[0]);
        return 0;
 }
 
@@ -576,44 +574,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
        dev_priv->fbdev = NULL;
 }
 
-/**
- *     psb_user_framebuffer_create_handle - add handle to a framebuffer
- *     @fb: framebuffer
- *     @file_priv: our DRM file
- *     @handle: returned handle
- *
- *     Our framebuffer object is a GTT range which also contains a GEM
- *     object. We need to turn it into a handle for userspace. GEM will do
- *     the work for us
- */
-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                             struct drm_file *file_priv,
-                                             unsigned int *handle)
-{
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
-       struct gtt_range *r = psbfb->gtt;
-       return drm_gem_handle_create(file_priv, &r->gem, handle);
-}
-
-/**
- *     psb_user_framebuffer_destroy    -       destruct user created fb
- *     @fb: framebuffer
- *
- *     User framebuffers are backed by GEM objects so all we have to do is
- *     clean up a bit and drop the reference, GEM will handle the fallout
- */
-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
-       struct gtt_range *r = psbfb->gtt;
-
-       /* Let DRM do its clean up */
-       drm_framebuffer_cleanup(fb);
-       /*  We are no longer using the resource in GEM */
-       drm_gem_object_unreference_unlocked(&r->gem);
-       kfree(fb);
-}
-
 static const struct drm_mode_config_funcs psb_mode_funcs = {
        .fb_create = psb_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
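
The gma500 rework stores the backing GEM object in the generic fb->obj[0]
slot, which lets the hand-rolled destroy/create_handle callbacks be
replaced by the stock drm_gem_fb_destroy()/drm_gem_fb_create_handle()
helpers and removes the private gtt pointer from struct psb_framebuffer.
Any consumer now recovers the GTT range from the framebuffer like so:

        /* fb->obj[0] holds the embedded GEM object; upcast to the wrapper */
        struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
        unsigned long start = gtt->offset;  /* as in gma_pipe_set_base() */
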
index 395f20b07aabca38cd617f1e85c3ef2bdc19a898..23dc3c5f8f0de2b0a10fa946fcee0644d1477f17 100644 (file)
@@ -31,7 +31,6 @@ struct psb_framebuffer {
        struct drm_framebuffer base;
        struct address_space *addr_space;
        struct fb_info *fbdev;
-       struct gtt_range *gtt;
 };
 
 struct psb_fbdev {
index 131239759a75a2b9aafd4f3f805003480bed79b3..913bf4c256faffa1fb3324df6b259f052004ba64 100644 (file)
@@ -93,7 +93,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
                return ret;
        }
        /* We have the initial and handle reference but need only one now */
-       drm_gem_object_unreference_unlocked(&r->gem);
+       drm_gem_object_put_unlocked(&r->gem);
        *handlep = handle;
        return 0;
 }
index f3c48a2be71b87088e81becf739786bd7dc94755..09c1161a7ac635ebbfc9f4c80a9824b43286aead 100644 (file)
@@ -60,7 +60,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct gtt_range *gtt;
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
@@ -76,12 +76,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                goto gma_pipe_cleaner;
        }
 
+       gtt = to_gtt_range(fb->obj[0]);
+
        /* We are displaying this buffer, make sure it is actually loaded
           into the GTT */
-       ret = psb_gtt_pin(psbfb->gtt);
+       ret = psb_gtt_pin(gtt);
        if (ret < 0)
                goto gma_pipe_set_base_exit;
-       start = psbfb->gtt->offset;
+       start = gtt->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];
 
        REG_WRITE(map->stride, fb->pitches[0]);
@@ -129,7 +131,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 gma_pipe_cleaner:
        /* If there was a previous display we can now unpin it */
        if (old_fb)
-               psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+               psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));
 
 gma_pipe_set_base_exit:
        gma_power_end(dev);
@@ -353,7 +355,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
                        gt = container_of(gma_crtc->cursor_obj,
                                          struct gtt_range, gem);
                        psb_gtt_unpin(gt);
-                       drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+                       drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
                        gma_crtc->cursor_obj = NULL;
                }
                return 0;
@@ -429,7 +431,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
        if (gma_crtc->cursor_obj) {
                gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
                psb_gtt_unpin(gt);
-               drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj);
+               drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
        }
 
        gma_crtc->cursor_obj = obj;
@@ -437,7 +439,7 @@ unlock:
        return ret;
 
 unref_cursor:
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
        return ret;
 }
 
@@ -491,7 +493,7 @@ void gma_crtc_disable(struct drm_crtc *crtc)
        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
        if (crtc->primary->fb) {
-               gt = to_psb_fb(crtc->primary->fb)->gtt;
+               gt = to_gtt_range(crtc->primary->fb->obj[0]);
                psb_gtt_unpin(gt);
        }
 }
@@ -663,7 +665,7 @@ void gma_connector_attach_encoder(struct gma_connector *connector,
                                  struct gma_encoder *encoder)
 {
        connector->encoder = encoder;
-       drm_mode_connector_attach_encoder(&connector->base,
+       drm_connector_attach_encoder(&connector->base,
                                          &encoder->base);
 }
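
Another mechanical rename in this series: drm_mode_connector_attach_encoder() became drm_connector_attach_encoder() when the connector API settled on the drm_connector_* prefix. A sketch under that assumption; my_output_init() is a hypothetical wrapper:

    #include <drm/drm_connector.h>

    static int my_output_init(struct drm_connector *connector,
                              struct drm_encoder *encoder)
    {
            /* was drm_mode_connector_attach_encoder(); behavior unchanged */
            return drm_connector_attach_encoder(connector, encoder);
    }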
 
index cdbb350c9d5d2b23c3272ac908163e8b6610677d..cb0c3a2a1fd4b6882505c490af2a656953f9cdfe 100644 (file)
@@ -53,6 +53,8 @@ struct gtt_range {
        int roll;                       /* Roll applied to the GTT entries */
 };
 
+#define to_gtt_range(x) container_of(x, struct gtt_range, gem)
+
 extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
                                             const char *name, int backed,
                                             u32 align);
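
The new to_gtt_range() macro is the usual container_of() downcast: fb->obj[0] points at the drm_gem_object embedded in a struct gtt_range, so container_of() recovers the enclosing driver object. A sketch assuming the driver's psb_gtt.h is in scope; fb_to_gtt() is a hypothetical helper:

    #include <linux/kernel.h>       /* container_of() */

    static inline struct gtt_range *fb_to_gtt(struct drm_framebuffer *fb)
    {
            /* walk back from the embedded gem member to its container */
            return container_of(fb->obj[0], struct gtt_range, gem);
    }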
index 978ae4b25e82267ad448b68ed50c0c369c75dc11..e0ccf1d19a4dc46aee14aa3a6cf4581e90767f1e 100644 (file)
@@ -34,7 +34,7 @@ struct vbt_header {
        u8 reserved0;
        u32 bdb_offset;                 /**< from beginning of VBT */
        u32 aim_offset[4];              /**< from beginning of VBT */
-} __attribute__((packed));
+} __packed;
 
 
 struct bdb_header {
@@ -61,7 +61,7 @@ struct vbios_data {
        u8 rsvd4; /* popup memory size */
        u8 resize_pci_bios;
        u8 rsvd5; /* is crt already on ddc2 */
-} __attribute__((packed));
+} __packed;
 
 /*
  * There are several types of BIOS data blocks (BDBs), each block has
@@ -133,7 +133,7 @@ struct bdb_general_features {
        u8 dp_ssc_enb:1;        /* PCH attached eDP supports SSC */
        u8 dp_ssc_freq:1;       /* SSC freq for PCH attached eDP */
        u8 rsvd11:3; /* finish byte */
-} __attribute__((packed));
+} __packed;
 
 /* pre-915 */
 #define GPIO_PIN_DVI_LVDS      0x03 /* "DVI/LVDS DDC GPIO pins" */
@@ -213,7 +213,7 @@ struct child_device_config {
        u8  dvo2_wiring;
        u16 extended_type;
        u8  dvo_function;
-} __attribute__((packed));
+} __packed;
 
 
 struct bdb_general_definitions {
@@ -256,7 +256,7 @@ struct bdb_lvds_options {
        u8 lvds_edid:1;
        u8 rsvd2:1;
        u8 rsvd4;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_backlight {
        u8 type:2;
@@ -268,7 +268,7 @@ struct bdb_lvds_backlight {
        u8 i2caddr;
        u8 brightnesscmd;
        /*FIXME: more...*/
-} __attribute__((packed));
+} __packed;
 
 /* LFP pointer table contains entries to the struct below */
 struct bdb_lvds_lfp_data_ptr {
@@ -278,12 +278,12 @@ struct bdb_lvds_lfp_data_ptr {
        u8 dvo_table_size;
        u16 panel_pnp_id_offset;
        u8 pnp_table_size;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data_ptrs {
        u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
        struct bdb_lvds_lfp_data_ptr ptr[16];
-} __attribute__((packed));
+} __packed;
 
 /* LFP data has 3 blocks per entry */
 struct lvds_fp_timing {
@@ -300,7 +300,7 @@ struct lvds_fp_timing {
        u32 pfit_reg;
        u32 pfit_reg_val;
        u16 terminator;
-} __attribute__((packed));
+} __packed;
 
 struct lvds_dvo_timing {
        u16 clock;              /**< In 10khz */
@@ -328,7 +328,7 @@ struct lvds_dvo_timing {
        u8 vsync_positive:1;
        u8 hsync_positive:1;
        u8 rsvd2:1;
-} __attribute__((packed));
+} __packed;
 
 struct lvds_pnp_id {
        u16 mfg_name;
@@ -336,17 +336,17 @@ struct lvds_pnp_id {
        u32 serial;
        u8 mfg_week;
        u8 mfg_year;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data_entry {
        struct lvds_fp_timing fp_timing;
        struct lvds_dvo_timing dvo_timing;
        struct lvds_pnp_id pnp_id;
-} __attribute__((packed));
+} __packed;
 
 struct bdb_lvds_lfp_data {
        struct bdb_lvds_lfp_data_entry data[16];
-} __attribute__((packed));
+} __packed;
 
 struct aimdb_header {
        char signature[16];
@@ -354,12 +354,12 @@ struct aimdb_header {
        u16 aimdb_version;
        u16 aimdb_header_size;
        u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
 
 struct aimdb_block {
        u8 aimdb_id;
        u16 aimdb_size;
-} __attribute__((packed));
+} __packed;
 
 struct vch_panel_data {
        u16 fp_timing_offset;
@@ -370,12 +370,12 @@ struct vch_panel_data {
        u8 text_fitting_size;
        u16 graphics_fitting_offset;
        u8 graphics_fitting_size;
-} __attribute__((packed));
+} __packed;
 
 struct vch_bdb_22 {
        struct aimdb_block aimdb_block;
        struct vch_panel_data panels[16];
-} __attribute__((packed));
+} __packed;
 
 struct bdb_sdvo_lvds_options {
        u8 panel_backlight;
@@ -391,7 +391,7 @@ struct bdb_sdvo_lvds_options {
        u8 panel_misc_bits_2;
        u8 panel_misc_bits_3;
        u8 panel_misc_bits_4;
-} __attribute__((packed));
+} __packed;
 
 #define BDB_DRIVER_FEATURE_NO_LVDS             0
 #define BDB_DRIVER_FEATURE_INT_LVDS            1
@@ -436,7 +436,7 @@ struct bdb_driver_features {
 
        u8 hdmi_termination;
        u8 custom_vbt_version;
-} __attribute__((packed));
+} __packed;
 
 #define EDP_18BPP      0
 #define EDP_24BPP      1
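
The long run of hunks above replaces the open-coded GCC attribute with the kernel's __packed shorthand; the struct layouts are unchanged. A sketch for illustration, with example_hdr as a hypothetical struct:

    /* __packed is defined in <linux/compiler.h>, roughly as: */
    #define __packed __attribute__((packed))

    struct example_hdr {
            u8  id;
            u16 size;       /* no padding inserted before this member */
    } __packed;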
index a05c020602bda439b03fabdae42781d3dc627385..d0bf5a1e94e8751b187538822c463447a432565f 100644 (file)
@@ -999,7 +999,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
                                p_funcs->encoder_helper_funcs);
 
        /*attach to given connector*/
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        /*set possible crtcs and clones*/
        if (dsi_connector->pipe) {
index 5c066448be5b7d6a69ab52259fea38f29e349d35..2b9fa0163dea7dcb848be9c351108cccc8e0fa2c 100644 (file)
@@ -167,7 +167,6 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
@@ -196,7 +195,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        if (!gma_power_begin(dev, true))
                return 0;
 
-       start = psbfb->gtt->offset;
+       start = to_gtt_range(fb->obj[0])->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];
 
        REG_WRITE(map->stride, fb->pitches[0]);
index 0fff269d3fe68ee3e036cbda37fdfd1e63b2a984..1b7fd6a9d8a518ba5ad635a8524a109d822b1c54 100644 (file)
@@ -600,7 +600,6 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
        int pipe = gma_crtc->pipe;
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        unsigned long start, offset;
@@ -617,7 +616,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
        if (!gma_power_begin(dev, true))
                return 0;
 
-       start = psbfb->gtt->offset;
+       start = to_gtt_range(fb->obj[0])->offset;
        offset = y * fb->pitches[0] + x * fb->format->cpp[0];
 
        REG_WRITE(map->stride, fb->pitches[0]);
index 78566a80ad255acc35840a6668ee4ba9383ab29b..c6d72de1c0548bc79e92d76b9946a8a06bd55bbf 100644 (file)
@@ -578,7 +578,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
        }
 
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
        }
        return ret;
index e6943fef0611d1202c1f4cab9bda5efe28cdb2d2..83babb815a5d83bde5c87aad065b23aeb444ddbb 100644 (file)
@@ -376,7 +376,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
         * preferred mode is the right one.
         */
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                drm_add_edid_modes(connector, edid);
                kfree(edid);
 
index e5360726d80b7531df0006c25327de9e36eac127..fb4da3cd668163c9eba1b491e37615cad195ef84 100644 (file)
@@ -66,7 +66,7 @@ int psb_intel_ddc_get_modes(struct drm_connector *connector,
 
        edid = drm_get_edid(connector, adapter);
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
index f2ee6aa10afa1e7e8fc143e582f044c187d663eb..dd3cec0e3190c835e04c8313410b8bfc88c8eb32 100644 (file)
@@ -429,13 +429,20 @@ static const char *cmd_status_names[] = {
        "Scaling not supported"
 };
 
+#define MAX_ARG_LEN 32
+
 static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
                                 const void *args, int args_len)
 {
-       u8 buf[args_len*2 + 2], status;
-       struct i2c_msg msgs[args_len + 3];
+       u8 buf[MAX_ARG_LEN*2 + 2], status;
+       struct i2c_msg msgs[MAX_ARG_LEN + 3];
        int i, ret;
 
+       if (args_len > MAX_ARG_LEN) {
+               DRM_ERROR("Need to increase arg length\n");
+               return false;
+       }
+
        psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
 
        for (i = 0; i < args_len; i++) {
@@ -1465,7 +1472,7 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
                bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
 
                if (connector_is_digital == monitor_is_digital) {
-                       drm_mode_connector_update_edid_property(connector, edid);
+                       drm_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
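
The MAX_ARG_LEN change above replaces variable-length arrays on the stack with fixed-size buffers plus an explicit bound check, part of the kernel-wide VLA removal. A minimal sketch of the pattern; MY_MAX_ARGS and my_send() are illustrative names, not from the patch:

    #include <linux/string.h>
    #include <linux/types.h>

    #define MY_MAX_ARGS 32

    static bool my_send(const void *args, int args_len)
    {
            u8 buf[MY_MAX_ARGS * 2 + 2];    /* fixed bound instead of a VLA */

            if (args_len > MY_MAX_ARGS)     /* reject oversized input */
                    return false;

            memcpy(&buf[2], args, args_len);
            /* ... fill buf[0..1] with the header and transmit ... */
            return true;
    }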
 
index d2f4749ebf8dcf1fe0e685f7d756859a8883d249..744956cea7496afd8ec2672bd953eaf67ef661c3 100644 (file)
@@ -133,7 +133,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
        }
 
        drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index 2269be91f3e16936957854931d3c848bfcf54647..bb774202a5a1bc41d01dc6f8369db37822149f48 100644 (file)
@@ -859,7 +859,6 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
                return PTR_ERR(crtc_state);
 
        if (src_w != crtc_w || src_h != crtc_h) {
-               DRM_ERROR("Scale not support!!!\n");
                return -EINVAL;
        }
 
index 6ebd8842dbcc8649c887d689becf69e017f5146b..a7c39f39793ff2c2ce152e3cba3c3036513a7f19 100644 (file)
@@ -69,6 +69,7 @@ struct tda998x_priv {
        bool edid_delay_active;
 
        struct drm_encoder encoder;
+       struct drm_bridge bridge;
        struct drm_connector connector;
 
        struct tda998x_audio_port audio_port[2];
@@ -79,9 +80,10 @@ struct tda998x_priv {
 
 #define conn_to_tda998x_priv(x) \
        container_of(x, struct tda998x_priv, connector)
-
 #define enc_to_tda998x_priv(x) \
        container_of(x, struct tda998x_priv, encoder)
+#define bridge_to_tda998x_priv(x) \
+       container_of(x, struct tda998x_priv, bridge)
 
 /* The TDA9988 series of devices uses a paged register scheme. To simplify
  * things we encode the page # in upper bits of the register #.  To read/
@@ -589,13 +591,22 @@ out:
        return ret;
 }
 
+#define MAX_WRITE_RANGE_BUF 32
+
 static void
 reg_write_range(struct tda998x_priv *priv, u16 reg, u8 *p, int cnt)
 {
        struct i2c_client *client = priv->hdmi;
-       u8 buf[cnt+1];
+       /* fixed-size buffer; cnt is validated against this bound below */
+       u8 buf[MAX_WRITE_RANGE_BUF + 1];
        int ret;
 
+       if (cnt > MAX_WRITE_RANGE_BUF) {
+               dev_err(&client->dev, "Fixed write buffer too small (%d)\n",
+                               MAX_WRITE_RANGE_BUF);
+               return;
+       }
+
        buf[0] = REG2ADDR(reg);
        memcpy(&buf[1], p, cnt);
 
@@ -753,7 +764,7 @@ static void tda998x_detect_work(struct work_struct *work)
 {
        struct tda998x_priv *priv =
                container_of(work, struct tda998x_priv, detect_work);
-       struct drm_device *dev = priv->encoder.dev;
+       struct drm_device *dev = priv->connector.dev;
 
        if (dev)
                drm_kms_helper_hotplug_event(dev);
@@ -805,7 +816,7 @@ static void
 tda998x_write_if(struct tda998x_priv *priv, u8 bit, u16 addr,
                 union hdmi_infoframe *frame)
 {
-       u8 buf[32];
+       u8 buf[MAX_WRITE_RANGE_BUF];
        ssize_t len;
 
        len = hdmi_infoframe_pack(frame, buf, sizeof(buf));
@@ -1095,29 +1106,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
 
 /* DRM connector functions */
 
-static int tda998x_connector_fill_modes(struct drm_connector *connector,
-                                       uint32_t maxX, uint32_t maxY)
-{
-       struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-       int ret;
-
-       mutex_lock(&priv->audio_mutex);
-       ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-
-       if (connector->edid_blob_ptr) {
-               struct edid *edid = (void *)connector->edid_blob_ptr->data;
-
-               cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
-
-               priv->sink_has_audio = drm_detect_monitor_audio(edid);
-       } else {
-               priv->sink_has_audio = false;
-       }
-       mutex_unlock(&priv->audio_mutex);
-
-       return ret;
-}
-
 static enum drm_connector_status
 tda998x_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -1136,7 +1124,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
 static const struct drm_connector_funcs tda998x_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
-       .fill_modes = tda998x_connector_fill_modes,
+       .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = tda998x_connector_detect,
        .destroy = tda998x_connector_destroy,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1234,41 +1222,30 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
                return 0;
        }
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
+       cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
+
+       mutex_lock(&priv->audio_mutex);
        n = drm_add_edid_modes(connector, edid);
+       priv->sink_has_audio = drm_detect_monitor_audio(edid);
+       mutex_unlock(&priv->audio_mutex);
 
        kfree(edid);
 
        return n;
 }
 
-static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
-                                       struct drm_display_mode *mode)
-{
-       /* TDA19988 dotclock can go up to 165MHz */
-       struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
-       if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
-               return MODE_CLOCK_HIGH;
-       if (mode->htotal >= BIT(13))
-               return MODE_BAD_HVALUE;
-       if (mode->vtotal >= BIT(11))
-               return MODE_BAD_VVALUE;
-       return MODE_OK;
-}
-
 static struct drm_encoder *
 tda998x_connector_best_encoder(struct drm_connector *connector)
 {
        struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
 
-       return &priv->encoder;
+       return priv->bridge.encoder;
 }
 
 static
 const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
        .get_modes = tda998x_connector_get_modes,
-       .mode_valid = tda998x_connector_mode_valid,
        .best_encoder = tda998x_connector_best_encoder,
 };
 
@@ -1292,25 +1269,48 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
        if (ret)
                return ret;
 
-       drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+       drm_connector_attach_encoder(&priv->connector,
+                                    priv->bridge.encoder);
 
        return 0;
 }
 
-/* DRM encoder functions */
+/* DRM bridge functions */
 
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static int tda998x_bridge_attach(struct drm_bridge *bridge)
 {
-       struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
-       bool on;
+       struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
 
-       /* we only care about on or off: */
-       on = mode == DRM_MODE_DPMS_ON;
+       return tda998x_connector_init(priv, bridge->dev);
+}
 
-       if (on == priv->is_on)
-               return;
+static void tda998x_bridge_detach(struct drm_bridge *bridge)
+{
+       struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
 
-       if (on) {
+       drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
+                                    const struct drm_display_mode *mode)
+{
+       /* TDA19988 dotclock can go up to 165MHz */
+       struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+       if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
+               return MODE_CLOCK_HIGH;
+       if (mode->htotal >= BIT(13))
+               return MODE_BAD_HVALUE;
+       if (mode->vtotal >= BIT(11))
+               return MODE_BAD_VVALUE;
+       return MODE_OK;
+}
+
+static void tda998x_bridge_enable(struct drm_bridge *bridge)
+{
+       struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+       if (!priv->is_on) {
                /* enable video ports, audio will be enabled later */
                reg_write(priv, REG_ENA_VP_0, 0xff);
                reg_write(priv, REG_ENA_VP_1, 0xff);
@@ -1321,7 +1321,14 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
                reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
 
                priv->is_on = true;
-       } else {
+       }
+}
+
+static void tda998x_bridge_disable(struct drm_bridge *bridge)
+{
+       struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+       if (priv->is_on) {
                /* disable video ports */
                reg_write(priv, REG_ENA_VP_0, 0x00);
                reg_write(priv, REG_ENA_VP_1, 0x00);
@@ -1331,12 +1338,12 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
        }
 }
 
-static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
-                        struct drm_display_mode *mode,
-                        struct drm_display_mode *adjusted_mode)
+static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
+                                   struct drm_display_mode *mode,
+                                   struct drm_display_mode *adjusted_mode)
 {
-       struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+       struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+       unsigned long tmds_clock;
        u16 ref_pix, ref_line, n_pix, n_line;
        u16 hs_pix_s, hs_pix_e;
        u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1407,12 +1414,19 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
                               (mode->vsync_end - mode->vsync_start)/2;
        }
 
-       div = 148500 / mode->clock;
-       if (div != 0) {
-               div--;
-               if (div > 3)
-                       div = 3;
-       }
+       tmds_clock = mode->clock;
+
+       /*
+        * The divisor is a power of 2. The TDA9983B datasheet gives
+        * this as ranges of Msample/s, which is 10x the TMDS clock:
+        *   0 - 800 to 1500 Msample/s
+        *   1 - 400 to 800 Msample/s
+        *   2 - 200 to 400 Msample/s
+        *   3 - as 2 above
+        */
+       for (div = 0; div < 3; div++)
+               if (80000 >> div <= tmds_clock)
+                       break;
 
        mutex_lock(&priv->audio_mutex);
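
The loop above picks the smallest power-of-2 divisor whose documented range still covers the TMDS clock; mode->clock is in kHz, so 80000 >> div encodes the 80/40/20 MHz thresholds, and a clock below all three leaves div at 3. A standalone restatement; pick_div() is a hypothetical helper:

    static int pick_div(unsigned long tmds_clock_khz)
    {
            int div;

            /* >= 80 MHz -> 0, >= 40 MHz -> 1, >= 20 MHz -> 2, else 3 */
            for (div = 0; div < 3; div++)
                    if (80000 >> div <= tmds_clock_khz)
                            break;
            return div;     /* e.g. 148500 -> 0, 74250 -> 1, 25175 -> 2 */
    }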
 
@@ -1543,26 +1557,14 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
        mutex_unlock(&priv->audio_mutex);
 }
 
-static void tda998x_destroy(struct tda998x_priv *priv)
-{
-       /* disable all IRQs and free the IRQ handler */
-       cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
-       reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
-       if (priv->audio_pdev)
-               platform_device_unregister(priv->audio_pdev);
-
-       if (priv->hdmi->irq)
-               free_irq(priv->hdmi->irq, priv);
-
-       del_timer_sync(&priv->edid_delay_timer);
-       cancel_work_sync(&priv->detect_work);
-
-       i2c_unregister_device(priv->cec);
-
-       if (priv->cec_notify)
-               cec_notifier_put(priv->cec_notify);
-}
+static const struct drm_bridge_funcs tda998x_bridge_funcs = {
+       .attach = tda998x_bridge_attach,
+       .detach = tda998x_bridge_detach,
+       .mode_valid = tda998x_bridge_mode_valid,
+       .disable = tda998x_bridge_disable,
+       .mode_set = tda998x_bridge_mode_set,
+       .enable = tda998x_bridge_enable,
+};
 
 /* I2C driver functions */
 
@@ -1608,16 +1610,69 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
        return 0;
 }
 
-static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
+static void tda998x_set_config(struct tda998x_priv *priv,
+                              const struct tda998x_encoder_params *p)
 {
+       priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+                           (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+                           VIP_CNTRL_0_SWAP_B(p->swap_b) |
+                           (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+       priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+                           (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+                           VIP_CNTRL_1_SWAP_D(p->swap_d) |
+                           (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+       priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+                           (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+                           VIP_CNTRL_2_SWAP_F(p->swap_f) |
+                           (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+       priv->audio_params = p->audio_params;
+}
+
+static void tda998x_destroy(struct device *dev)
+{
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+       drm_bridge_remove(&priv->bridge);
+
+       /* disable all IRQs and free the IRQ handler */
+       cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+       reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+       if (priv->audio_pdev)
+               platform_device_unregister(priv->audio_pdev);
+
+       if (priv->hdmi->irq)
+               free_irq(priv->hdmi->irq, priv);
+
+       del_timer_sync(&priv->edid_delay_timer);
+       cancel_work_sync(&priv->detect_work);
+
+       i2c_unregister_device(priv->cec);
+
+       if (priv->cec_notify)
+               cec_notifier_put(priv->cec_notify);
+}
+
+static int tda998x_create(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
        struct device_node *np = client->dev.of_node;
        struct i2c_board_info cec_info;
+       struct tda998x_priv *priv;
        u32 video;
        int rev_lo, rev_hi, ret;
 
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev_set_drvdata(dev, priv);
+
        mutex_init(&priv->mutex);       /* protect the page access */
        mutex_init(&priv->audio_mutex); /* protect access from audio thread */
        mutex_init(&priv->edid_mutex);
+       INIT_LIST_HEAD(&priv->bridge.list);
        init_waitqueue_head(&priv->edid_delay_waitq);
        timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
        INIT_WORK(&priv->detect_work, tda998x_detect_work);
@@ -1640,13 +1695,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        /* read version: */
        rev_lo = reg_read(priv, REG_VERSION_LSB);
        if (rev_lo < 0) {
-               dev_err(&client->dev, "failed to read version: %d\n", rev_lo);
+               dev_err(dev, "failed to read version: %d\n", rev_lo);
                return rev_lo;
        }
 
        rev_hi = reg_read(priv, REG_VERSION_MSB);
        if (rev_hi < 0) {
-               dev_err(&client->dev, "failed to read version: %d\n", rev_hi);
+               dev_err(dev, "failed to read version: %d\n", rev_hi);
                return rev_hi;
        }
 
@@ -1657,20 +1712,19 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
        switch (priv->rev) {
        case TDA9989N2:
-               dev_info(&client->dev, "found TDA9989 n2");
+               dev_info(dev, "found TDA9989 n2");
                break;
        case TDA19989:
-               dev_info(&client->dev, "found TDA19989");
+               dev_info(dev, "found TDA19989");
                break;
        case TDA19989N2:
-               dev_info(&client->dev, "found TDA19989 n2");
+               dev_info(dev, "found TDA19989 n2");
                break;
        case TDA19988:
-               dev_info(&client->dev, "found TDA19988");
+               dev_info(dev, "found TDA19988");
                break;
        default:
-               dev_err(&client->dev, "found unsupported device: %04x\n",
-                       priv->rev);
+               dev_err(dev, "found unsupported device: %04x\n", priv->rev);
                return -ENXIO;
        }
 
@@ -1713,8 +1767,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
                                           tda998x_irq_thread, irq_flags,
                                           "tda998x", priv);
                if (ret) {
-                       dev_err(&client->dev,
-                               "failed to request IRQ#%u: %d\n",
+                       dev_err(dev, "failed to request IRQ#%u: %d\n",
                                client->irq, ret);
                        goto err_irq;
                }
@@ -1723,13 +1776,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
                cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
        }
 
-       priv->cec_notify = cec_notifier_get(&client->dev);
+       priv->cec_notify = cec_notifier_get(dev);
        if (!priv->cec_notify) {
                ret = -ENOMEM;
                goto fail;
        }
 
-       priv->cec_glue.parent = &client->dev;
+       priv->cec_glue.parent = dev;
        priv->cec_glue.data = priv;
        priv->cec_glue.init = tda998x_cec_hook_init;
        priv->cec_glue.exit = tda998x_cec_hook_exit;
@@ -1759,61 +1812,44 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        /* enable EDID read irq: */
        reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
 
-       if (!np)
-               return 0;               /* non-DT */
+       if (np) {
+               /* get the device tree parameters */
+               ret = of_property_read_u32(np, "video-ports", &video);
+               if (ret == 0) {
+                       priv->vip_cntrl_0 = video >> 16;
+                       priv->vip_cntrl_1 = video >> 8;
+                       priv->vip_cntrl_2 = video;
+               }
+
+               ret = tda998x_get_audio_ports(priv, np);
+               if (ret)
+                       goto fail;
 
-       /* get the device tree parameters */
-       ret = of_property_read_u32(np, "video-ports", &video);
-       if (ret == 0) {
-               priv->vip_cntrl_0 = video >> 16;
-               priv->vip_cntrl_1 = video >> 8;
-               priv->vip_cntrl_2 = video;
+               if (priv->audio_port[0].format != AFMT_UNUSED)
+                       tda998x_audio_codec_init(priv, &client->dev);
+       } else if (dev->platform_data) {
+               tda998x_set_config(priv, dev->platform_data);
        }
 
-       ret = tda998x_get_audio_ports(priv, np);
-       if (ret)
-               goto fail;
+       priv->bridge.funcs = &tda998x_bridge_funcs;
+#ifdef CONFIG_OF
+       priv->bridge.of_node = dev->of_node;
+#endif
 
-       if (priv->audio_port[0].format != AFMT_UNUSED)
-               tda998x_audio_codec_init(priv, &client->dev);
+       drm_bridge_add(&priv->bridge);
 
        return 0;
 
 fail:
-       /* if encoder_init fails, the encoder slave is never registered,
-        * so cleanup here:
-        */
-       i2c_unregister_device(priv->cec);
-       if (priv->cec_notify)
-               cec_notifier_put(priv->cec_notify);
-       if (client->irq)
-               free_irq(client->irq, priv);
+       tda998x_destroy(dev);
 err_irq:
        return ret;
 }
 
-static void tda998x_encoder_prepare(struct drm_encoder *encoder)
-{
-       tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tda998x_encoder_commit(struct drm_encoder *encoder)
-{
-       tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
-       .dpms = tda998x_encoder_dpms,
-       .prepare = tda998x_encoder_prepare,
-       .commit = tda998x_encoder_commit,
-       .mode_set = tda998x_encoder_mode_set,
-};
+/* DRM encoder functions */
 
 static void tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
-
-       tda998x_destroy(priv);
        drm_encoder_cleanup(encoder);
 }
 
@@ -1821,40 +1857,12 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
        .destroy = tda998x_encoder_destroy,
 };
 
-static void tda998x_set_config(struct tda998x_priv *priv,
-                              const struct tda998x_encoder_params *p)
-{
-       priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
-                           (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
-                           VIP_CNTRL_0_SWAP_B(p->swap_b) |
-                           (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
-       priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
-                           (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
-                           VIP_CNTRL_1_SWAP_D(p->swap_d) |
-                           (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
-       priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
-                           (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
-                           VIP_CNTRL_2_SWAP_F(p->swap_f) |
-                           (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
-       priv->audio_params = p->audio_params;
-}
-
-static int tda998x_bind(struct device *dev, struct device *master, void *data)
+static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
 {
-       struct tda998x_encoder_params *params = dev->platform_data;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct drm_device *drm = data;
-       struct tda998x_priv *priv;
+       struct tda998x_priv *priv = dev_get_drvdata(dev);
        u32 crtcs = 0;
        int ret;
 
-       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       dev_set_drvdata(dev, priv);
-
        if (dev->of_node)
                crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
 
@@ -1866,40 +1874,36 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
 
        priv->encoder.possible_crtcs = crtcs;
 
-       ret = tda998x_create(client, priv);
-       if (ret)
-               return ret;
-
-       if (!dev->of_node && params)
-               tda998x_set_config(priv, params);
-
-       drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
        ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
                               DRM_MODE_ENCODER_TMDS, NULL);
        if (ret)
                goto err_encoder;
 
-       ret = tda998x_connector_init(priv, drm);
+       ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
        if (ret)
-               goto err_connector;
+               goto err_bridge;
 
        return 0;
 
-err_connector:
+err_bridge:
        drm_encoder_cleanup(&priv->encoder);
 err_encoder:
-       tda998x_destroy(priv);
        return ret;
 }
 
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+       struct drm_device *drm = data;
+
+       return tda998x_encoder_init(dev, drm);
+}
+
 static void tda998x_unbind(struct device *dev, struct device *master,
                           void *data)
 {
        struct tda998x_priv *priv = dev_get_drvdata(dev);
 
-       drm_connector_cleanup(&priv->connector);
        drm_encoder_cleanup(&priv->encoder);
-       tda998x_destroy(priv);
 }
 
 static const struct component_ops tda998x_ops = {
@@ -1910,16 +1914,27 @@ static const struct component_ops tda998x_ops = {
 static int
 tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
+       int ret;
+
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                dev_warn(&client->dev, "adapter does not support I2C\n");
                return -EIO;
        }
-       return component_add(&client->dev, &tda998x_ops);
+
+       ret = tda998x_create(&client->dev);
+       if (ret)
+               return ret;
+
+       ret = component_add(&client->dev, &tda998x_ops);
+       if (ret)
+               tda998x_destroy(&client->dev);
+       return ret;
 }
 
 static int tda998x_remove(struct i2c_client *client)
 {
        component_del(&client->dev, &tda998x_ops);
+       tda998x_destroy(&client->dev);
        return 0;
 }
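
Taken together, the tda998x hunks convert the driver from a bare encoder with helper funcs into a drm_bridge: the bridge is published with drm_bridge_add() at I2C probe time and attached to the encoder when the component master assembles the DRM device, with the connector now created in tda998x_bridge_attach(). A sketch of the bind-time wiring under those assumptions; my_bind() is hypothetical and error paths are trimmed:

    static int my_bind(struct drm_device *drm, struct tda998x_priv *priv)
    {
            int ret;

            ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
                                   DRM_MODE_ENCODER_TMDS, NULL);
            if (ret)
                    return ret;

            /* the bridge now provides mode_set/enable/disable; its attach
             * hook creates and attaches the connector */
            ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
            if (ret)
                    drm_encoder_cleanup(&priv->encoder);
            return ret;
    }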
 
index 576a417690d4774df01345e956237bc868f6fff1..3b378936f57559fbfe80a30a6b14ee00aa9f7136 100644 (file)
@@ -934,7 +934,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
        DRM_DEBUG("idx %d used %d discard %d\n",
                  vertex->idx, vertex->used, vertex->discard);
 
-       if (vertex->idx < 0 || vertex->idx > dma->buf_count)
+       if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
                return -EINVAL;
 
        i810_dma_dispatch_vertex(dev,
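
The i810 change is an off-by-one fix: a buffer list with buf_count entries has valid indices 0 .. buf_count - 1, so idx == buf_count must be rejected as well. A sketch of the corrected check as a hypothetical helper:

    static bool idx_in_range(int idx, int count)
    {
            /* '>' alone would wrongly allow idx == count */
            return idx >= 0 && idx < count;
    }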
index 9de8b1c51a5ccda8f6f7140f90ed4707b40d1da3..459f8f88a34cda0747ca37e7ccf43c4ca64fe283 100644 (file)
@@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM
 
           If in doubt, say "N".
 
+config DRM_I915_ERRLOG_GEM
+       bool "Insert extra logging (very verbose) for common GEM errors"
+       default n
+       depends on DRM_I915_DEBUG_GEM
+       help
+         Enable additional logging that may help track down the cause of
+         errors that principally originate in userspace.
+
+         Recommended for driver developers only.
+
+         If in doubt, say "N".
+
 config DRM_I915_TRACE_GEM
        bool "Insert extra ftrace output from the GEM internals"
        depends on DRM_I915_DEBUG_GEM
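
The new DRM_I915_ERRLOG_GEM symbol gates the extra logging behind the existing DRM_I915_DEBUG_GEM option. A sketch of how such a symbol is typically consumed from C; the option name comes from the hunk, the call site is hypothetical:

    #include <linux/kconfig.h>
    #include <drm/drm_print.h>

    static void my_report_gem_error(void)
    {
            /* IS_ENABLED() folds to a compile-time constant */
            if (IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM))
                    DRM_DEBUG_DRIVER("verbose GEM error logging enabled\n");
    }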
index 4c6adae23e18e18195e468e2abb959cf0ed67219..5794f102f9b8f0cde364f2a04123e1893ffbfd84 100644 (file)
@@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \
          dvo_ns2501.o \
          dvo_sil164.o \
          dvo_tfp410.o \
+         icl_dsi.o \
          intel_crt.o \
          intel_ddi.o \
          intel_dp_aux_backlight.o \
          intel_dp_link_training.o \
          intel_dp_mst.o \
          intel_dp.o \
-         intel_dsi.o \
          intel_dsi_dcs_backlight.o \
-         intel_dsi_pll.o \
          intel_dsi_vbt.o \
          intel_dvo.o \
          intel_hdmi.o \
@@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \
          intel_lvds.o \
          intel_panel.o \
          intel_sdvo.o \
-         intel_tv.o
+         intel_tv.o \
+         vlv_dsi.o \
+         vlv_dsi_pll.o
 
 # Post-mortem debug and GPU hang state capture
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
index 80b3e16cf48c0a0819fb4a9514cb21e5e6820772..caac9942e1e3ab52ac0fc1c0150536658215adf5 100644 (file)
 #define CH7017_BANG_LIMIT_CONTROL      0x7f
 
 struct ch7017_priv {
-       uint8_t dummy;
+       u8 dummy;
 };
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo);
@@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
 
 static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
 {
-       uint8_t buf[2] = { addr, val };
+       u8 buf[2] = { addr, val };
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
                            const struct drm_display_mode *mode,
                            const struct drm_display_mode *adjusted_mode)
 {
-       uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
-       uint8_t outputs_enable, lvds_control_2, lvds_power_down;
-       uint8_t horizontal_active_pixel_input;
-       uint8_t horizontal_active_pixel_output, vertical_active_line_output;
-       uint8_t active_input_line_output;
+       u8 lvds_pll_feedback_div, lvds_pll_vco_control;
+       u8 outputs_enable, lvds_control_2, lvds_power_down;
+       u8 horizontal_active_pixel_input;
+       u8 horizontal_active_pixel_output, vertical_active_line_output;
+       u8 active_input_line_output;
 
        DRM_DEBUG_KMS("Registers before mode setting\n");
        ch7017_dump_regs(dvo);
@@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 /* set the CH7017 power state */
 static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-       uint8_t val;
+       u8 val;
 
        ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
 
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
 
 static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
 {
-       uint8_t val;
+       u8 val;
 
        ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
 
@@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
 
 static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint8_t val;
+       u8 val;
 
 #define DUMP(reg)                                      \
 do {                                                   \
index 7aeeffd2428b688e5c404a59e1d331f633d879f1..397ac523372675e5baf37c96d56d7478de3f9897 100644 (file)
@@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
 static struct ch7xxx_id_struct {
-       uint8_t vid;
+       u8 vid;
        char *name;
 } ch7xxx_ids[] = {
        { CH7011_VID, "CH7011" },
@@ -96,7 +96,7 @@ static struct ch7xxx_id_struct {
 };
 
 static struct ch7xxx_did_struct {
-       uint8_t did;
+       u8 did;
        char *name;
 } ch7xxx_dids[] = {
        { CH7xxx_DID, "CH7XXX" },
@@ -107,7 +107,7 @@ struct ch7xxx_priv {
        bool quiet;
 };
 
-static char *ch7xxx_get_id(uint8_t vid)
+static char *ch7xxx_get_id(u8 vid)
 {
        int i;
 
@@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid)
        return NULL;
 }
 
-static char *ch7xxx_get_did(uint8_t did)
+static char *ch7xxx_get_did(u8 did)
 {
        int i;
 
@@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did)
 }
 
 /** Reads an 8 bit register */
-static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 }
 
 /** Writes an 8 bit register */
-static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 {
        /* this will detect the CH7xxx chip on the specified i2c bus */
        struct ch7xxx_priv *ch7xxx;
-       uint8_t vendor, device;
+       u8 vendor, device;
        char *name, *devid;
 
        ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
@@ -244,7 +244,7 @@ out:
 
 static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
 {
-       uint8_t cdet, orig_pm, pm;
+       u8 cdet, orig_pm, pm;
 
        ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
 
@@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
                            const struct drm_display_mode *mode,
                            const struct drm_display_mode *adjusted_mode)
 {
-       uint8_t tvco, tpcp, tpd, tlpf, idf;
+       u8 tvco, tpcp, tpd, tlpf, idf;
 
        if (mode->clock <= 65000) {
                tvco = 0x23;
@@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
        int i;
 
        for (i = 0; i < CH7xxx_NUM_REGS; i++) {
-               uint8_t val;
+               u8 val;
                if ((i % 8) == 0)
                        DRM_DEBUG_KMS("\n %02X: ", i);
                ch7xxx_readb(dvo, i, &val);
index c73aff163908af1933ea0d2d0afb4f42145ccbaf..24278cc490905dcf04e6de7dfbfec60ebda4097a 100644 (file)
  * instead. The following list contains all registers that
  * require saving.
  */
-static const uint16_t backup_addresses[] = {
+static const u16 backup_addresses[] = {
        0x11, 0x12,
        0x18, 0x19, 0x1a, 0x1f,
        0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
@@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = {
 struct ivch_priv {
        bool quiet;
 
-       uint16_t width, height;
+       u16 width, height;
 
        /* Register backup */
 
-       uint16_t reg_backup[ARRAY_SIZE(backup_addresses)];
+       u16 reg_backup[ARRAY_SIZE(backup_addresses)];
 };
 
 
@@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo);
  *
  * Each of the 256 registers are 16 bits long.
  */
-static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data)
 {
        struct ivch_priv *priv = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 }
 
 /* Writes a 16-bit register on the ivch */
-static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data)
 {
        struct ivch_priv *priv = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
                      struct i2c_adapter *adapter)
 {
        struct ivch_priv *priv;
-       uint16_t temp;
+       u16 temp;
        int i;
 
        priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
@@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo)
 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 {
        int i;
-       uint16_t vr01, vr30, backlight;
+       u16 vr01, vr30, backlight;
 
        ivch_reset(dvo);
 
@@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
 
 static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
 {
-       uint16_t vr01;
+       u16 vr01;
 
        ivch_reset(dvo);
 
@@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
                          const struct drm_display_mode *adjusted_mode)
 {
        struct ivch_priv *priv = dvo->dev_priv;
-       uint16_t vr40 = 0;
-       uint16_t vr01 = 0;
-       uint16_t vr10;
+       u16 vr40 = 0;
+       u16 vr01 = 0;
+       u16 vr10;
 
        ivch_reset(dvo);
 
@@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 
        if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
            mode->vdisplay != adjusted_mode->crtc_vdisplay) {
-               uint16_t x_ratio, y_ratio;
+               u16 x_ratio, y_ratio;
 
                vr01 |= VR01_PANEL_FIT_ENABLE;
                vr40 |= VR40_CLOCK_GATING_ENABLE;
@@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 
 static void ivch_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint16_t val;
+       u16 val;
 
        ivch_read(dvo, VR00, &val);
        DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
index 2379c33cfe51eee3f312708ebde8617b7ce6e2a2..c584e01dc8dc39c75b78bb31252a4efe8d666616 100644 (file)
@@ -191,8 +191,8 @@ enum {
 };
 
 struct ns2501_reg {
-        uint8_t offset;
-        uint8_t value;
+       u8 offset;
+       u8 value;
 };
 
 /*
@@ -202,23 +202,23 @@ struct ns2501_reg {
  * read all this with a grain of salt.
  */
 struct ns2501_configuration {
-       uint8_t sync;           /* configuration of the C0 register */
-       uint8_t conf;           /* configuration register 8 */
-       uint8_t syncb;          /* configuration register 41 */
-       uint8_t dither;         /* configuration of the dithering */
-       uint8_t pll_a;          /* PLL configuration, register A, 1B */
-       uint16_t pll_b;         /* PLL configuration, register B, 1C/1D */
-       uint16_t hstart;        /* horizontal start, registers C1/C2 */
-       uint16_t hstop;         /* horizontal total, registers C3/C4 */
-       uint16_t vstart;        /* vertical start, registers C5/C6 */
-       uint16_t vstop;         /* vertical total, registers C7/C8 */
-       uint16_t vsync;         /* manual vertical sync start, 80/81 */
-       uint16_t vtotal;        /* number of lines generated, 82/83 */
-       uint16_t hpos;          /* horizontal position + 256, 98/99  */
-       uint16_t vpos;          /* vertical position, 8e/8f */
-       uint16_t voffs;         /* vertical output offset, 9c/9d */
-       uint16_t hscale;        /* horizontal scaling factor, b8/b9 */
-       uint16_t vscale;        /* vertical scaling factor, 10/11 */
+       u8 sync;                /* configuration of the C0 register */
+       u8 conf;                /* configuration register 8 */
+       u8 syncb;               /* configuration register 41 */
+       u8 dither;              /* configuration of the dithering */
+       u8 pll_a;               /* PLL configuration, register A, 1B */
+       u16 pll_b;              /* PLL configuration, register B, 1C/1D */
+       u16 hstart;             /* horizontal start, registers C1/C2 */
+       u16 hstop;              /* horizontal total, registers C3/C4 */
+       u16 vstart;             /* vertical start, registers C5/C6 */
+       u16 vstop;              /* vertical total, registers C7/C8 */
+       u16 vsync;              /* manual vertical sync start, 80/81 */
+       u16 vtotal;             /* number of lines generated, 82/83 */
+       u16 hpos;               /* horizontal position + 256, 98/99  */
+       u16 vpos;               /* vertical position, 8e/8f */
+       u16 voffs;              /* vertical output offset, 9c/9d */
+       u16 hscale;             /* horizontal scaling factor, b8/b9 */
+       u16 vscale;             /* vertical scaling factor, 10/11 */
 };
 
 /*
@@ -389,7 +389,7 @@ struct ns2501_priv {
 ** If it returns false, it might be wise to enable the
 ** DVO with the above function.
 */
-static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
+static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct ns2501_priv *ns = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
 ** If it returns false, it might be wise to enable the
 ** DVO with the above function.
 */
-static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct ns2501_priv *ns = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
 
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
index 1c1a0674dbab986014919316a39d57d98923cce2..4ae5d8fd9ff0fcf5d542e0405cefd2b08694c76a 100644 (file)
@@ -65,7 +65,7 @@ struct sil164_priv {
 
 #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
 
-static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct sil164_priv *sil = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        return false;
 }
 
-static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct sil164_priv *sil = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -173,7 +173,7 @@ out:
 
 static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
 {
-       uint8_t reg9;
+       u8 reg9;
 
        sil164_readb(dvo, SIL164_REG9, &reg9);
 
@@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
 
 static void sil164_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint8_t val;
+       u8 val;
 
        sil164_readb(dvo, SIL164_FREQ_LO, &val);
        DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
index 31e181da93db7b8187873efc2003a310540e2377..d603bc2f2506c5f20b2e17d83a6adb3605b1b8b5 100644 (file)
@@ -90,7 +90,7 @@ struct tfp410_priv {
        bool quiet;
 };
 
-static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
@@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
        return false;
 }
 
-static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch)
 {
        struct tfp410_priv *tfp = dvo->dev_priv;
        struct i2c_adapter *adapter = dvo->i2c_bus;
-       uint8_t out_buf[2];
+       u8 out_buf[2];
        struct i2c_msg msg = {
                .addr = dvo->slave_addr,
                .flags = 0,
@@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 
 static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
 {
-       uint8_t ch1, ch2;
+       u8 ch1, ch2;
 
        if (tfp410_readb(dvo, addr+0, &ch1) &&
            tfp410_readb(dvo, addr+1, &ch2))
@@ -203,7 +203,7 @@ out:
 static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
 {
        enum drm_connector_status ret = connector_status_disconnected;
-       uint8_t ctl2;
+       u8 ctl2;
 
        if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
                if (ctl2 & TFP410_CTL_2_RSEN)
@@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
 /* set the tfp410 power state */
 static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-       uint8_t ctl1;
+       u8 ctl1;
 
        if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
                return;
@@ -251,7 +251,7 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
 
 static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
 {
-       uint8_t ctl1;
+       u8 ctl1;
 
        if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
                return false;
@@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
 
 static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 {
-       uint8_t val, val2;
+       u8 val, val2;
 
        tfp410_readb(dvo, TFP410_REV, &val);
        DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
index 7c9ec4f4f36c747464342d14c8e3b5dd4c778335..380eeb2a0e83c60c1067a6d7150bd8608662d5b1 100644 (file)
@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
        }
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+       ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
                                  size, I915_GTT_PAGE_SIZE,
                                  I915_COLOR_UNEVICTABLE,
                                  start, end, flags);
index b51c05d03f14a1790ba43e70065af6acd74887ef..45e89b1e048183164e218c5aa868dca458ab93a4 100644 (file)
@@ -172,6 +172,7 @@ struct decode_info {
 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
 #define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
 #define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
+#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)
 
 #define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
 #define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
@@ -862,6 +863,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
        struct intel_vgpu *vgpu = s->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
+       u32 ctx_sr_ctl;
 
        if (offset + 4 > gvt->device_info.mmio_size) {
                gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +896,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
                patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
        }
 
+       /* TODO
+        * For now, only scan LRI commands on KBL, and only in an inhibit
+        * context. This is sufficient to support initializing MMIO via LRI
+        * commands in a vGPU inhibit context on KBL.
+        */
+       if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+                       intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+                       !strncmp(cmd, "lri", 3)) {
+               intel_gvt_hypervisor_read_gpa(s->vgpu,
+                       s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+               /* check inhibit context */
+               if (ctx_sr_ctl & 1) {
+                       u32 data = cmd_val(s, index + 1);
+
+                       if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+                               intel_vgpu_mask_mmio_write(vgpu,
+                                                       offset, &data, 4);
+                       else
+                               vgpu_vreg(vgpu, offset) = data;
+               }
+       }
+
        /* TODO: Update the global mask if this MMIO is a masked-MMIO */
        intel_gvt_mmio_set_cmd_accessed(gvt, offset);
        return 0;
@@ -1256,7 +1280,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
        if (!info->async_flip)
                return 0;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
                                GENMASK(12, 10)) >> 10;
@@ -1284,7 +1310,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 
        set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
                      info->surf_val << 12);
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
                              info->stride_val);
                set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1308,7 +1336,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 
        if (IS_BROADWELL(dev_priv))
                return gen8_decode_mi_display_flip(s, info);
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv))
                return skl_decode_mi_display_flip(s, info);
 
        return -ENODEV;
@@ -1317,26 +1347,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 static int check_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
 {
-       struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-       if (IS_BROADWELL(dev_priv)
-               || IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv))
-               return gen8_check_mi_display_flip(s, info);
-       return -ENODEV;
+       return gen8_check_mi_display_flip(s, info);
 }
 
 static int update_plane_mmio_from_mi_display_flip(
                struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
 {
-       struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
-       if (IS_BROADWELL(dev_priv)
-               || IS_SKYLAKE(dev_priv)
-               || IS_KABYLAKE(dev_priv))
-               return gen8_update_plane_mmio_from_mi_display_flip(s, info);
-       return -ENODEV;
+       return gen8_update_plane_mmio_from_mi_display_flip(s, info);
 }
 
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@ -1615,15 +1633,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
  */
 static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
-       struct intel_gvt *gvt = s->vgpu->gvt;
-
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               /* BDW decides privilege based on address space */
-               if (cmd_val(s, 0) & (1 << 8) &&
+       /* Decide privilege based on address space */
+       if (cmd_val(s, 0) & (1 << 8) &&
                        !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
-                       return 0;
-       }
+               return 0;
        return 1;
 }
 
@@ -2349,6 +2362,9 @@ static struct cmd_info cmd_info[] = {
        {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
                0, 16, NULL},
 
+       {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
+               0, 16, NULL},
+
        {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
 
        {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
index 6d8180e8d1e21a71916e8b6cad404dab8d8c3257..3019dbc39aef22573fa04b7120fc9ad021d83c15 100644 (file)
@@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int pipe;
 
+       if (IS_BROXTON(dev_priv)) {
+               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
+                       BXT_DE_PORT_HP_DDIB |
+                       BXT_DE_PORT_HP_DDIC);
+
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                               BXT_DE_PORT_HP_DDIA;
+               }
+
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                               BXT_DE_PORT_HP_DDIB;
+               }
+
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                               BXT_DE_PORT_HP_DDIC;
+               }
+
+               return;
+       }
+
        vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
@@ -196,7 +219,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_B << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +239,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_C << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +259,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_D << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
        for_each_pipe(dev_priv, pipe) {
                vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
                vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
-               vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
-               vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
+               vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
+               vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
        }
 
        vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
        struct intel_gvt_irq *irq = &gvt->irq;
        struct intel_vgpu *vgpu;
        int pipe, id;
+       bool found = false;
 
-       if (WARN_ON(!mutex_is_locked(&gvt->lock)))
-               return;
-
+       mutex_lock(&gvt->lock);
        for_each_active_vgpu(gvt, vgpu, id) {
                for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
-                       if (pipe_is_enabled(vgpu, pipe))
-                               goto out;
+                       if (pipe_is_enabled(vgpu, pipe)) {
+                               found = true;
+                               break;
+                       }
                }
+               if (found)
+                       break;
        }
 
        /* all the pipes are disabled */
-       hrtimer_cancel(&irq->vblank_timer.timer);
-       return;
-
-out:
-       hrtimer_start(&irq->vblank_timer.timer,
-               ktime_add_ns(ktime_get(), irq->vblank_timer.period),
-               HRTIMER_MODE_ABS);
-
+       if (!found)
+               hrtimer_cancel(&irq->vblank_timer.timer);
+       else
+               hrtimer_start(&irq->vblank_timer.timer,
+                       ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+                       HRTIMER_MODE_ABS);
+       mutex_unlock(&gvt->lock);
 }
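
Reduced to its essentials, the rewritten function implements this timer policy: keep the periodic vblank hrtimer armed while any vGPU pipe is enabled, cancel it otherwise. A sketch under that reading (helper name illustrative, not part of the patch):

    static void vblank_timer_update(struct hrtimer *timer, u64 period_ns,
                                    bool any_pipe_enabled)
    {
            if (any_pipe_enabled)
                    hrtimer_start(timer,
                                  ktime_add_ns(ktime_get(), period_ns),
                                  HRTIMER_MODE_ABS);
            else
                    hrtimer_cancel(timer);
    }
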
 
 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
 {
        int pipe;
 
+       mutex_lock(&vgpu->vgpu_lock);
        for_each_pipe(vgpu->gvt->dev_priv, pipe)
                emulate_vblank_on_pipe(vgpu, pipe);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
        struct intel_vgpu *vgpu;
        int id;
 
-       if (WARN_ON(!mutex_is_locked(&gvt->lock)))
-               return;
-
+       mutex_lock(&gvt->lock);
        for_each_active_vgpu(gvt, vgpu, id)
                emulate_vblank(vgpu);
+       mutex_unlock(&gvt->lock);
 }
 
 /**
index 6f4f8e941fc200aa66972be703a811c17fff93d8..6e3f56684f4ec03e7285b9e3715688ce31de311d 100644 (file)
@@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
@@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
        return obj;
 }
 
+static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
+{
+       return c && c->x_hot <= c->width && c->y_hot <= c->height;
+}
+
 static int vgpu_get_plane_info(struct drm_device *dev,
                struct intel_vgpu *vgpu,
                struct intel_vgpu_fb_info *info,
@@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;
 
-               /* The invalid cursor hotspot value is delivered to host
-                * until we find a way to get the cursor hotspot info of
-                * guest OS.
-                */
-               info->x_hot = UINT_MAX;
-               info->y_hot = UINT_MAX;
+               if (validate_hotspot(&c)) {
+                       info->x_hot = c.x_hot;
+                       info->y_hot = c.y_hot;
+               } else {
+                       info->x_hot = UINT_MAX;
+                       info->y_hot = UINT_MAX;
+               }
+
                info->size = (((info->stride * c.height * c.bpp) / 8)
                                + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else {
index f61337632969d4df534c9807fd6dddb6faf87fde..4b98539025c5b51014f9d515285f6c1f90d8cb59 100644 (file)
@@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
        return chr;
 }
 
+static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
+{
+       int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+       int port = -EINVAL;
+
+       if (port_select == 1)
+               port = PORT_B;
+       else if (port_select == 2)
+               port = PORT_C;
+       else if (port_select == 3)
+               port = PORT_D;
+       return port;
+}
+
 static inline int get_port_from_gmbus0(u32 gmbus0)
 {
        int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
@@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
                        unsigned int offset, void *p_data, unsigned int bytes)
 {
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int port, pin_select;
 
        memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
        if (pin_select == 0)
                return 0;
 
-       port = get_port_from_gmbus0(pin_select);
+       if (IS_BROXTON(dev_priv))
+               port = bxt_get_port_from_gmbus0(pin_select);
+       else
+               port = get_port_from_gmbus0(pin_select);
        if (WARN_ON(port < 0))
                return 0;
 
index 427e40e64d41e882c114fda05478fb6e60bb8379..714d709829a2a11ab4a83c45c1a3e6a3ecaca976 100644 (file)
@@ -146,14 +146,11 @@ struct execlist_ring_context {
        u32 nop4;
        u32 lri_cmd_2;
        struct execlist_mmio_pair ctx_timestamp;
-       struct execlist_mmio_pair pdp3_UDW;
-       struct execlist_mmio_pair pdp3_LDW;
-       struct execlist_mmio_pair pdp2_UDW;
-       struct execlist_mmio_pair pdp2_LDW;
-       struct execlist_mmio_pair pdp1_UDW;
-       struct execlist_mmio_pair pdp1_LDW;
-       struct execlist_mmio_pair pdp0_UDW;
-       struct execlist_mmio_pair pdp0_LDW;
+       /*
+        * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW,
+        *           pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW}
+        */
+       struct execlist_mmio_pair pdps[8];
 };
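
The eight named pdp fields collapse into pdps[8] with pdps[0] = pdp3_UDW down to pdps[7] = pdp0_LDW, i.e. pdpN_UDW sits at index 2 * (3 - N). A hypothetical accessor, for illustration only:

    static inline struct execlist_mmio_pair *
    ring_ctx_pdp_udw(struct execlist_ring_context *ctx, int n)
    {
            return &ctx->pdps[2 * (3 - n)];   /* n in 0..3; LDW follows */
    }
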
 
 struct intel_vgpu_elsp_dwords {
index 1c120683e9588c5a1d379c370e2760234f2d3ab1..face664be3e8e8bf673e589c7ff9b176f8102a76 100644 (file)
@@ -36,6 +36,7 @@
 #include <uapi/drm/drm_fourcc.h>
 #include "i915_drv.h"
 #include "gvt.h"
+#include "i915_pvinfo.h"
 
 #define PRIMARY_FORMAT_NUM     16
 struct pixel_format {
@@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
        u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
        u32 stride = stride_reg;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                switch (tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        stride = stride_reg * 64;
@@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (!plane->enabled)
                return -ENODEV;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) {
                plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
                _PLANE_CTL_TILED_SHIFT;
                fmt = skl_format_to_drm(
@@ -256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        }
 
        plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
-               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+               (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv)) ?
                        (_PRI_PLANE_STRIDE_MASK >> 6) :
                                _PRI_PLANE_STRIDE_MASK, plane->bpp);
 
@@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode)
        int cursor_pixel_formats_index = 4;
 
        switch (mode) {
-       case CURSOR_MODE_128_ARGB_AX:
+       case MCURSOR_MODE_128_ARGB_AX:
                cursor_pixel_formats_index = 0;
                break;
-       case CURSOR_MODE_256_ARGB_AX:
+       case MCURSOR_MODE_256_ARGB_AX:
                cursor_pixel_formats_index = 1;
                break;
-       case CURSOR_MODE_64_ARGB_AX:
+       case MCURSOR_MODE_64_ARGB_AX:
                cursor_pixel_formats_index = 2;
                break;
-       case CURSOR_MODE_64_32B_AX:
+       case MCURSOR_MODE_64_32B_AX:
                cursor_pixel_formats_index = 3;
                break;
 
@@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
                return -ENODEV;
 
        val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
-       mode = val & CURSOR_MODE;
-       plane->enabled = (mode != CURSOR_MODE_DISABLE);
+       mode = val & MCURSOR_MODE;
+       plane->enabled = (mode != MCURSOR_MODE_DISABLE);
        if (!plane->enabled)
                return -ENODEV;
 
@@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
        plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
        plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
 
+       plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot));
+       plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot));
        return 0;
 }
 
index a73e1d418c228f20ac29cbfc161132a8d54669ae..4ac18b44724769f458b6ff677edcd79a589c3826 100644 (file)
@@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 
        h = (struct gvt_firmware_header *)fw->data;
 
-       crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+       crc32_start = offsetofend(struct gvt_firmware_header, crc32);
        mem = fw->data + crc32_start;
 
 #define VERIFY(s, a, b) do { \
index 23296547da95e8634c3bbaa401225c2d415498d5..00aad8164dec2037f8fc8709298aa3dba2c8c0fa 100644 (file)
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
+       /* We treat the IPS bit as 'PSE' for the PTE level. */
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_INVALID,
-                       GTT_TYPE_INVALID),
+                       GTT_TYPE_PPGTT_PTE_64K_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_INVALID,
-                       GTT_TYPE_INVALID),
+                       GTT_TYPE_PPGTT_PTE_64K_ENTRY),
+       GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+                       GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+                       GTT_TYPE_PPGTT_PTE_PT,
+                       GTT_TYPE_INVALID,
+                       GTT_TYPE_PPGTT_PTE_64K_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
@@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt,
 
 #define ADDR_1G_MASK   GENMASK_ULL(GTT_HAW - 1, 30)
 #define ADDR_2M_MASK   GENMASK_ULL(GTT_HAW - 1, 21)
+#define ADDR_64K_MASK  GENMASK_ULL(GTT_HAW - 1, 16)
 #define ADDR_4K_MASK   GENMASK_ULL(GTT_HAW - 1, 12)
 
+#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
+#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
+
+#define GTT_64K_PTE_STRIDE 16
+
 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
 {
        unsigned long pfn;
@@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
                pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
        else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
                pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
+       else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
+               pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
        else
                pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
        return pfn;
@@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
        } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
                e->val64 &= ~ADDR_2M_MASK;
                pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
+       } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
+               e->val64 &= ~ADDR_64K_MASK;
+               pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
        } else {
                e->val64 &= ~ADDR_4K_MASK;
                pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
@@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
 
 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
 {
-       /* Entry doesn't have PSE bit. */
-       if (get_pse_type(e->type) == GTT_TYPE_INVALID)
-               return false;
+       return !!(e->val64 & _PAGE_PSE);
+}
 
-       e->type = get_entry_type(e->type);
-       if (!(e->val64 & _PAGE_PSE))
+static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
+{
+       if (gen8_gtt_test_pse(e)) {
+               switch (e->type) {
+               case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+                       e->val64 &= ~_PAGE_PSE;
+                       e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
+                       break;
+               case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
+                       e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
+                       e->val64 &= ~_PAGE_PSE;
+                       break;
+               default:
+                       WARN_ON(1);
+               }
+       }
+}
+
+static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
+{
+       if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
                return false;
 
-       e->type = get_pse_type(e->type);
-       return true;
+       return !!(e->val64 & GEN8_PDE_IPS_64K);
+}
+
+static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
+{
+       if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
+               return;
+
+       e->val64 &= ~GEN8_PDE_IPS_64K;
 }
 
 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
        e->val64 |= _PAGE_PRESENT;
 }
 
+static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+       return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
+}
+
+static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+       e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
+}
+
+static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
+{
+       e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
+}
+
 /*
  * Per-platform GMA routines.
  */
@@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
        .set_present = gtt_entry_set_present,
        .test_present = gen8_gtt_test_present,
        .test_pse = gen8_gtt_test_pse,
+       .clear_pse = gen8_gtt_clear_pse,
+       .clear_ips = gen8_gtt_clear_ips,
+       .test_ips = gen8_gtt_test_ips,
+       .clear_64k_splited = gen8_gtt_clear_64k_splited,
+       .set_64k_splited = gen8_gtt_set_64k_splited,
+       .test_64k_splited = gen8_gtt_test_64k_splited,
        .get_pfn = gen8_gtt_get_pfn,
        .set_pfn = gen8_gtt_set_pfn,
 };
@@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
        .gma_to_pml4_index = gen8_gma_to_pml4_index,
 };
 
+/* Update the entry type according to the PSE and IPS bits. */
+static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
+       struct intel_gvt_gtt_entry *entry, bool ips)
+{
+       switch (entry->type) {
+       case GTT_TYPE_PPGTT_PDE_ENTRY:
+       case GTT_TYPE_PPGTT_PDP_ENTRY:
+               if (pte_ops->test_pse(entry))
+                       entry->type = get_pse_type(entry->type);
+               break;
+       case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
+               if (ips)
+                       entry->type = get_pse_type(entry->type);
+               break;
+       default:
+               GEM_BUG_ON(!gtt_type_is_entry(entry->type));
+       }
+
+       GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
+}
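
Worked examples of the mapping, following the gtt_type_table entries earlier in this patch:

    /* PDE entry with PSE set          -> GTT_TYPE_PPGTT_PTE_2M_ENTRY  */
    /* PDP entry with PSE set          -> GTT_TYPE_PPGTT_PTE_1G_ENTRY  */
    /* 4K PTE entry with ips == true   -> GTT_TYPE_PPGTT_PTE_64K_ENTRY */
    /* any other case                  -> entry type left unchanged    */
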
+
 /*
  * MM helpers.
  */
@@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
        pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
                           mm->ppgtt_mm.shadow_pdps,
                           entry, index, false, 0, mm->vgpu);
-
-       pte_ops->test_pse(entry);
+       update_entry_type_for_real(pte_ops, entry, false);
 }
 
 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry(
        if (ret)
                return ret;
 
-       ops->test_pse(e);
+       update_entry_type_for_real(ops, e, guest ?
+                                  spt->guest_page.pde_ips : false);
 
        gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
                    type, e->type, index, e->val64);
@@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 
        radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
 
-       if (spt->guest_page.oos_page)
-               detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
+       if (spt->guest_page.gfn) {
+               if (spt->guest_page.oos_page)
+                       detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
 
-       intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+               intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
+       }
 
        list_del_init(&spt->post_shadow_list);
        free_spt(spt);
@@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
 
 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
 
+/* Allocate a shadow page table without a guest page. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-               struct intel_vgpu *vgpu, int type, unsigned long gfn)
+               struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
 {
        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -753,26 +840,12 @@ retry:
        spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
        spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
 
-       /*
-        * Init guest_page.
-        */
-       spt->guest_page.type = type;
-       spt->guest_page.gfn = gfn;
-
-       ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
-                                       ppgtt_write_protection_handler, spt);
-       if (ret)
-               goto err_unmap_dma;
-
        ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
        if (ret)
-               goto err_unreg_page_track;
+               goto err_unmap_dma;
 
-       trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
        return spt;
 
-err_unreg_page_track:
-       intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
 err_unmap_dma:
        dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 err_free_spt:
@@ -780,6 +853,37 @@ err_free_spt:
        return ERR_PTR(ret);
 }
 
+/* Allocate a shadow page table associated with a specific gfn. */
+static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
+               struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+               unsigned long gfn, bool guest_pde_ips)
+{
+       struct intel_vgpu_ppgtt_spt *spt;
+       int ret;
+
+       spt = ppgtt_alloc_spt(vgpu, type);
+       if (IS_ERR(spt))
+               return spt;
+
+       /*
+        * Init guest_page.
+        */
+       ret = intel_vgpu_register_page_track(vgpu, gfn,
+                       ppgtt_write_protection_handler, spt);
+       if (ret) {
+               ppgtt_free_spt(spt);
+               return ERR_PTR(ret);
+       }
+
+       spt->guest_page.type = type;
+       spt->guest_page.gfn = gfn;
+       spt->guest_page.pde_ips = guest_pde_ips;
+
+       trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
+
+       return spt;
+}
+
 #define pt_entry_size_shift(spt) \
        ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
 
@@ -787,24 +891,38 @@ err_free_spt:
        (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
 
 #define for_each_present_guest_entry(spt, e, i) \
-       for (i = 0; i < pt_entries(spt); i++) \
+       for (i = 0; i < pt_entries(spt); \
+            i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
                if (!ppgtt_get_guest_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
 #define for_each_present_shadow_entry(spt, e, i) \
-       for (i = 0; i < pt_entries(spt); i++) \
+       for (i = 0; i < pt_entries(spt); \
+            i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
                if (!ppgtt_get_shadow_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
-static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
+#define for_each_shadow_entry(spt, e, i) \
+       for (i = 0; i < pt_entries(spt); \
+            i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
+               if (!ppgtt_get_shadow_entry(spt, e, i))
+
+static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
        int v = atomic_read(&spt->refcount);
 
        trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
-
        atomic_inc(&spt->refcount);
 }
 
+static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
+{
+       int v = atomic_read(&spt->refcount);
+
+       trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
+       return atomic_dec_return(&spt->refcount);
+}
+
 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
        pfn = ops->get_pfn(entry);
        type = spt->shadow_page.type;
 
-       if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
+       /* Uninitialized spte or unshadowed spte. */
+       if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
                return;
 
        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
-       int v = atomic_read(&spt->refcount);
 
        trace_spt_change(spt->vgpu->id, "die", spt,
                        spt->guest_page.gfn, spt->shadow_page.type);
 
-       trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
-
-       if (atomic_dec_return(&spt->refcount) > 0)
+       if (ppgtt_put_spt(spt) > 0)
                return 0;
 
        for_each_present_shadow_entry(spt, &e, index) {
@@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
                        gvt_vdbg_mm("invalidate 4K entry\n");
                        ppgtt_invalidate_pte(spt, &e);
                        break;
+               case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+                       /* We don't set up 64K shadow entries so far. */
+                       WARN(1, "suspicious 64K gtt entry\n");
+                       continue;
                case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+                       gvt_vdbg_mm("invalidate 2M entry\n");
+                       continue;
                case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-                       WARN(1, "GVT doesn't support 2M/1GB page\n");
+                       WARN(1, "GVT doesn't support 1GB page\n");
                        continue;
                case GTT_TYPE_PPGTT_PML4_ENTRY:
                case GTT_TYPE_PPGTT_PDP_ENTRY:
@@ -899,6 +1021,22 @@ fail:
        return ret;
 }
 
+static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
+               u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
+                       GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+               return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
+       } else if (INTEL_GEN(dev_priv) >= 11) {
+               /* 64K paging is only controlled by the IPS bit in the PTE now. */
+               return true;
+       } else {
+               return false;
+       }
+}
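
The platform policy encoded here, summarized: on Gen9/10 the global GAMW_ECO_DEV_RW_IA IPS field must be fully enabled before per-PDE IPS bits are honored; from Gen11 on, 64K paging is controlled purely by the IPS bit in the PTE; older platforms get no 64K pages. An equivalent predicate for the Gen9/10 branch:

    static bool gamw_ips_fully_enabled(u32 reg)
    {
            return (reg & GAMW_ECO_ENABLE_64K_IPS_FIELD) ==
                    GAMW_ECO_ENABLE_64K_IPS_FIELD;
    }
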
+
 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
 
 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
@@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
 {
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
+       bool ips = false;
        int ret;
 
        GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
 
+       if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+               ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
+
        spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
-       if (spt)
+       if (spt) {
                ppgtt_get_spt(spt);
-       else {
+
+               if (ips != spt->guest_page.pde_ips) {
+                       spt->guest_page.pde_ips = ips;
+
+                       gvt_dbg_mm("reshadow PDE since ips changed\n");
+                       clear_page(spt->shadow_page.vaddr);
+                       ret = ppgtt_populate_spt(spt);
+                       if (ret) {
+                               ppgtt_put_spt(spt);
+                               goto err;
+                       }
+               }
+       } else {
                int type = get_next_pt_type(we->type);
 
-               spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
+               spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
                if (IS_ERR(spt)) {
                        ret = PTR_ERR(spt);
-                       goto fail;
+                       goto err;
                }
 
                ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
                if (ret)
-                       goto fail;
+                       goto err_free_spt;
 
                ret = ppgtt_populate_spt(spt);
                if (ret)
-                       goto fail;
+                       goto err_free_spt;
 
                trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
                                 spt->shadow_page.type);
        }
        return spt;
-fail:
+
+err_free_spt:
+       ppgtt_free_spt(spt);
+err:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                     spt, we->val64, we->type);
        return ERR_PTR(ret);
@@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
        se->type = ge->type;
        se->val64 = ge->val64;
 
+       /* Because we always split 64KB pages, clear IPS in the shadow PDE. */
+       if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
+               ops->clear_ips(se);
+
        ops->set_pfn(se, s->shadow_page.mfn);
 }
 
+/*
+ * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
+ * are not met, negative if an error is found.
+ */
+static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
+       struct intel_gvt_gtt_entry *entry)
+{
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       unsigned long pfn;
+
+       if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+               return 0;
+
+       pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
+       if (pfn == INTEL_GVT_INVALID_ADDR)
+               return -EINVAL;
+
+       return PageTransHuge(pfn_to_page(pfn));
+}
+
+static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
+       struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+       struct intel_gvt_gtt_entry *se)
+{
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       struct intel_vgpu_ppgtt_spt *sub_spt;
+       struct intel_gvt_gtt_entry sub_se;
+       unsigned long start_gfn;
+       dma_addr_t dma_addr;
+       unsigned long sub_index;
+       int ret;
+
+       gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
+
+       start_gfn = ops->get_pfn(se);
+
+       sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
+       if (IS_ERR(sub_spt))
+               return PTR_ERR(sub_spt);
+
+       for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
+               ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+                               start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+               if (ret) {
+                       ppgtt_invalidate_spt(spt);
+                       return ret;
+               }
+               sub_se.val64 = se->val64;
+
+               /* Copy the PAT field from PDE. */
+               sub_se.val64 &= ~_PAGE_PAT;
+               sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
+
+               ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
+               ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
+       }
+
+       /* Clear dirty field. */
+       se->val64 &= ~_PAGE_DIRTY;
+
+       ops->clear_pse(se);
+       ops->clear_ips(se);
+       ops->set_pfn(se, sub_spt->shadow_page.mfn);
+       ppgtt_set_shadow_entry(spt, se, index);
+       return 0;
+}
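
The ">> 5" in the PAT copy above follows from the x86 PTE layout: in a huge-page entry the PAT bit is bit 12 (_PAGE_PAT_LARGE), while in a 4K PTE it is bit 7 (_PAGE_PAT), and 12 - 7 = 5. The same two lines with that reading annotated:

    sub_se.val64 &= ~_PAGE_PAT;                          /* clear bit 7  */
    sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;  /* move bit 12  */
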
+
+static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
+       struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
+       struct intel_gvt_gtt_entry *se)
+{
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       struct intel_gvt_gtt_entry entry = *se;
+       unsigned long start_gfn;
+       dma_addr_t dma_addr;
+       int i, ret;
+
+       gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
+
+       GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
+
+       start_gfn = ops->get_pfn(se);
+
+       entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
+       ops->set_64k_splited(&entry);
+
+       for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+               ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+                                       start_gfn + i, PAGE_SIZE, &dma_addr);
+               if (ret)
+                       return ret;
+
+               ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
+               ppgtt_set_shadow_entry(spt, &entry, index + i);
+       }
+       return 0;
+}
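
Address arithmetic performed by the split loop: one 64K guest entry covers 16 contiguous 4K frames, so for a stride-aligned index the shadow table ends up with

    /* shadow PTE[index + k]  ->  guest gfn (start_gfn + k),  k = 0..15 */
    /* each shadow PTE also carries the 64K-split marker (bit 52)      */
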
+
 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
        struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
        struct intel_gvt_gtt_entry *ge)
 {
        struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry se = *ge;
-       unsigned long gfn;
+       unsigned long gfn, page_size = PAGE_SIZE;
        dma_addr_t dma_addr;
        int ret;
 
@@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
        case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
                gvt_vdbg_mm("shadow 4K gtt entry\n");
                break;
+       case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
+               gvt_vdbg_mm("shadow 64K gtt entry\n");
+               /*
+                * The layout of a 64K page is special: the page size is
+                * controlled by the upper PDE. To keep it simple, we always
+                * split a 64K page into smaller 4K pages in the shadow PT.
+                */
+               return split_64KB_gtt_entry(vgpu, spt, index, &se);
        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
+               gvt_vdbg_mm("shadow 2M gtt entry\n");
+               ret = is_2MB_gtt_possible(vgpu, ge);
+               if (ret == 0)
+                       return split_2MB_gtt_entry(vgpu, spt, index, &se);
+               else if (ret < 0)
+                       return ret;
+               page_size = I915_GTT_PAGE_SIZE_2M;
+               break;
        case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
-               gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
+               gvt_vgpu_err("GVT doesn't support 1GB entry\n");
                return -EINVAL;
        default:
                GEM_BUG_ON(1);
        };
 
        /* direct shadow */
-       ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
+       ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
+                                                     &dma_addr);
        if (ret)
                return -ENXIO;
 
@@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
                ret = ppgtt_invalidate_spt(s);
                if (ret)
                        goto fail;
-       } else
+       } else {
+               /* We don't set up 64K shadow entries so far. */
+               WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
+                    "suspicious 64K entry\n");
                ppgtt_invalidate_pte(spt, se);
+       }
 
        return 0;
 fail:
@@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table(
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry old_se;
        int new_present;
-       int ret;
+       int i, ret;
 
        new_present = ops->test_present(we);
 
@@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table(
                goto fail;
 
        if (!new_present) {
-               ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
-               ppgtt_set_shadow_entry(spt, &old_se, index);
+               /* For 64KB split entries, we need to clear them all. */
+               if (ops->test_64k_splited(&old_se) &&
+                   !(index % GTT_64K_PTE_STRIDE)) {
+                       gvt_vdbg_mm("remove split 64K shadow entries\n");
+                       for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
+                               ops->clear_64k_splited(&old_se);
+                               ops->set_pfn(&old_se,
+                                       vgpu->gtt.scratch_pt[type].page_mfn);
+                               ppgtt_set_shadow_entry(spt, &old_se, index + i);
+                       }
+               } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
+                          old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
+                       ops->clear_pse(&old_se);
+                       ops->set_pfn(&old_se,
+                                    vgpu->gtt.scratch_pt[type].page_mfn);
+                       ppgtt_set_shadow_entry(spt, &old_se, index);
+               } else {
+                       ops->set_pfn(&old_se,
+                                    vgpu->gtt.scratch_pt[type].page_mfn);
+                       ppgtt_set_shadow_entry(spt, &old_se, index);
+               }
        }
 
        return 0;
@@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(
 
        ppgtt_get_guest_entry(spt, &we, index);
 
-       ops->test_pse(&we);
+       /*
+        * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
+        * PTE#32, ... PTE#496 are used. Writes to the unused PTEs should be
+        * ignored.
+        */
+       if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
+           (index % GTT_64K_PTE_STRIDE)) {
+               gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
+                           index);
+               return 0;
+       }
 
        if (bytes == info->gtt_entry_size) {
                ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
@@ -1592,6 +1901,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }
+       mm->ggtt_mm.last_partial_off = -1UL;
 
        return mm;
 }
@@ -1616,6 +1926,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
+               mm->ggtt_mm.last_partial_off = -1UL;
        }
 
        vgpu_free_mm(mm);
@@ -1868,6 +2179,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
 
+       /* If the ggtt entry size is 8 bytes and the write is split into
+        * two 4-byte writes, we assume the two writes are consecutive.
+        * Otherwise, we abort and report an error.
+        */
+       if (bytes < info->gtt_entry_size) {
+               if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+                       /* the first partial part */
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+                       return 0;
+               } else if ((g_gtt_index ==
+                               (ggtt_mm->ggtt_mm.last_partial_off >>
+                               info->gtt_entry_size_shift)) &&
+                       (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+                       /* the second partial part */
+
+                       int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+                               (info->gtt_entry_size - 1);
+
+                       memcpy((void *)&e.val64 + last_off,
+                               (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+                               last_off, bytes);
+
+                       ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+               } else {
+                       int last_offset;
+
+                       gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+                                       ggtt_mm->ggtt_mm.last_partial_off, off,
+                                       bytes, info->gtt_entry_size);
+
+                       /* set the host ggtt entry to the scratch page and
+                        * mark the virtual ggtt entry as not present for
+                        * the last partially written offset
+                        */
+                       last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+                                       (~(info->gtt_entry_size - 1));
+
+                       ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate_pte(vgpu, &m);
+                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+                       ops->clear_present(&m);
+                       ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate(gvt->dev_priv);
+
+                       ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+                       ops->clear_present(&e);
+                       ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+                       return 0;
+               }
+       }
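
A sketch of the reassembly this block assumes: an 8-byte GGTT entry written as two consecutive 4-byte accesses is merged back into one 64-bit value (the patch does this via memcpy into e.val64; the helper below is hypothetical):

    static u64 merge_partial_ggtt_write(u64 saved, u32 data, unsigned int off)
    {
            unsigned int shift = (off & 4) * 8;   /* low or high dword */

            saved &= ~(0xffffffffULL << shift);
            return saved | ((u64)data << shift);
    }
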
+
        if (ops->test_present(&e)) {
                gfn = ops->get_pfn(&e);
                m = e;
@@ -1881,7 +2248,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
                }
 
                ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
-                                                             &dma_addr);
+                                                       PAGE_SIZE, &dma_addr);
                if (ret) {
                        gvt_vgpu_err("fail to populate guest ggtt entry\n");
                        /* guest driver may read/write the entry when partial
@@ -1973,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
         * is GTT_TYPE_PPGTT_PTE_PT, and is fully filled with the scratch page mfn.
         */
-       if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
+       if (type > GTT_TYPE_PPGTT_PTE_PT) {
                struct intel_gvt_gtt_entry se;
 
                memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@ -2257,13 +2624,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 
        gvt_dbg_core("init gtt\n");
 
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
-               gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
-       } else {
-               return -ENODEV;
-       }
+       gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
+       gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
 
        page = (void *)get_zeroed_page(GFP_KERNEL);
        if (!page) {
index 3792f2b7f4ff0686832458efcf533248c4aa356d..7a9b36176efb7fca7198527512f8873ad21248cb 100644 (file)
@@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops {
        void (*clear_present)(struct intel_gvt_gtt_entry *e);
        void (*set_present)(struct intel_gvt_gtt_entry *e);
        bool (*test_pse)(struct intel_gvt_gtt_entry *e);
+       void (*clear_pse)(struct intel_gvt_gtt_entry *e);
+       bool (*test_ips)(struct intel_gvt_gtt_entry *e);
+       void (*clear_ips)(struct intel_gvt_gtt_entry *e);
+       bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
+       void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
+       void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
        void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
        unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
 };
@@ -95,6 +101,7 @@ typedef enum {
        GTT_TYPE_GGTT_PTE,
 
        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
+       GTT_TYPE_PPGTT_PTE_64K_ENTRY,
        GTT_TYPE_PPGTT_PTE_2M_ENTRY,
        GTT_TYPE_PPGTT_PTE_1G_ENTRY,
 
@@ -150,6 +157,8 @@ struct intel_vgpu_mm {
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
+                       unsigned long last_partial_off;
+                       u64 last_partial_data;
                } ggtt_mm;
        };
 };
@@ -220,6 +229,7 @@ struct intel_vgpu_ppgtt_spt {
 
        struct {
                intel_gvt_gtt_type_t type;
+               bool pde_ips; /* for 64KB PTEs */
                void *vaddr;
                struct page *page;
                unsigned long mfn;
@@ -227,6 +237,7 @@ struct intel_vgpu_ppgtt_spt {
 
        struct {
                intel_gvt_gtt_type_t type;
+               bool pde_ips; /* for 64KB PTEs */
                unsigned long gfn;
                unsigned long write_cnt;
                struct intel_vgpu_oos_page *oos_page;
index 61bd14fcb649fab8af972faf8ee4ecf5cc96010b..712f9d14e7200678228065bd16d5d0bdec7a009b 100644 (file)
@@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt)
        struct intel_gvt_device_info *info = &gvt->device_info;
        struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               info->max_support_vgpus = 8;
-               info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
-               info->mmio_size = 2 * 1024 * 1024;
-               info->mmio_bar = 0;
-               info->gtt_start_offset = 8 * 1024 * 1024;
-               info->gtt_entry_size = 8;
-               info->gtt_entry_size_shift = 3;
-               info->gmadr_bytes_in_cmd = 8;
-               info->max_surface_size = 36 * 1024 * 1024;
-       }
+       info->max_support_vgpus = 8;
+       info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+       info->mmio_size = 2 * 1024 * 1024;
+       info->mmio_bar = 0;
+       info->gtt_start_offset = 8 * 1024 * 1024;
+       info->gtt_entry_size = 8;
+       info->gtt_entry_size_shift = 3;
+       info->gmadr_bytes_in_cmd = 8;
+       info->max_surface_size = 36 * 1024 * 1024;
        info->msi_cap_offset = pdev->msi_cap;
 }
 
@@ -271,11 +268,8 @@ static int gvt_service_thread(void *data)
                        continue;
 
                if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
-                                       (void *)&gvt->service_request)) {
-                       mutex_lock(&gvt->lock);
+                                       (void *)&gvt->service_request))
                        intel_gvt_emulate_vblank(gvt);
-                       mutex_unlock(&gvt->lock);
-               }
 
                if (test_bit(INTEL_GVT_REQUEST_SCHED,
                                (void *)&gvt->service_request) ||
@@ -379,6 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
        idr_init(&gvt->vgpu_idr);
        spin_lock_init(&gvt->scheduler.mmio_context_lock);
        mutex_init(&gvt->lock);
+       mutex_init(&gvt->sched_lock);
        gvt->dev_priv = dev_priv;
 
        init_device_info(gvt);
@@ -473,3 +468,7 @@ out_clean_idr:
        kfree(gvt);
        return ret;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
+MODULE_SOFTDEP("pre: kvmgt");
+#endif
index 05d15a095310d41b75d6ab64aa04162799f393a1..9a967152277494ccbf1dbd5ac55b4b7f5b842eba 100644 (file)
@@ -170,12 +170,18 @@ struct intel_vgpu_submission {
 
 struct intel_vgpu {
        struct intel_gvt *gvt;
+       struct mutex vgpu_lock;
        int id;
        unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
        bool active;
        bool pv_notified;
        bool failsafe;
        unsigned int resetting_eng;
+
+       /* Both sched_data and sched_ctl can be seen as part of the global
+        * gvt scheduler structure, so the two vgpu fields below are
+        * protected by sched_lock, not vgpu_lock.
+        */
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;
 
@@ -268,6 +274,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED (1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX       (1 << 7)
 
        struct gvt_mmio_block *mmio_block;
        unsigned int num_mmio_block;
@@ -294,7 +302,13 @@ struct intel_vgpu_type {
 };
 
 struct intel_gvt {
+       /* GVT scope lock, protects GVT itself and all resources currently
+        * not yet protected by more specific locks (the vgpu and scheduler
+        * locks).
+        */
        struct mutex lock;
+       /* scheduler scope lock, protects gvt and vgpu scheduling-related data */
+       struct mutex sched_lock;
+
        struct drm_i915_private *dev_priv;
        struct idr vgpu_idr;    /* vGPU IDR pool */
 
@@ -314,6 +328,10 @@ struct intel_gvt {
 
        struct task_struct *service_thread;
        wait_queue_head_t service_thread_wq;
+
+       /* service_request is always used in bit operations; always access
+        * it with atomic bit ops so that the big gvt lock is not needed.
+        */
        unsigned long service_request;
 
        struct {
@@ -361,9 +379,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define gvt_aperture_sz(gvt)     (gvt->dev_priv->ggtt.mappable_end)
 #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
 
-#define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.base.total)
+#define gvt_ggtt_gm_sz(gvt)      (gvt->dev_priv->ggtt.vm.total)
 #define gvt_ggtt_sz(gvt) \
-       ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
+       ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
 #define gvt_hidden_sz(gvt)       (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
 
 #define gvt_aperture_gmadr_base(gvt) (0)
@@ -639,6 +657,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
        return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if an MMIO has an in-ctx mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has an in-context mask, false if it doesn't.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+{
+       return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark an MMIO as saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+{
+       gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
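
Usage sketch for the new F_IN_CTX attribute: a register is flagged once at setup time, and the command parser then consults the flag for every LRI it scans (the offset below is hypothetical, for illustration only):

    static void mark_in_ctx_example(struct intel_gvt *gvt)
    {
            intel_gvt_mmio_set_in_ctx(gvt, 0x2244);   /* hypothetical reg */

            if (intel_gvt_mmio_is_in_ctx(gvt, 0x2244))
                    ;   /* parser treats LRI writes as context state */
    }
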
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
index bcbc47a88a7006a06107005b0faad5c02820c215..7a58ca5551977a086ce8b25dbe18aa26497f45bc 100644 (file)
@@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
                return D_SKL;
        else if (IS_KABYLAKE(gvt->dev_priv))
                return D_KBL;
+       else if (IS_BROXTON(gvt->dev_priv))
+               return D_BXT;
 
        return 0;
 }
@@ -208,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
+
+       if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+               if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
+                       gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
+               else if (!ips)
+                       gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
+               else {
+                       /* IPS must be enabled on all engines together
+                        * for a vGPU, since we don't know which engine
+                        * the ppgtt will bind to when shadowing.
+                        */
+                       gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
+                                    ips);
+                       return -EINVAL;
+               }
+       }
+
+       write_vreg(vgpu, offset, p_data, bytes);
+       return 0;
+}
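
Per the handler above, partial per-engine IPS enables are rejected: only all-clear or the full GAMW_ECO_ENABLE_64K_IPS_FIELD is accepted. The accepted states as a predicate:

    static bool ips_request_valid(u32 ips)
    {
            return ips == 0 || ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
    }
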
+
 static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
                void *p_data, unsigned int bytes)
 {
@@ -255,7 +282,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
 
        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+               || IS_KABYLAKE(vgpu->gvt->dev_priv)
+               || IS_BROXTON(vgpu->gvt->dev_priv)) {
                switch (offset) {
                case FORCEWAKE_RENDER_GEN9_REG:
                        ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -316,6 +344,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                }
        }
 
+       /* vgpu_lock is already held by the emulated mmio r/w path */
        intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
 
        /* sw will wait for the device to ack the reset request */
@@ -420,7 +449,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
        else
                vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+       /* vgpu_lock is already held by the emulated mmio r/w path */
+       mutex_unlock(&vgpu->vgpu_lock);
        intel_gvt_check_vblank_emulation(vgpu->gvt);
+       mutex_lock(&vgpu->vgpu_lock);
        return 0;
 }
 
@@ -857,7 +889,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
        data = vgpu_vreg(vgpu, offset);
 
        if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-               || IS_KABYLAKE(vgpu->gvt->dev_priv))
+               || IS_KABYLAKE(vgpu->gvt->dev_priv)
+               || IS_BROXTON(vgpu->gvt->dev_priv))
                && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
                /* SKL DPB/C/D aux ctl register changed */
                return 0;
@@ -1209,8 +1242,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                ret = handle_g2v_notification(vgpu, data);
                break;
        /* add xhot and yhot to handled list to avoid error log */
-       case 0x78830:
-       case 0x78834:
+       case _vgtif_reg(cursor_x_hot):
+       case _vgtif_reg(cursor_y_hot):
        case _vgtif_reg(pdp[0].lo):
        case _vgtif_reg(pdp[0].hi):
        case _vgtif_reg(pdp[1].lo):
@@ -1369,6 +1402,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
                                *data0 = 0x1e1a1100;
                        else
                                *data0 = 0x61514b3d;
+               } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+                       /*
+                        * "Read memory latency" command on gen9.
+                        * The memory latency values below were read
+                        * from a Broxton MRB.
+                        */
+                       if (!*data0)
+                               *data0 = 0x16080707;
+                       else
+                               *data0 = 0x16161616;
                }
                break;
        case SKL_PCODE_CDCLK_CONTROL:
@@ -1426,8 +1469,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
 {
        u32 v = *(u32 *)p_data;
 
-       v &= (1 << 31) | (1 << 29) | (1 << 9) |
-            (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
+       if (IS_BROXTON(vgpu->gvt->dev_priv))
+               v &= (1 << 31) | (1 << 29);
+       else
+               v &= (1 << 31) | (1 << 29) | (1 << 9) |
+                       (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
        v |= (v >> 1);
 
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@ -1447,6 +1493,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
+static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (v & BXT_DE_PLL_PLL_ENABLE)
+               v |= BXT_DE_PLL_LOCK;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (v & PORT_PLL_ENABLE)
+               v |= PORT_PLL_LOCK;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+       u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
+
+       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = vgpu_vreg(vgpu, offset);
+
+       v &= ~UNIQUE_TRANGE_EN_METHOD;
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+}
+
+static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
+               vgpu_vreg(vgpu, offset - 0x600) = v;
+               vgpu_vreg(vgpu, offset - 0x800) = v;
+       } else {
+               vgpu_vreg(vgpu, offset - 0x400) = v;
+               vgpu_vreg(vgpu, offset - 0x600) = v;
+       }
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       u32 v = *(u32 *)p_data;
+
+       if (v & BIT(0)) {
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+                       ~PHY_RESERVED;
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
+                       PHY_POWER_GOOD;
+       }
+
+       if (v & BIT(1)) {
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+                       ~PHY_RESERVED;
+               vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
+                       PHY_POWER_GOOD;
+       }
+
+       vgpu_vreg(vgpu, offset) = v;
+
+       return 0;
+}
+
+static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       vgpu_vreg(vgpu, offset) = 0;
+       return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -1657,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
        MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
 
-       MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+       MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
+               gamw_echo_dev_rw_ia_write);
+
        MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
        MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
        MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@ -2670,17 +2821,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
        MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
        MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
-       MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
-       MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
-       MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
-       MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
+       MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
+       MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
+       MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
+       MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
 
        MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
        MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2805,53 +2956,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
        MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
        MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
 
-       MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
-       MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+       MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+               NULL, NULL);
+       MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+               NULL, NULL);
 
        MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
        MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
        MMIO_D(RC6_LOCATION, D_SKL_PLUS);
        MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
-       MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+               NULL, NULL);
 
        /* TRTT */
-       MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
-       MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+       MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+       MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+               NULL, gen9_trtte_write);
+       MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-       MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
 
-       MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
        MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
        MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
 
        MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
-       MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
-       MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+       MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
+       MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
 
        MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
        MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
@@ -2869,11 +3024,188 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-       MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
+       MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
 
        MMIO_D(_MMIO(0x4ab8), D_KBL);
-       MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
+       MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+
+       return 0;
+}
+
+static int init_bxt_mmio_info(struct intel_gvt *gvt)
+{
+       struct drm_i915_private *dev_priv = gvt->dev_priv;
+       int ret;
+
+       MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
+
+       MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
+       MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
+       MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
+       MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
+       MMIO_D(ERROR_GEN6, D_BXT);
+       MMIO_D(DONE_REG, D_BXT);
+       MMIO_D(EIR, D_BXT);
+       MMIO_D(PGTBL_ER, D_BXT);
+       MMIO_D(_MMIO(0x4194), D_BXT);
+       MMIO_D(_MMIO(0x4294), D_BXT);
+       MMIO_D(_MMIO(0x4494), D_BXT);
+
+       MMIO_RING_D(RING_PSMI_CTL, D_BXT);
+       MMIO_RING_D(RING_DMA_FADD, D_BXT);
+       MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
+       MMIO_RING_D(RING_IPEHR, D_BXT);
+       MMIO_RING_D(RING_INSTPS, D_BXT);
+       MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
+       MMIO_RING_D(RING_BBSTATE, D_BXT);
+       MMIO_RING_D(RING_IPEIR, D_BXT);
+
+       MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
+
+       MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
+       MMIO_D(BXT_RP_STATE_CAP, D_BXT);
+       MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
+               NULL, bxt_phy_ctl_family_write);
+       MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
+               NULL, bxt_phy_ctl_family_write);
+       MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
+       MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
+       MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
+       MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
+               NULL, bxt_port_pll_enable_write);
+       MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
+               NULL, bxt_port_pll_enable_write);
+       MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
+               bxt_port_pll_enable_write);
+
+       MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
+
+       MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
+       MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
+
+       MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
+               NULL, bxt_pcs_dw12_grp_write);
+       MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
+               bxt_port_tx_dw3_read, NULL);
+       MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
+
+       MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
+               NULL, bxt_pcs_dw12_grp_write);
+       MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
+               bxt_port_tx_dw3_read, NULL);
+       MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
+
+       MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
+               NULL, bxt_pcs_dw12_grp_write);
+       MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
+               bxt_port_tx_dw3_read, NULL);
+       MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
+       MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
+
+       MMIO_D(BXT_DE_PLL_CTL, D_BXT);
+       MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
+       MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
+       MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
+
+       MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+
+       MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
+       MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
+       MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
+
+       MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+       MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
+
+       MMIO_D(RC6_CTX_BASE, D_BXT);
+
+       MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
+       MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
+       MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
+       MMIO_D(GEN6_GFXPAUSE, D_BXT);
+       MMIO_D(GEN8_L3SQCREG1, D_BXT);
+
+       MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
 
        return 0;
 }
@@ -2965,6 +3297,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
                ret = init_skl_mmio_info(gvt);
                if (ret)
                        goto err;
+       } else if (IS_BROXTON(dev_priv)) {
+               ret = init_broadwell_mmio_info(gvt);
+               if (ret)
+                       goto err;
+               ret = init_skl_mmio_info(gvt);
+               if (ret)
+                       goto err;
+               ret = init_bxt_mmio_info(gvt);
+               if (ret)
+                       goto err;
        }
 
        gvt->mmio.mmio_block = mmio_blocks;
@@ -3045,6 +3387,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
+/**
+ * intel_vgpu_mask_mmio_write - write a masked register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+{
+       u32 mask, old_vreg;
+
+       old_vreg = vgpu_vreg(vgpu, offset);
+       write_vreg(vgpu, offset, p_data, bytes);
+       mask = vgpu_vreg(vgpu, offset) >> 16;
+       vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+                               (vgpu_vreg(vgpu, offset) & mask);
+
+       return 0;
+}
+
 /**
  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
  * force-nopriv register
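
Aside: intel_vgpu_mask_mmio_write above implements the usual GEN
masked-register convention -- the upper 16 bits of the written value select
which of the lower 16 bits actually change. A standalone sketch of that update
rule (illustrative only, not the driver API):

#include <stdint.h>

/* Upper 16 bits of val are the write mask for the lower 16 bits. */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

/* e.g. masked_write(reg, (bit << 16) | bit) sets bit, and
 * masked_write(reg, bit << 16) clears it; all other low bits of reg
 * are left untouched. */
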
index f6dd9f71788834eb388f2703425999c6acedbb04..5af11cf1b48235c46079f376686112c5234abbfd 100644 (file)
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
        unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
 
        int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
-                                 dma_addr_t *dma_addr);
+                                 unsigned long size, dma_addr_t *dma_addr);
        void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
 
        int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
index 7a041b368f68861e552b590a88ca633416802cc3..5daa23ae566b0849379a58fa35af64b540bece33 100644 (file)
@@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
                        clear_bits |= (1 << bit);
        }
 
-       WARN_ON(!up_irq_info);
+       if (WARN_ON(!up_irq_info))
+               return;
 
        if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
                u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base);
@@ -580,7 +581,9 @@ static void gen8_init_irq(
 
                SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
                SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-       } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
+       } else if (IS_SKYLAKE(gvt->dev_priv)
+                       || IS_KABYLAKE(gvt->dev_priv)
+                       || IS_BROXTON(gvt->dev_priv)) {
                SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
                SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
 
        gvt_dbg_core("init irq framework\n");
 
-       if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
-               || IS_KABYLAKE(gvt->dev_priv)) {
-               irq->ops = &gen8_irq_ops;
-               irq->irq_map = gen8_irq_map;
-       } else {
-               WARN_ON(1);
-               return -ENODEV;
-       }
+       irq->ops = &gen8_irq_ops;
+       irq->irq_map = gen8_irq_map;
 
        /* common event initialization */
        init_events(irq);
index df4e4a07db3d6809fa741e7c19c88e754700734e..4d2f53ae9f0f26907433aabc3cae4c9d1e7d3361 100644 (file)
@@ -94,6 +94,7 @@ struct gvt_dma {
        struct rb_node dma_addr_node;
        gfn_t gfn;
        dma_addr_t dma_addr;
+       unsigned long size;
        struct kref ref;
 };
 
@@ -106,51 +107,103 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
-static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
-               dma_addr_t *dma_addr)
+static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+               unsigned long size)
 {
-       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-       struct page *page;
-       unsigned long pfn;
+       int total_pages;
+       int npage;
        int ret;
 
-       /* Pin the page first. */
-       ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
-                            IOMMU_READ | IOMMU_WRITE, &pfn);
-       if (ret != 1) {
-               gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
-                            gfn, ret);
-               return -EINVAL;
+       total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+
+       for (npage = 0; npage < total_pages; npage++) {
+               unsigned long cur_gfn = gfn + npage;
+
+               ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
+               WARN_ON(ret != 1);
        }
+}
 
-       if (!pfn_valid(pfn)) {
-               gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
-               vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-               return -EINVAL;
+/* Pin a normal or compound guest page for DMA. */
+static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+               unsigned long size, struct page **page)
+{
+       unsigned long base_pfn = 0;
+       int total_pages;
+       int npage;
+       int ret;
+
+       total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
+       /*
+        * We pin the pages one by one to avoid allocating a big array
+        * on the stack to hold the pfns.
+        */
+       for (npage = 0; npage < total_pages; npage++) {
+               unsigned long cur_gfn = gfn + npage;
+               unsigned long pfn;
+
+               ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+                                    IOMMU_READ | IOMMU_WRITE, &pfn);
+               if (ret != 1) {
+                       gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
+                                    cur_gfn, ret);
+                       goto err;
+               }
+
+               if (!pfn_valid(pfn)) {
+                       gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
+                       npage++;
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               if (npage == 0)
+                       base_pfn = pfn;
+               else if (base_pfn + npage != pfn) {
+                       gvt_vgpu_err("The pages are not continuous\n");
+                       ret = -EINVAL;
+                       npage++;
+                       goto err;
+               }
        }
 
+       *page = pfn_to_page(base_pfn);
+       return 0;
+err:
+       gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+       return ret;
+}
+
+static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
+               dma_addr_t *dma_addr, unsigned long size)
+{
+       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       struct page *page = NULL;
+       int ret;
+
+       ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
+       if (ret)
+               return ret;
+
        /* Setup DMA mapping. */
-       page = pfn_to_page(pfn);
-       *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
-                                PCI_DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(dev, *dma_addr)) {
-               gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
-               vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-               return -ENOMEM;
+       *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+       ret = dma_mapping_error(dev, *dma_addr);
+       if (ret) {
+               gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
+                            page_to_pfn(page), ret);
+               gvt_unpin_guest_page(vgpu, gfn, size);
        }
 
-       return 0;
+       return ret;
 }
 
 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
-               dma_addr_t dma_addr)
+               dma_addr_t dma_addr, unsigned long size)
 {
        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
-       int ret;
 
-       dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
-       WARN_ON(ret != 1);
+       dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+       gvt_unpin_guest_page(vgpu, gfn, size);
 }
 
 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
@@ -191,7 +244,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
 }
 
 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
-               dma_addr_t dma_addr)
+               dma_addr_t dma_addr, unsigned long size)
 {
        struct gvt_dma *new, *itr;
        struct rb_node **link, *parent = NULL;
@@ -203,6 +256,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
        new->vgpu = vgpu;
        new->gfn = gfn;
        new->dma_addr = dma_addr;
+       new->size = size;
        kref_init(&new->ref);
 
        /* gfn_cache maps gfn to struct gvt_dma. */
@@ -260,7 +314,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
                        break;
                }
                dma = rb_entry(node, struct gvt_dma, gfn_node);
-               gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
+               gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
                __gvt_cache_remove_entry(vgpu, dma);
                mutex_unlock(&vgpu->vdev.cache_lock);
        }
@@ -515,7 +569,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
                        if (!entry)
                                continue;
 
-                       gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
+                       gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
+                                          entry->size);
                        __gvt_cache_remove_entry(vgpu, entry);
                }
                mutex_unlock(&vgpu->vdev.cache_lock);
@@ -1648,7 +1703,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 }
 
 int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
-               dma_addr_t *dma_addr)
+               unsigned long size, dma_addr_t *dma_addr)
 {
        struct kvmgt_guest_info *info;
        struct intel_vgpu *vgpu;
@@ -1665,11 +1720,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
        entry = __gvt_cache_find_gfn(info->vgpu, gfn);
        if (!entry) {
-               ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
+               ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
                if (ret)
                        goto err_unlock;
 
-               ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+               ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
                if (ret)
                        goto err_unmap;
        } else {
@@ -1681,7 +1736,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
        return 0;
 
 err_unmap:
-       gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+       gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
 err_unlock:
        mutex_unlock(&info->vgpu->vdev.cache_lock);
        return ret;
@@ -1691,7 +1746,8 @@ static void __gvt_dma_release(struct kref *ref)
 {
        struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
 
-       gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
+       gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
+                          entry->size);
        __gvt_cache_remove_entry(entry->vgpu, entry);
 }
 
index b31eb36fc102e212218424f890e16b02ff2161ff..994366035364b7576db8ed2ec1036d417cc39d9c 100644 (file)
@@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                return;
 
        gvt = vgpu->gvt;
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
        if (reg_is_mmio(gvt, offset)) {
                if (read)
@@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                        memcpy(pt, p_data, bytes);
 
        }
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
                return 0;
        }
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
@@ -156,7 +156,7 @@ err:
        gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
                        offset, bytes);
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
 
@@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                return 0;
        }
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
@@ -220,7 +220,7 @@ err:
        gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
                     bytes);
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
 
index 71b6208759439d8ca74d3dbee4ea4a95059e779d..1ffc69eba30e385a21469847c6681f420254d67d 100644 (file)
@@ -42,15 +42,16 @@ struct intel_vgpu;
 #define D_BDW   (1 << 0)
 #define D_SKL  (1 << 1)
 #define D_KBL  (1 << 2)
+#define D_BXT  (1 << 3)
 
-#define D_GEN9PLUS     (D_SKL | D_KBL)
-#define D_GEN8PLUS     (D_BDW | D_SKL | D_KBL)
+#define D_GEN9PLUS     (D_SKL | D_KBL | D_BXT)
+#define D_GEN8PLUS     (D_BDW | D_SKL | D_KBL | D_BXT)
 
-#define D_SKL_PLUS     (D_SKL | D_KBL)
-#define D_BDW_PLUS     (D_BDW | D_SKL | D_KBL)
+#define D_SKL_PLUS     (D_SKL | D_KBL | D_BXT)
+#define D_BDW_PLUS     (D_BDW | D_SKL | D_KBL | D_BXT)
 
 #define D_PRE_SKL      (D_BDW)
-#define D_ALL          (D_BDW | D_SKL | D_KBL)
+#define D_ALL          (D_BDW | D_SKL | D_KBL | D_BXT)
 
 typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
                             unsigned int);
@@ -98,4 +99,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
                           void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+                                 void *p_data, unsigned int bytes);
 #endif
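
Aside: the D_* values are single-bit platform flags, so adding D_BXT to the
*_PLUS masks is all it takes for every existing D_SKL_PLUS/D_BDW_PLUS handler
to fire on Broxton as well. The matching test reduces to a bitwise AND,
roughly as in this sketch:

#include <stdbool.h>
#include <stdint.h>

#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)

#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT)

/* A handler applies when the current platform's bit is in its mask. */
static bool handler_applies(uint32_t handler_mask, uint32_t platform)
{
	return (handler_mask & platform) != 0;
}

/* handler_applies(D_SKL_PLUS, D_BXT) is now true. */
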
index 0f949554d118c22e1313cfecc2948d0fcc4672ef..42e1e6bdcc2cfe64a3446eea8019b9a912141ba0 100644 (file)
@@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
-       if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+       if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
+                       IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
                fw |= FORCEWAKE_RENDER;
 
        intel_uncore_forcewake_get(dev_priv, fw);
@@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
 
-       if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+       if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
                return;
 
        if (!pre && !gen9_render_mocs.initialized)
@@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 #define CTX_CONTEXT_CONTROL_VAL        0x03
 
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
 {
-       u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
+       const u32 *reg_state = ce->lrc_reg_state;
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
@@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre,
        u32 old_v, new_v;
 
        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv)
+               || IS_KABYLAKE(dev_priv)
+               || IS_BROXTON(dev_priv))
                switch_mocs(pre, next, ring_id);
 
        for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre,
                 * state image on Kabylake; it is initialized by an LRI
                 * command and saved/restored together with the context.
                 */
-               if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+               if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+                       && mmio->in_context)
                        continue;
 
                // save
@@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre,
                         * itself.
                         */
                        if (mmio->in_context &&
-                           !is_inhibit_context(s->shadow_ctx, ring_id))
+                           !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
                                continue;
 
                        if (mmio->mask)
@@ -574,14 +578,18 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
        struct engine_mmio *mmio;
 
-       if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+       if (IS_SKYLAKE(gvt->dev_priv) ||
+               IS_KABYLAKE(gvt->dev_priv) ||
+               IS_BROXTON(gvt->dev_priv))
                gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
        else
                gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
 
        for (mmio = gvt->engine_mmio_list.mmio;
             i915_mmio_reg_valid(mmio->reg); mmio++) {
-               if (mmio->in_context)
+               if (mmio->in_context) {
                        gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+                       intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+               }
        }
 }
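
Aside: is_inhibit_context() relies on the same masked-bit encoding as the
register writes -- a bit counts as enabled only when both its value bit and
its mask bit (the value bit shifted into the high 16 bits) are present in the
saved CTX_CONTEXT_CONTROL value. A sketch of that test, where the bit position
is an assumption for illustration:

#include <stdbool.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))
#define CTX_RESTORE_INHIBIT	(1 << 0)   /* assumed bit position */

static bool is_inhibit(uint32_t ctx_ctrl)
{
	uint32_t m = MASKED_BIT_ENABLE(CTX_RESTORE_INHIBIT);

	/* Both the value bit and its mask bit must be set. */
	return (ctx_ctrl & m) == m;
}
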
index 0439eb8057a8a51068263a4272043d4463777c97..5c3b9ff9f96aa979edebd277a8b000715a3a7d72 100644 (file)
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
 
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
index 32ffcd566cddf8965a53d0534682a50fab8594d4..67f19992b226f29a13d408be36da8b4820095b13 100644 (file)
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
 /**
  * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
  * @vgpu: a vGPU
- * @gpfn: guest pfn
+ * @gfn: guest pfn
+ * @size: page size
  * @dma_addr: retrieve allocated dma addr
  *
  * Returns:
  * 0 on success, negative error code if failed.
  */
 static inline int intel_gvt_hypervisor_dma_map_guest_page(
-               struct intel_vgpu *vgpu, unsigned long gfn,
+               struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
                dma_addr_t *dma_addr)
 {
-       return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+       return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
                                                      dma_addr);
 }
 
index 53e2bd79c97d9d9ae81d4b77a7007fb155132009..256d0db8bbb1553f3539925dc8ebab698a6070ef 100644 (file)
@@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
 int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
                void *data, unsigned int bytes)
 {
-       struct intel_gvt *gvt = vgpu->gvt;
        struct intel_vgpu_page_track *page_track;
        int ret = 0;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
        if (!page_track) {
@@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
        }
 
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
index d053cbe1dc94c9bcd1ce1edd2fe32ab5dc8efc17..09d7bb72b4ff30e45688add463c1fdaaa9a71ad1 100644 (file)
@@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
        struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
        ktime_t cur_time;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&gvt->sched_lock);
        cur_time = ktime_get();
 
        if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
@@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
        vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
        tbs_sched_func(sched_data);
 
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&gvt->sched_lock);
 }
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
@@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 
 int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
 {
+       int ret;
+
+       mutex_lock(&gvt->sched_lock);
        gvt->scheduler.sched_ops = &tbs_schedule_ops;
+       ret = gvt->scheduler.sched_ops->init(gvt);
+       mutex_unlock(&gvt->sched_lock);
 
-       return gvt->scheduler.sched_ops->init(gvt);
+       return ret;
 }
 
 void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
 {
+       mutex_lock(&gvt->sched_lock);
        gvt->scheduler.sched_ops->clean(gvt);
+       mutex_unlock(&gvt->sched_lock);
 }
 
+/* For the per-vgpu scheduler policy there are two pieces of per-vgpu
+ * data: sched_data and sched_ctl. Both are treated as part of the
+ * global scheduler and are protected by gvt->sched_lock. Callers must
+ * decide for themselves whether vgpu_lock needs to be held outside.
+ */
+
 int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
 {
-       return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+       int ret;
+
+       mutex_lock(&vgpu->gvt->sched_lock);
+       ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
+       mutex_unlock(&vgpu->gvt->sched_lock);
+
+       return ret;
 }
 
 void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->gvt->sched_lock);
        vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
+       mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
+       mutex_lock(&vgpu->gvt->sched_lock);
        if (!vgpu_data->active) {
                gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
                vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
        }
+       mutex_unlock(&vgpu->gvt->sched_lock);
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
 {
+       mutex_lock(&gvt->sched_lock);
        intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+       mutex_unlock(&gvt->sched_lock);
 }
 
 void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
@@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 
        gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
+       mutex_lock(&vgpu->gvt->sched_lock);
        scheduler->sched_ops->stop_schedule(vgpu);
 
        if (scheduler->next_vgpu == vgpu)
@@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
+       mutex_unlock(&vgpu->gvt->sched_lock);
 }
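
Aside: the scheduler changes in this file are part of splitting the old single
gvt->lock into a global sched_lock plus a per-vGPU vgpu_lock, with vgpu_lock
taken before sched_lock whenever both are needed (as complete_current_workload
does further down). A pthreads sketch of that ordering (illustrative, not the
kernel locking API):

#include <pthread.h>

static pthread_mutex_t vgpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

static void complete_workload(void)
{
	pthread_mutex_lock(&vgpu_lock);		/* per-vGPU state first */
	pthread_mutex_lock(&sched_lock);	/* then global scheduler */
	/* ... update scheduler state, then vGPU state ... */
	pthread_mutex_unlock(&sched_lock);
	pthread_mutex_unlock(&vgpu_lock);
}
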
index c2d183b91500b72fccb8acd335e36f522b7403ee..b0e566956b8d5ce1609c0f19f31ffa45e610adee 100644 (file)
@@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer(
                struct execlist_ring_context *ring_context,
                u32 pdp[8])
 {
-       struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
        int i;
 
        for (i = 0; i < 8; i++)
-               pdp_pair[i].val = pdp[7 - i];
+               ring_context->pdps[i].val = pdp[7 - i];
 }
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-       struct intel_vgpu *vgpu = workload->vgpu;
-       int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
+               workload->req->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
 
@@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        int ring_id = workload->ring_id;
-       struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
        struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
+               workload->req->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *dst;
@@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 static inline bool is_gvt_request(struct i915_request *req)
 {
-       return i915_gem_context_force_single_submission(req->ctx);
+       return i915_gem_context_force_single_submission(req->gem_context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
-               struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
        u64 desc = 0;
 
        desc = ce->lrc_desc;
@@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
         * like GEN8_CTX_* cached in desc_template
         */
        desc &= U64_MAX << 12;
-       desc |= ctx->desc_template & ((1ULL << 12) - 1);
+       desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
 
        ce->lrc_desc = desc;
 }
@@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
+       struct i915_request *req = workload->req;
        void *shadow_ring_buffer_va;
        u32 *cs;
-       struct i915_request *req = workload->req;
 
-       if (IS_KABYLAKE(req->i915) &&
-           is_inhibit_context(req->ctx, req->engine->id))
+       if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+               && is_inhibit_context(req->hw_context))
                intel_vgpu_restore_inhibit_context(vgpu, req);
 
        /* allocate shadow ring buffer */
@@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       int ring_id = workload->ring_id;
-       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct intel_ring *ring;
+       struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+       struct intel_context *ce;
+       struct i915_request *rq;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       if (workload->shadowed)
+       if (workload->req)
                return 0;
 
+       /* GVT pins the shadow context itself even though i915 will pin
+        * it again when allocating the request. GVT updates the guest
+        * context from the shadow context when the workload completes,
+        * and by that time i915 may already have unpinned the shadow
+        * context, making the shadow_ctx pages invalid. After updating
+        * the guest context, GVT can unpin the shadow_ctx safely.
+        */
+       ce = intel_context_pin(shadow_ctx, engine);
+       if (IS_ERR(ce)) {
+               gvt_vgpu_err("fail to pin shadow context\n");
+               return PTR_ERR(ce);
+       }
+
        shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
        shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
                                    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-       if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
-               shadow_context_descriptor_update(shadow_ctx,
-                                       dev_priv->engine[ring_id]);
+       if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+               shadow_context_descriptor_update(ce);
 
        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
-               goto err_scan;
+               goto err_unpin;
 
        if ((workload->ring_id == RCS) &&
            (workload->wa_ctx.indirect_ctx.size != 0)) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
-                       goto err_scan;
+                       goto err_shadow;
        }
 
-       /* pin shadow context by gvt even the shadow context will be pinned
-        * when i915 alloc request. That is because gvt will update the guest
-        * context from shadow context when workload is completed, and at that
-        * moment, i915 may already unpined the shadow context to make the
-        * shadow_ctx pages invalid. So gvt need to pin itself. After update
-        * the guest context, gvt can unpin the shadow_ctx safely.
-        */
-       ring = intel_context_pin(shadow_ctx, engine);
-       if (IS_ERR(ring)) {
-               ret = PTR_ERR(ring);
-               gvt_vgpu_err("fail to pin shadow context\n");
+       rq = i915_request_alloc(engine, shadow_ctx);
+       if (IS_ERR(rq)) {
+               gvt_vgpu_err("fail to allocate gem request\n");
+               ret = PTR_ERR(rq);
                goto err_shadow;
        }
+       workload->req = i915_request_get(rq);
 
        ret = populate_shadow_context(workload);
        if (ret)
-               goto err_unpin;
-       workload->shadowed = true;
-       return 0;
+               goto err_req;
 
-err_unpin:
-       intel_context_unpin(shadow_ctx, engine);
+       return 0;
+err_req:
+       rq = fetch_and_zero(&workload->req);
+       i915_request_put(rq);
 err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
-       return ret;
-}
-
-static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
-{
-       int ring_id = workload->ring_id;
-       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct i915_request *rq;
-       struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_submission *s = &vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-       int ret;
-
-       rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
-       if (IS_ERR(rq)) {
-               gvt_vgpu_err("fail to allocate gem request\n");
-               ret = PTR_ERR(rq);
-               goto err_unpin;
-       }
-
-       gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
-       workload->req = i915_request_get(rq);
-       ret = copy_workload_to_ring_buffer(workload);
-       if (ret)
-               goto err_unpin;
-       return 0;
-
 err_unpin:
-       intel_context_unpin(shadow_ctx, engine);
-       release_shadow_wa_ctx(&workload->wa_ctx);
+       intel_context_unpin(ce);
        return ret;
 }
 
@@ -508,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                        i915_gem_obj_finish_shmem_access(bb->obj);
                        bb->accessing = false;
 
-                       i915_vma_move_to_active(bb->vma, workload->req, 0);
+                       ret = i915_vma_move_to_active(bb->vma,
+                                                     workload->req,
+                                                     0);
+                       if (ret)
+                               goto err;
                }
        }
        return 0;
@@ -517,21 +489,13 @@ err:
        return ret;
 }
 
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-       struct intel_vgpu_workload *workload = container_of(wa_ctx,
-                                       struct intel_vgpu_workload,
-                                       wa_ctx);
-       int ring_id = workload->ring_id;
-       struct intel_vgpu_submission *s = &workload->vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-       struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
-       struct execlist_ring_context *shadow_ring_context;
-       struct page *page;
-
-       page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap_atomic(page);
+       struct intel_vgpu_workload *workload =
+               container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+       struct i915_request *rq = workload->req;
+       struct execlist_ring_context *shadow_ring_context =
+               (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
 
        shadow_ring_context->bb_per_ctx_ptr.val =
                (shadow_ring_context->bb_per_ctx_ptr.val &
@@ -539,9 +503,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        shadow_ring_context->rcs_indirect_ctx.val =
                (shadow_ring_context->rcs_indirect_ctx.val &
                (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-       kunmap_atomic(shadow_ring_context);
-       return 0;
 }
 
 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -633,7 +594,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
                goto err_unpin_mm;
        }
 
-       ret = intel_gvt_generate_request(workload);
+       ret = copy_workload_to_ring_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to generate request\n");
                goto err_unpin_mm;
@@ -670,16 +631,14 @@ err_unpin_mm:
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_vgpu_submission *s = &vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int ring_id = workload->ring_id;
-       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       int ret = 0;
+       int ret;
 
        gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
                ring_id, workload);
 
+       mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
        ret = intel_gvt_scan_and_shadow_workload(workload);
@@ -687,10 +646,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
                goto out;
 
        ret = prepare_workload(workload);
-       if (ret) {
-               intel_context_unpin(shadow_ctx, engine);
-               goto out;
-       }
 
 out:
        if (ret)
@@ -704,6 +659,7 @@ out:
        }
 
        mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&vgpu->vgpu_lock);
        return ret;
 }
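
dispatch_workload() now nests the new per-vGPU lock outside the device-global struct_mutex. A toy pthread model of that ordering (the lock names mirror the diff; everything else is invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vgpu_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Always take the per-vGPU lock first, then the device-global mutex. */
static int dispatch(int workload)
{
        pthread_mutex_lock(&vgpu_lock);
        pthread_mutex_lock(&struct_mutex);

        printf("dispatching workload %d\n", workload);

        pthread_mutex_unlock(&struct_mutex);
        pthread_mutex_unlock(&vgpu_lock);
        return 0;
}

int main(void)
{
        return dispatch(1);
}

The same outermost-first order shows up again in complete_current_workload() below, where vgpu_lock is taken before sched_lock; keeping one global order is what lets the coarse gvt->lock disappear from these paths without deadlock.
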
 
@@ -713,7 +669,7 @@ static struct intel_vgpu_workload *pick_next_workload(
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&gvt->sched_lock);
 
        /*
         * no current vgpu / will be scheduled out / no workload
@@ -759,33 +715,29 @@ static struct intel_vgpu_workload *pick_next_workload(
 
        atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&gvt->sched_lock);
        return workload;
 }
 
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
+       struct i915_request *rq = workload->req;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
-       struct intel_vgpu_submission *s = &vgpu->submission;
-       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-       int ring_id = workload->ring_id;
-       struct drm_i915_gem_object *ctx_obj =
-               shadow_ctx->__engine[ring_id].state->obj;
+       struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;
 
-       gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
-                       workload->ctx_desc.lrca);
-
-       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+       gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+                     workload->ctx_desc.lrca);
 
+       context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
 
-       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+       if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
                context_page_num = 19;
 
        i = 2;
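
The page count above is simply the engine context size divided by the page size, with Broadwell's render engine pinned to 19 pages. A quick sketch of the arithmetic, assuming 4 KiB pages and a made-up context size:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assuming 4 KiB pages */

int main(void)
{
        unsigned long context_size = 22 * 4096; /* made-up engine context size */
        unsigned long pages = context_size >> PAGE_SHIFT;

        printf("context pages: %lu\n", pages); /* prints 22 */
        return 0;
}
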
@@ -858,19 +810,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                scheduler->current_workload[ring_id];
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_request *rq = workload->req;
        int event;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
+       mutex_lock(&gvt->sched_lock);
 
        /* For a workload with a request, wait for the context
         * switch to make sure the request is completed.
         * For a workload without a request, complete it directly.
         */
-       if (workload->req) {
-               struct drm_i915_private *dev_priv =
-                       workload->vgpu->gvt->dev_priv;
-               struct intel_engine_cs *engine =
-                       dev_priv->engine[workload->ring_id];
+       if (rq) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -886,8 +836,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                workload->status = 0;
                }
 
-               i915_request_put(fetch_and_zero(&workload->req));
-
                if (!workload->status && !(vgpu->resetting_eng &
                                           ENGINE_MASK(ring_id))) {
                        update_guest_context(workload);
@@ -896,10 +844,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
                                         INTEL_GVT_EVENT_MAX)
                                intel_vgpu_trigger_virtual_event(vgpu, event);
                }
-               mutex_lock(&dev_priv->drm.struct_mutex);
+
                /* unpin shadow ctx as the shadow_ctx update is done */
-               intel_context_unpin(s->shadow_ctx, engine);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
+               mutex_lock(&rq->i915->drm.struct_mutex);
+               intel_context_unpin(rq->hw_context);
+               mutex_unlock(&rq->i915->drm.struct_mutex);
+
+               i915_request_put(fetch_and_zero(&workload->req));
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -939,7 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
        if (gvt->scheduler.need_reschedule)
                intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
 
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&gvt->sched_lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
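
Note the completion path now unpins the context while the request reference is still held, and drops the request last. A toy refcount model of why that ordering matters (types and helpers are invented for illustration):

#include <assert.h>
#include <stdio.h>

struct request {
        int refcount;
        int ctx_pinned;
};

static void context_unpin(struct request *rq)
{
        assert(rq->refcount > 0); /* rq must still be alive here */
        rq->ctx_pinned = 0;
}

static void request_put(struct request *rq)
{
        if (--rq->refcount == 0)
                printf("request freed\n");
}

int main(void)
{
        struct request rq = { .refcount = 1, .ctx_pinned = 1 };

        context_unpin(&rq); /* unpin first, while we still own a reference */
        request_put(&rq);   /* only then drop the last reference */
        return 0;
}
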
 
 struct workload_thread_param {
@@ -957,7 +909,8 @@ static int workload_thread(void *priv)
        struct intel_vgpu *vgpu = NULL;
        int ret;
        bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-                       || IS_KABYLAKE(gvt->dev_priv);
+                       || IS_KABYLAKE(gvt->dev_priv)
+                       || IS_BROXTON(gvt->dev_priv);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        kfree(p);
@@ -991,9 +944,7 @@ static int workload_thread(void *priv)
                        intel_uncore_forcewake_get(gvt->dev_priv,
                                        FORCEWAKE_ALL);
 
-               mutex_lock(&gvt->lock);
                ret = dispatch_workload(workload);
-               mutex_unlock(&gvt->lock);
 
                if (ret) {
                        vgpu = workload->vgpu;
@@ -1270,7 +1221,6 @@ alloc_workload(struct intel_vgpu *vgpu)
        atomic_set(&workload->shadow_ctx_active, 0);
 
        workload->status = -EINPROGRESS;
-       workload->shadowed = false;
        workload->vgpu = vgpu;
 
        return workload;
@@ -1285,7 +1235,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
        u64 gpa;
        int i;
 
-       gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
+       gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
 
        for (i = 0; i < 8; i++)
                intel_gvt_hypervisor_read_gpa(vgpu,
index 6c644782193eaf28241174ea3e3f8d59776b2bf6..21eddab4a9cd465b1f835cb310ea091387d9bea2 100644 (file)
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
        struct i915_request *req;
        /* has this workload been dispatched to i915? */
        bool dispatched;
-       bool shadowed;
        int status;
 
        struct intel_vgpu_mm *shadow_mm;
index 572a18c2bfb509a4bbf6dc62056c5fece4b147a0..f6fa916517c32fc1de1ddb47b3703b30283bbf59 100644 (file)
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 
        vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
        vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+       vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
 
        vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
                vgpu_aperture_gmadr_base(vgpu);
@@ -58,6 +59,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 
        vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
 
+       vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX;
+       vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX;
+
        gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
        gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
                vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
@@ -223,22 +227,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
-       struct intel_gvt *gvt = vgpu->gvt;
-
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        vgpu->active = false;
 
        if (atomic_read(&vgpu->submission.running_workload_num)) {
-               mutex_unlock(&gvt->lock);
+               mutex_unlock(&vgpu->vgpu_lock);
                intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&gvt->lock);
+               mutex_lock(&vgpu->vgpu_lock);
        }
 
        intel_vgpu_stop_schedule(vgpu);
        intel_vgpu_dmabuf_cleanup(vgpu);
 
-       mutex_unlock(&gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -252,14 +254,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
-       mutex_lock(&gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
 
        WARN(vgpu->active, "vGPU is still active!\n");
 
        intel_gvt_debugfs_remove_vgpu(vgpu);
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
-       if (idr_is_empty(&gvt->vgpu_idr))
-               intel_gvt_clean_irq(gvt);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_submission(vgpu);
        intel_vgpu_clean_display(vgpu);
@@ -269,10 +268,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_free_resource(vgpu);
        intel_vgpu_clean_mmio(vgpu);
        intel_vgpu_dmabuf_cleanup(vgpu);
-       vfree(vgpu);
+       mutex_unlock(&vgpu->vgpu_lock);
 
+       mutex_lock(&gvt->lock);
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
+       if (idr_is_empty(&gvt->vgpu_idr))
+               intel_gvt_clean_irq(gvt);
        intel_gvt_update_vgpu_types(gvt);
        mutex_unlock(&gvt->lock);
+
+       vfree(vgpu);
 }
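
Destruction is now two phases: per-vGPU teardown under the instance lock, then removal from the global IDR under gvt->lock, with the free deferred until the vGPU is unreachable from both sides. A userspace sketch of the same shape, with a plain array standing in for the IDR:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_VGPU 8

struct vgpu {
        int id;
        pthread_mutex_t lock;
};

static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vgpu *registry[MAX_VGPU]; /* stands in for gvt->vgpu_idr */

static void destroy_vgpu(struct vgpu *v)
{
        /* Phase 1: tear down instance state under the per-vGPU lock. */
        pthread_mutex_lock(&v->lock);
        printf("cleaning up vgpu %d\n", v->id);
        pthread_mutex_unlock(&v->lock);

        /* Phase 2: drop global visibility under the device lock. */
        pthread_mutex_lock(&gvt_lock);
        registry[v->id] = NULL;
        pthread_mutex_unlock(&gvt_lock);

        free(v); /* free only once nothing can look the vGPU up */
}

int main(void)
{
        struct vgpu *v = malloc(sizeof(*v));

        if (!v)
                return 1;
        v->id = 1;
        pthread_mutex_init(&v->lock, NULL);
        registry[v->id] = v;
        destroy_vgpu(v);
        return 0;
}
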
 
 #define IDLE_VGPU_IDR 0
@@ -298,6 +303,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
 
        vgpu->id = IDLE_VGPU_IDR;
        vgpu->gvt = gvt;
+       mutex_init(&vgpu->vgpu_lock);
 
        for (i = 0; i < I915_NUM_ENGINES; i++)
                INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]);
@@ -324,7 +330,10 @@ out_free_vgpu:
  */
 void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->vgpu_lock);
        intel_vgpu_clean_sched_policy(vgpu);
+       mutex_unlock(&vgpu->vgpu_lock);
+
        vfree(vgpu);
 }
 
@@ -342,8 +351,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (!vgpu)
                return ERR_PTR(-ENOMEM);
 
-       mutex_lock(&gvt->lock);
-
        ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
                GFP_KERNEL);
        if (ret < 0)
@@ -353,6 +360,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->handle = param->handle;
        vgpu->gvt = gvt;
        vgpu->sched_ctl.weight = param->weight;
+       mutex_init(&vgpu->vgpu_lock);
        INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
        INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
        idr_init(&vgpu->object_idr);
@@ -400,8 +408,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
-       mutex_unlock(&gvt->lock);
-
        return vgpu;
 
 out_clean_sched_policy:
@@ -424,7 +430,6 @@ out_clean_idr:
        idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
        vfree(vgpu);
-       mutex_unlock(&gvt->lock);
        return ERR_PTR(ret);
 }
 
@@ -456,12 +461,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
        param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
 
+       mutex_lock(&gvt->lock);
        vgpu = __intel_gvt_create_vgpu(gvt, &param);
-       if (IS_ERR(vgpu))
-               return vgpu;
-
-       /* calculate left instance change for types */
-       intel_gvt_update_vgpu_types(gvt);
+       if (!IS_ERR(vgpu))
+               /* calculate left instance change for types */
+               intel_gvt_update_vgpu_types(gvt);
+       mutex_unlock(&gvt->lock);
 
        return vgpu;
 }
@@ -473,7 +478,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  * @engine_mask: engines to reset for GT reset
  *
  * This function is called when user wants to reset a virtual GPU through
- * device model reset or GT reset. The caller should hold the gvt lock.
+ * device model reset or GT reset. The caller should hold the vgpu lock.
  *
  * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
  * the whole vGPU to default state as when it is created. This vGPU function
@@ -513,9 +518,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
         * scheduler when the reset is triggered by current vgpu.
         */
        if (scheduler->current_vgpu == NULL) {
-               mutex_unlock(&gvt->lock);
+               mutex_unlock(&vgpu->vgpu_lock);
                intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&gvt->lock);
+               mutex_lock(&vgpu->vgpu_lock);
        }
 
        intel_vgpu_reset_submission(vgpu, resetting_eng);
@@ -555,7 +560,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
-       mutex_lock(&vgpu->gvt->lock);
+       mutex_lock(&vgpu->vgpu_lock);
        intel_gvt_reset_vgpu_locked(vgpu, true, 0);
-       mutex_unlock(&vgpu->gvt->lock);
+       mutex_unlock(&vgpu->vgpu_lock);
 }
index 13e7b9e4a6e6ffa7f15aa4e9740df9ffd3b68737..f9ce35da4123ec52657f55f6a704c12c9c286080 100644 (file)
@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
                } else {
                        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 
-                       if (ppgtt->base.file != stats->file_priv)
+                       if (ppgtt->vm.file != stats->file_priv)
                                continue;
                }
 
@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                   dpy_count, dpy_size);
 
        seq_printf(m, "%llu [%pa] gtt total\n",
-                  ggtt->base.total, &ggtt->mappable_end);
+                  ggtt->vm.total, &ggtt->mappable_end);
        seq_printf(m, "Supported page sizes: %s\n",
                   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
                                        buf, sizeof(buf)));
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
                                                   struct i915_request,
                                                   client_link);
                rcu_read_lock();
-               task = pid_task(request && request->ctx->pid ?
-                               request->ctx->pid : file->pid,
+               task = pid_task(request && request->gem_context->pid ?
+                               request->gem_context->pid : file->pid,
                                PIDTYPE_PID);
                print_file_stats(m, task ? task->comm : "<unknown>", stats);
                rcu_read_unlock();
@@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
-               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
-                       pm_ier = I915_READ(GEN6_PMIER);
-                       pm_imr = I915_READ(GEN6_PMIMR);
-                       pm_isr = I915_READ(GEN6_PMISR);
-                       pm_iir = I915_READ(GEN6_PMIIR);
-                       pm_mask = I915_READ(GEN6_PMINTRMSK);
-               } else {
+               if (INTEL_GEN(dev_priv) >= 11) {
+                       pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+                       pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
+                       /*
+                        * The equivalent to the PM ISR & IIR cannot be read
+                        * without affecting the current state of the system
+                        */
+                       pm_isr = 0;
+                       pm_iir = 0;
+               } else if (INTEL_GEN(dev_priv) >= 8) {
                        pm_ier = I915_READ(GEN8_GT_IER(2));
                        pm_imr = I915_READ(GEN8_GT_IMR(2));
                        pm_isr = I915_READ(GEN8_GT_ISR(2));
                        pm_iir = I915_READ(GEN8_GT_IIR(2));
-                       pm_mask = I915_READ(GEN6_PMINTRMSK);
+               } else {
+                       pm_ier = I915_READ(GEN6_PMIER);
+                       pm_imr = I915_READ(GEN6_PMIMR);
+                       pm_isr = I915_READ(GEN6_PMISR);
+                       pm_iir = I915_READ(GEN6_PMIIR);
                }
+               pm_mask = I915_READ(GEN6_PMINTRMSK);
+
                seq_printf(m, "Video Turbo Mode: %s\n",
                           yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
                seq_printf(m, "HW control enabled: %s\n",
@@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                seq_printf(m, "SW control enabled: %s\n",
                           yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
                                  GEN6_RP_MEDIA_SW_MODE));
-               seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
-                          pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+
+               seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+                          pm_ier, pm_imr, pm_mask);
+               if (INTEL_GEN(dev_priv) <= 10)
+                       seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+                                  pm_isr, pm_iir);
                seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
                           rps->pm_intrmsk_mbz);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
@@ -1205,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
                seq_printf(m, "RP PREV UP: %d (%dus)\n",
                           rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
-               seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
+               seq_printf(m, "Up threshold: %d%%\n",
+                          rps->power.up_threshold);
 
                seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
                           rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1213,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                           rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
                seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
                           rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
-               seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
+               seq_printf(m, "Down threshold: %d%%\n",
+                          rps->power.down_threshold);
 
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
@@ -1346,11 +1361,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
                           engine->hangcheck.seqno, seqno[id],
                           intel_engine_last_submit(engine));
-               seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
+               seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
                           yesno(intel_engine_has_waiter(engine)),
                           yesno(test_bit(engine->id,
                                          &dev_priv->gpu_error.missed_irq_rings)),
-                          yesno(engine->hangcheck.stalled));
+                          yesno(engine->hangcheck.stalled),
+                          yesno(engine->hangcheck.wedged));
 
                spin_lock_irq(&b->rb_lock);
                for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
@@ -1645,11 +1661,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        else
                seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
 
-       if (fbc->work.scheduled)
-               seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
-                          fbc->work.scheduled_vblank,
-                          drm_crtc_vblank_count(&fbc->crtc->base));
-
        if (intel_fbc_is_active(dev_priv)) {
                u32 mask;
 
@@ -1895,7 +1906,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fbdev_fb->base.format->cpp[0] * 8,
                           fbdev_fb->base.modifier,
                           drm_framebuffer_read_refcount(&fbdev_fb->base));
-               describe_obj(m, fbdev_fb->obj);
+               describe_obj(m, intel_fb_obj(&fbdev_fb->base));
                seq_putc(m, '\n');
        }
 #endif
@@ -1913,7 +1924,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fb->base.format->cpp[0] * 8,
                           fb->base.modifier,
                           drm_framebuffer_read_refcount(&fb->base));
-               describe_obj(m, fb->obj);
+               describe_obj(m, intel_fb_obj(&fb->base));
                seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
@@ -2209,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
+       seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
        seq_printf(m, "Frequency requested %d\n",
                   intel_gpu_freq(dev_priv, rps->cur_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
@@ -2252,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
-                          rps_power_to_str(rps->power));
+                          rps_power_to_str(rps->power.mode));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
-                          rps->up_threshold);
+                          rps->power.up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
-                          rps->down_threshold);
+                          rps->power.down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }
@@ -2523,7 +2535,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
        if (!USES_GUC(dev_priv))
                return -ENODEV;
 
-       *val = intel_guc_log_level_get(&dev_priv->guc.log);
+       *val = intel_guc_log_get_level(&dev_priv->guc.log);
 
        return 0;
 }
@@ -2535,7 +2547,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
        if (!USES_GUC(dev_priv))
                return -ENODEV;
 
-       return intel_guc_log_level_set(&dev_priv->guc.log, val);
+       return intel_guc_log_set_level(&dev_priv->guc.log, val);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2583,31 +2595,9 @@ static const struct file_operations i915_guc_log_relay_fops = {
        .release = i915_guc_log_relay_release,
 };
 
-static const char *psr2_live_status(u32 val)
-{
-       static const char * const live_status[] = {
-               "IDLE",
-               "CAPTURE",
-               "CAPTURE_FS",
-               "SLEEP",
-               "BUFON_FW",
-               "ML_UP",
-               "SU_STANDBY",
-               "FAST_SLEEP",
-               "DEEP_SLEEP",
-               "BUF_ON",
-               "TG_ON"
-       };
-
-       val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
-       if (val < ARRAY_SIZE(live_status))
-               return live_status[val];
-
-       return "unknown";
-}
-
-static const char *psr_sink_status(u8 val)
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 {
+       u8 val;
        static const char * const sink_status[] = {
                "inactive",
                "transition to active, capture and display",
@@ -2616,22 +2606,94 @@ static const char *psr_sink_status(u8 val)
                "transition to inactive, capture and display, timing re-sync",
                "reserved",
                "reserved",
-               "sink internal error"
+               "sink internal error",
        };
+       struct drm_connector *connector = m->private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
+       struct intel_dp *intel_dp =
+               enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+       int ret;
 
-       val &= DP_PSR_SINK_STATE_MASK;
-       if (val < ARRAY_SIZE(sink_status))
-               return sink_status[val];
+       if (!CAN_PSR(dev_priv)) {
+               seq_puts(m, "PSR Unsupported\n");
+               return -ENODEV;
+       }
 
-       return "unknown";
+       if (connector->status != connector_status_connected)
+               return -ENODEV;
+
+       ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+       if (ret == 1) {
+               const char *str = "unknown";
+
+               val &= DP_PSR_SINK_STATE_MASK;
+               if (val < ARRAY_SIZE(sink_status))
+                       str = sink_status[val];
+               seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+       } else {
+               return ret;
+       }
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+       u32 val, psr_status;
+
+       if (dev_priv->psr.psr2_enabled) {
+               static const char * const live_status[] = {
+                       "IDLE",
+                       "CAPTURE",
+                       "CAPTURE_FS",
+                       "SLEEP",
+                       "BUFON_FW",
+                       "ML_UP",
+                       "SU_STANDBY",
+                       "FAST_SLEEP",
+                       "DEEP_SLEEP",
+                       "BUF_ON",
+                       "TG_ON"
+               };
+               psr_status = I915_READ(EDP_PSR2_STATUS);
+               val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
+                       EDP_PSR2_STATUS_STATE_SHIFT;
+               if (val < ARRAY_SIZE(live_status)) {
+                       seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+                                  psr_status, live_status[val]);
+                       return;
+               }
+       } else {
+               static const char * const live_status[] = {
+                       "IDLE",
+                       "SRDONACK",
+                       "SRDENT",
+                       "BUFOFF",
+                       "BUFON",
+                       "AUXACK",
+                       "SRDOFFACK",
+                       "SRDENT_ON",
+               };
+               psr_status = I915_READ(EDP_PSR_STATUS);
+               val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
+                       EDP_PSR_STATUS_STATE_SHIFT;
+               if (val < ARRAY_SIZE(live_status)) {
+                       seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+                                  psr_status, live_status[val]);
+                       return;
+               }
+       }
+
+       seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
 }
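
Both the sink and source decoders above share one idiom: mask and shift a status field, then index a string table with a bounds check so unknown hardware states degrade to "unknown" rather than reading out of range. Distilled (mask, shift, and table are illustrative, shortened from the diff):

#include <stdint.h>
#include <stdio.h>

#define STATE_MASK  0x7u /* illustrative field mask */
#define STATE_SHIFT 0

static const char * const live_status[] = {
        "IDLE", "CAPTURE", "SLEEP",
};

static const char *decode_state(uint32_t status)
{
        uint32_t val = (status & STATE_MASK) >> STATE_SHIFT;

        /* Bounds check keeps future states from indexing out of range. */
        if (val < sizeof(live_status) / sizeof(live_status[0]))
                return live_status[val];
        return "unknown";
}

int main(void)
{
        printf("%s\n", decode_state(0x1)); /* CAPTURE */
        printf("%s\n", decode_state(0x6)); /* unknown */
        return 0;
}
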
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        u32 psrperf = 0;
-       u32 stat[3];
-       enum pipe pipe;
        bool enabled = false;
        bool sink_support;
 
@@ -2649,50 +2711,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
        seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
                   dev_priv->psr.busy_frontbuffer_bits);
-       seq_printf(m, "Re-enable work scheduled: %s\n",
-                  yesno(work_busy(&dev_priv->psr.work.work)));
-
-       if (HAS_DDI(dev_priv)) {
-               if (dev_priv->psr.psr2_enabled)
-                       enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
-               else
-                       enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
-       } else {
-               for_each_pipe(dev_priv, pipe) {
-                       enum transcoder cpu_transcoder =
-                               intel_pipe_to_cpu_transcoder(dev_priv, pipe);
-                       enum intel_display_power_domain power_domain;
 
-                       power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
-                       if (!intel_display_power_get_if_enabled(dev_priv,
-                                                               power_domain))
-                               continue;
-
-                       stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
-                               VLV_EDP_PSR_CURR_STATE_MASK;
-                       if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-                           (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-                               enabled = true;
-
-                       intel_display_power_put(dev_priv, power_domain);
-               }
-       }
+       if (dev_priv->psr.psr2_enabled)
+               enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+       else
+               enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
 
        seq_printf(m, "Main link in standby mode: %s\n",
                   yesno(dev_priv->psr.link_standby));
 
-       seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
-       if (!HAS_DDI(dev_priv))
-               for_each_pipe(dev_priv, pipe) {
-                       if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-                           (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
-                               seq_printf(m, " pipe %c", pipe_name(pipe));
-               }
-       seq_puts(m, "\n");
+       seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
 
        /*
-        * VLV/CHV PSR has no kind of performance counter
         * SKL+ Perf counter is reset to 0 every time DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2701,21 +2731,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
                seq_printf(m, "Performance_Counter: %u\n", psrperf);
        }
-       if (dev_priv->psr.psr2_enabled) {
-               u32 psr2 = I915_READ(EDP_PSR2_STATUS);
-
-               seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
-                          psr2, psr2_live_status(psr2));
-       }
 
-       if (dev_priv->psr.enabled) {
-               struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
-               u8 val;
-
-               if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
-                       seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
-                                  psr_sink_status(val));
-       }
+       psr_source_status(dev_priv, m);
        mutex_unlock(&dev_priv->psr.lock);
 
        if (READ_ONCE(dev_priv->psr.debug)) {
@@ -2762,86 +2779,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
                        i915_edp_psr_debug_get, i915_edp_psr_debug_set,
                        "%llu\n");
 
-static int i915_sink_crc(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_device *dev = &dev_priv->drm;
-       struct intel_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-       struct intel_dp *intel_dp = NULL;
-       struct drm_modeset_acquire_ctx ctx;
-       int ret;
-       u8 crc[6];
-
-       drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-
-       for_each_intel_connector_iter(connector, &conn_iter) {
-               struct drm_crtc *crtc;
-               struct drm_connector_state *state;
-               struct intel_crtc_state *crtc_state;
-
-               if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
-                       continue;
-
-retry:
-               ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
-               if (ret)
-                       goto err;
-
-               state = connector->base.state;
-               if (!state->best_encoder)
-                       continue;
-
-               crtc = state->crtc;
-               ret = drm_modeset_lock(&crtc->mutex, &ctx);
-               if (ret)
-                       goto err;
-
-               crtc_state = to_intel_crtc_state(crtc->state);
-               if (!crtc_state->base.active)
-                       continue;
-
-               /*
-                * We need to wait for all crtc updates to complete, to make
-                * sure any pending modesets and plane updates are completed.
-                */
-               if (crtc_state->base.commit) {
-                       ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
-
-                       if (ret)
-                               goto err;
-               }
-
-               intel_dp = enc_to_intel_dp(state->best_encoder);
-
-               ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
-               if (ret)
-                       goto err;
-
-               seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
-                          crc[0], crc[1], crc[2],
-                          crc[3], crc[4], crc[5]);
-               goto out;
-
-err:
-               if (ret == -EDEADLK) {
-                       ret = drm_modeset_backoff(&ctx);
-                       if (!ret)
-                               goto retry;
-               }
-               goto out;
-       }
-       ret = -ENODEV;
-out:
-       drm_connector_list_iter_end(&conn_iter);
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-
-       return ret;
-}
-
 static int i915_energy_uJ(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3398,28 +3335,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 
 static int i915_wa_registers(struct seq_file *m, void *unused)
 {
-       struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct i915_workarounds *workarounds = &dev_priv->workarounds;
+       struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
        int i;
 
-       intel_runtime_pm_get(dev_priv);
-
-       seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
-       for (i = 0; i < workarounds->count; ++i) {
-               i915_reg_t addr;
-               u32 mask, value, read;
-               bool ok;
-
-               addr = workarounds->reg[i].addr;
-               mask = workarounds->reg[i].mask;
-               value = workarounds->reg[i].value;
-               read = I915_READ(addr);
-               ok = (value & mask) == (read & mask);
-               seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
-                          i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
-       }
-
-       intel_runtime_pm_put(dev_priv);
+       seq_printf(m, "Workarounds applied: %d\n", wa->count);
+       for (i = 0; i < wa->count; ++i)
+               seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+                          wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
 
        return 0;
 }
@@ -4121,7 +4043,8 @@ fault_irq_set(struct drm_i915_private *i915,
 
        err = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_LOCKED |
-                                    I915_WAIT_INTERRUPTIBLE);
+                                    I915_WAIT_INTERRUPTIBLE,
+                                    MAX_SCHEDULE_TIMEOUT);
        if (err)
                goto err_unlock;
 
@@ -4226,7 +4149,8 @@ i915_drop_caches_set(void *data, u64 val)
                if (val & DROP_ACTIVE)
                        ret = i915_gem_wait_for_idle(dev_priv,
                                                     I915_WAIT_INTERRUPTIBLE |
-                                                    I915_WAIT_LOCKED);
+                                                    I915_WAIT_LOCKED,
+                                                    MAX_SCHEDULE_TIMEOUT);
 
                if (val & DROP_RETIRE)
                        i915_retire_requests(dev_priv);
@@ -4245,8 +4169,13 @@ i915_drop_caches_set(void *data, u64 val)
                i915_gem_shrink_all(dev_priv);
        fs_reclaim_release(GFP_KERNEL);
 
-       if (val & DROP_IDLE)
-               drain_delayed_work(&dev_priv->gt.idle_work);
+       if (val & DROP_IDLE) {
+               do {
+                       if (READ_ONCE(dev_priv->gt.active_requests))
+                               flush_delayed_work(&dev_priv->gt.retire_work);
+                       drain_delayed_work(&dev_priv->gt.idle_work);
+               } while (READ_ONCE(dev_priv->gt.awake));
+       }
 
        if (val & DROP_FREED)
                i915_gem_drain_freed_objects(dev_priv);
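
DROP_IDLE is now a convergence loop rather than a single drain: flush retirement while requests are outstanding, drain the idle worker, and repeat until the GT reports itself asleep, since either worker can re-arm the other. A toy version with stubs in place of the workqueue calls:

#include <stdbool.h>
#include <stdio.h>

static int active_requests = 3;
static bool gt_awake = true;

static void flush_retire_work(void)
{
        if (active_requests)
                active_requests--; /* pretend one request retires per flush */
}

static void drain_idle_work(void)
{
        if (!active_requests)
                gt_awake = false; /* idle worker parks the GT once empty */
}

int main(void)
{
        do {
                if (active_requests)
                        flush_retire_work();
                drain_idle_work();
        } while (gt_awake);

        printf("GT idle\n");
        return 0;
}
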
@@ -4795,7 +4724,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_llc", i915_llc, 0},
        {"i915_edp_psr_status", i915_edp_psr_status, 0},
-       {"i915_sink_crc_eDP1", i915_sink_crc, 0},
        {"i915_energy_uJ", i915_energy_uJ, 0},
        {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
        {"i915_power_domain_info", i915_power_domain_info, 0},
@@ -4829,7 +4757,6 @@ static const struct i915_debugfs_files {
 #endif
        {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
        {"i915_next_seqno", &i915_next_seqno_fops},
-       {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
        {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
        {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
        {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -4849,7 +4776,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
        struct drm_minor *minor = dev_priv->drm.primary;
        struct dentry *ent;
-       int ret, i;
+       int i;
 
        ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
                                  minor->debugfs_root, to_i915(minor->dev),
@@ -4857,10 +4784,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
        if (!ent)
                return -ENOMEM;
 
-       ret = intel_pipe_crc_create(minor);
-       if (ret)
-               return ret;
-
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                ent = debugfs_create_file(i915_debugfs_files[i].name,
                                          S_IRUGO | S_IWUSR,
@@ -4982,9 +4905,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
                debugfs_create_file("i915_dpcd", S_IRUGO, root,
                                    connector, &i915_dpcd_fops);
 
-       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
                debugfs_create_file("i915_panel_timings", S_IRUGO, root,
                                    connector, &i915_panel_fops);
+               debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+                                   connector, &i915_psr_sink_status_fops);
+       }
 
        return 0;
 }
index 9c449b8d8eabb666fa6a420ec3301bc0ee84d845..f8cfd16be534cf3eece97c4a59a456e5d8769bde 100644 (file)
@@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line)
        if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
                DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
                         i915_modparams.inject_load_failure, func, line);
+               i915_modparams.inject_load_failure = 0;
                return true;
        }
 
        return false;
 }
+
+bool i915_error_injected(void)
+{
+       return i915_load_fail_count && !i915_modparams.inject_load_failure;
+}
+
 #endif
 
 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
@@ -97,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
-                  __builtin_return_address(0), &vaf);
+       if (is_error)
+               dev_printk(level, kdev, "%pV", &vaf);
+       else
+               dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
+                          __builtin_return_address(0), &vaf);
+
+       va_end(args);
 
        if (is_error && !shown_bug_once) {
                /*
@@ -110,25 +122,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
                        dev_notice(kdev, "%s", FDO_BUG_MSG);
                shown_bug_once = true;
        }
-
-       va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-       return i915_modparams.inject_load_failure &&
-              i915_load_fail_count == i915_modparams.inject_load_failure;
-#else
-       return false;
-#endif
 }
 
-#define i915_load_error(dev_priv, fmt, ...)                                 \
-       __i915_printk(dev_priv,                                              \
-                     i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
-                     fmt, ##__VA_ARGS__)
-
 /* Map PCH device id to PCH type, or PCH_NONE if unknown. */
 static enum intel_pch
 intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
@@ -233,6 +228,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
                id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
        else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
                id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+       else if (IS_ICELAKE(dev_priv))
+               id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
 
        if (id)
                DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -246,14 +243,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
 {
        struct pci_dev *pch = NULL;
 
-       /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
-        * (which really amounts to a PCH but no South Display).
-        */
-       if (INTEL_INFO(dev_priv)->num_pipes == 0) {
-               dev_priv->pch_type = PCH_NOP;
-               return;
-       }
-
        /*
         * The reason to probe ISA bridge instead of Dev31:Fun0 is to
         * make graphics device passthrough work easy for VMM, that only
@@ -282,18 +271,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
                } else if (intel_is_virt_pch(id, pch->subsystem_vendor,
                                         pch->subsystem_device)) {
                        id = intel_virt_detect_pch(dev_priv);
-                       if (id) {
-                               pch_type = intel_pch_type(dev_priv, id);
-                               if (WARN_ON(pch_type == PCH_NONE))
-                                       pch_type = PCH_NOP;
-                       } else {
-                               pch_type = PCH_NOP;
-                       }
+                       pch_type = intel_pch_type(dev_priv, id);
+
+                       /* Sanity check virtual PCH id */
+                       if (WARN_ON(id && pch_type == PCH_NONE))
+                               id = 0;
+
                        dev_priv->pch_type = pch_type;
                        dev_priv->pch_id = id;
                        break;
                }
        }
+
+       /*
+        * Use PCH_NOP (PCH but no South Display) for PCH platforms without
+        * display.
+        */
+       if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+               DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
+               dev_priv->pch_type = PCH_NOP;
+               dev_priv->pch_id = 0;
+       }
+
        if (!pch)
                DRM_DEBUG_KMS("No PCH found.\n");
 
@@ -634,26 +633,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
        .can_switch = i915_switcheroo_can_switch,
 };
 
-static void i915_gem_fini(struct drm_i915_private *dev_priv)
-{
-       /* Flush any outstanding unpin_work. */
-       i915_gem_drain_workqueue(dev_priv);
-
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_uc_fini_hw(dev_priv);
-       intel_uc_fini(dev_priv);
-       i915_gem_cleanup_engines(dev_priv);
-       i915_gem_contexts_fini(dev_priv);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-
-       intel_uc_fini_misc(dev_priv);
-       i915_gem_cleanup_userptr(dev_priv);
-
-       i915_gem_drain_freed_objects(dev_priv);
-
-       WARN_ON(!list_empty(&dev_priv->contexts.list));
-}
-
 static int i915_load_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -703,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        ret = i915_gem_init(dev_priv);
        if (ret)
-               goto cleanup_irq;
+               goto cleanup_modeset;
 
        intel_setup_overlay(dev_priv);
 
@@ -723,6 +702,8 @@ cleanup_gem:
        if (i915_gem_suspend(dev_priv))
                DRM_ERROR("failed to idle hardware; continuing to unload!\n");
        i915_gem_fini(dev_priv);
+cleanup_modeset:
+       intel_modeset_cleanup(dev);
 cleanup_irq:
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev_priv);
@@ -919,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
        spin_lock_init(&dev_priv->uncore.lock);
 
        mutex_init(&dev_priv->sb_lock);
-       mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->av_mutex);
        mutex_init(&dev_priv->wm.wm_mutex);
        mutex_init(&dev_priv->pps_mutex);
@@ -1173,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        intel_uncore_sanitize(dev_priv);
 
-       intel_opregion_setup(dev_priv);
-
        i915_gem_load_init_fences(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1189,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
         * get lost on g4x as well, and interrupt delivery seems to stay
         * properly dead afterwards. So we'll just disable them for all
         * pre-gen5 chipsets.
+        *
+        * DP AUX and GMBUS IRQs on gen4 seem to be able to generate legacy
+        * interrupts even when in MSI mode. This results in spurious
+        * interrupt warnings if the legacy IRQ number is shared with another
+        * device. The kernel then disables that interrupt source and so
+        * prevents the other device from working properly.
         */
        if (INTEL_GEN(dev_priv) >= 5) {
                if (pci_enable_msi(pdev) < 0)
@@ -1197,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
        ret = intel_gvt_init(dev_priv);
        if (ret)
-               goto err_ggtt;
+               goto err_msi;
+
+       intel_opregion_setup(dev_priv);
 
        return 0;
 
+err_msi:
+       if (pdev->msi_enabled)
+               pci_disable_msi(pdev);
+       pm_qos_remove_request(&dev_priv->pm_qos);
 err_ggtt:
        i915_ggtt_cleanup_hw(dev_priv);
 err_perf:
@@ -1433,6 +1423,7 @@ out_fini:
        drm_dev_fini(&dev_priv->drm);
 out_free:
        kfree(dev_priv);
+       pci_set_drvdata(pdev, NULL);
        return ret;
 }
 
@@ -1553,17 +1544,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
        return false;
 }
 
+static int i915_drm_prepare(struct drm_device *dev)
+{
+       struct drm_i915_private *i915 = to_i915(dev);
+       int err;
+
+       /*
+        * NB intel_display_suspend() may issue new requests after we've
+        * ostensibly marked the GPU as ready-to-sleep here. We need to
+        * split out that work and pull it forward so that after this point,
+        * the GPU is not woken again.
+        */
+       err = i915_gem_suspend(i915);
+       if (err)
+               dev_err(&i915->drm.pdev->dev,
+                       "GEM idle failed, suspend/resume might fail\n");
+
+       return err;
+}
+
 static int i915_drm_suspend(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        pci_power_t opregion_target_state;
-       int error;
-
-       /* ignore lid events during suspend */
-       mutex_lock(&dev_priv->modeset_restore_lock);
-       dev_priv->modeset_restore = MODESET_SUSPENDED;
-       mutex_unlock(&dev_priv->modeset_restore_lock);
 
        disable_rpm_wakeref_asserts(dev_priv);
 
@@ -1575,16 +1579,9 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        pci_save_state(pdev);
 
-       error = i915_gem_suspend(dev_priv);
-       if (error) {
-               dev_err(&pdev->dev,
-                       "GEM idle failed, resume might fail\n");
-               goto out;
-       }
-
        intel_display_suspend(dev);
 
-       intel_dp_mst_suspend(dev);
+       intel_dp_mst_suspend(dev_priv);
 
        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_hpd_cancel_work(dev_priv);
@@ -1600,7 +1597,6 @@ static int i915_drm_suspend(struct drm_device *dev)
        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
        intel_opregion_notify_adapter(dev_priv, opregion_target_state);
 
-       intel_uncore_suspend(dev_priv);
        intel_opregion_unregister(dev_priv);
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1609,10 +1605,9 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        intel_csr_ucode_suspend(dev_priv);
 
-out:
        enable_rpm_wakeref_asserts(dev_priv);
 
-       return error;
+       return 0;
 }
 
 static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
@@ -1623,7 +1618,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        disable_rpm_wakeref_asserts(dev_priv);
 
+       i915_gem_suspend_late(dev_priv);
+
        intel_display_set_init_power(dev_priv, false);
+       intel_uncore_suspend(dev_priv);
 
        /*
         * In case of firmware assisted context save/restore don't manually
@@ -1710,6 +1708,8 @@ static int i915_drm_resume(struct drm_device *dev)
        disable_rpm_wakeref_asserts(dev_priv);
        intel_sanitize_gt_powersave(dev_priv);
 
+       i915_gem_sanitize(dev_priv);
+
        ret = i915_ggtt_enable_hw(dev_priv);
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");
@@ -1746,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_dp_mst_resume(dev);
+       intel_dp_mst_resume(dev_priv);
 
        intel_display_resume(dev);
 
@@ -1764,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
-       mutex_lock(&dev_priv->modeset_restore_lock);
-       dev_priv->modeset_restore = MODESET_DONE;
-       mutex_unlock(&dev_priv->modeset_restore_lock);
-
        intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
        enable_rpm_wakeref_asserts(dev_priv);
@@ -1851,7 +1847,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
        else
                intel_display_set_init_power(dev_priv, true);
 
-       i915_gem_sanitize(dev_priv);
+       intel_engines_sanitize(dev_priv);
 
        enable_rpm_wakeref_asserts(dev_priv);
 
@@ -2081,6 +2077,22 @@ out:
        return ret;
 }
 
+static int i915_pm_prepare(struct device *kdev)
+{
+       struct pci_dev *pdev = to_pci_dev(kdev);
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       if (!dev) {
+               dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+               return -ENODEV;
+       }
+
+       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
+
+       return i915_drm_prepare(dev);
+}
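
The new .prepare hook runs before .suspend across the whole device tree, which is what allows GEM idling to move out of i915_drm_suspend(). A minimal model of a prepare/suspend callback pair (this pm_ops struct is invented for illustration, not the kernel's dev_pm_ops):

#include <stdio.h>

struct pm_ops {
        int (*prepare)(void);
        int (*suspend)(void);
};

static int drm_prepare(void)
{
        puts("prepare: idle the GPU; no new requests past this point");
        return 0;
}

static int drm_suspend(void)
{
        puts("suspend: save state and power down");
        return 0;
}

static const struct pm_ops i915_pm = {
        .prepare = drm_prepare,
        .suspend = drm_suspend,
};

int main(void)
{
        if (i915_pm.prepare())
                return 1;
        return i915_pm.suspend();
}
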
+
 static int i915_pm_suspend(struct device *kdev)
 {
        struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2731,6 +2743,7 @@ const struct dev_pm_ops i915_pm_ops = {
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
         * PMSG_RESUME]
         */
+       .prepare = i915_pm_prepare,
        .suspend = i915_pm_suspend,
        .suspend_late = i915_pm_suspend_late,
        .resume_early = i915_pm_resume_early,
index 34c125e2d90c094c98759e127d2f93780cd65788..4aca5344863d6fc013470b41a706c4e7bd18d567 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/hash.h>
 #include <linux/intel-iommu.h>
 #include <linux/kref.h>
+#include <linux/mm_types.h>
 #include <linux/perf_event.h>
 #include <linux/pm_qos.h>
 #include <linux/reservation.h>
@@ -85,8 +86,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20180514"
-#define DRIVER_TIMESTAMP       1526300884
+#define DRIVER_DATE            "20180719"
+#define DRIVER_TIMESTAMP       1532015279
 
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
        I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+
 bool __i915_inject_load_failure(const char *func, int line);
 #define i915_inject_load_failure() \
        __i915_inject_load_failure(__func__, __LINE__)
+
+bool i915_error_injected(void);
+
 #else
+
 #define i915_inject_load_failure() false
+#define i915_error_injected() false
+
 #endif
 
+#define i915_load_error(i915, fmt, ...)                                         \
+       __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
+                     fmt, ##__VA_ARGS__)
+
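
The relocated i915_load_error() picks its severity at the call site, demoting injected failures to debug so fault-injection runs don't flood the error log. A freestanding sketch of the pattern, using GNU-style variadic macros as the kernel does:

#include <stdbool.h>
#include <stdio.h>

static bool error_injected; /* stands in for i915_error_injected() */

#define LOG_DEBUG "dbg"
#define LOG_ERR   "err"

/* Severity is chosen when the macro expands, per call site. */
#define load_error(fmt, ...) \
        fprintf(stderr, "[%s] " fmt, \
                error_injected ? LOG_DEBUG : LOG_ERR, ##__VA_ARGS__)

int main(void)
{
        load_error("GEM init failed: %d\n", -5); /* [err] ... */
        error_injected = true;
        load_error("GEM init failed: %d\n", -5); /* [dbg] ... */
        return 0;
}
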
 typedef struct {
        uint32_t val;
 } uint_fixed_16_16_t;
@@ -287,7 +299,6 @@ struct i915_hotplug {
        u32 event_bits;
        struct delayed_work reenable_work;
 
-       struct intel_digital_port *irq_port[I915_MAX_PORTS];
        u32 long_port_mask;
        u32 short_port_mask;
        struct work_struct dig_port_work;
@@ -340,14 +351,21 @@ struct drm_i915_file_private {
 
        unsigned int bsd_engine;
 
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments the per-client ban score. Hangs in
+ * short succession also increment the ban score. If the ban threshold
+ * is reached, the client is considered banned and submitting more work
+ * will fail. This is a stopgap measure to limit badly behaving
+ * clients' access to the GPU. Note that unbannable contexts never
+ * increment the client ban score.
  */
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
-       atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST    1
+#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN   3
+#define I915_CLIENT_SCORE_BANNED       9
+       /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+       atomic_t ban_score;
+       unsigned long hang_timestamp;
 };
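
With the thresholds above, the arithmetic is: a context ban adds 3, a hang within 60 seconds of the previous one adds 1, and the client is refused at a score of 9, so three context bans suffice. A small model of the scoring:

#include <stdbool.h>
#include <stdio.h>

#define SCORE_HANG_FAST   1
#define SCORE_CONTEXT_BAN 3
#define SCORE_BANNED      9

static int ban_score;

static bool client_banned(void)
{
        return ban_score >= SCORE_BANNED;
}

int main(void)
{
        /* Three context bans push the client over the ban threshold. */
        for (int i = 0; i < 3; i++)
                ban_score += SCORE_CONTEXT_BAN;

        printf("score=%d banned=%d\n", ban_score, client_banned());
        return 0;
}
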
 
 /* Interface history:
@@ -493,6 +511,7 @@ struct intel_fbc {
 
        bool enabled;
        bool active;
+       bool flip_pending;
 
        bool underrun_detected;
        struct work_struct underrun_work;
@@ -560,12 +579,6 @@ struct intel_fbc {
                unsigned int gen9_wa_cfb_stride;
        } params;
 
-       struct intel_fbc_work {
-               bool scheduled;
-               u64 scheduled_vblank;
-               struct work_struct work;
-       } work;
-
        const char *no_fbc_reason;
 };
 
@@ -601,26 +614,17 @@ struct i915_psr {
        bool sink_support;
        struct intel_dp *enabled;
        bool active;
-       struct delayed_work work;
+       struct work_struct work;
        unsigned busy_frontbuffer_bits;
        bool sink_psr2_support;
        bool link_standby;
        bool colorimetry_support;
        bool alpm;
-       bool has_hw_tracking;
        bool psr2_enabled;
        u8 sink_sync_latency;
        bool debug;
        ktime_t last_entry_attempt;
        ktime_t last_exit;
-
-       void (*enable_source)(struct intel_dp *,
-                             const struct intel_crtc_state *);
-       void (*disable_source)(struct intel_dp *,
-                              const struct intel_crtc_state *);
-       void (*enable_sink)(struct intel_dp *);
-       void (*activate)(struct intel_dp *);
-       void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
 };
 
 enum intel_pch {
@@ -632,7 +636,7 @@ enum intel_pch {
        PCH_KBP,        /* Kaby Lake PCH */
        PCH_CNP,        /* Cannon Lake PCH */
        PCH_ICP,        /* Ice Lake PCH */
-       PCH_NOP,
+       PCH_NOP,        /* PCH without south display */
 };
 
 enum intel_sbi_destination {
@@ -645,6 +649,7 @@ enum intel_sbi_destination {
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 #define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -774,11 +779,17 @@ struct intel_rps {
        u8 rp0_freq;            /* Non-overclocked max frequency. */
        u16 gpll_ref_freq;      /* vlv/chv GPLL reference frequency */
 
-       u8 up_threshold; /* Current %busy required to uplock */
-       u8 down_threshold; /* Current %busy required to downclock */
-
        int last_adj;
-       enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+       struct {
+               struct mutex mutex;
+
+               enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+               unsigned int interactive;
+
+               u8 up_threshold; /* Current %busy required to upclock */
+               u8 down_threshold; /* Current %busy required to downclock */
+       } power;
 
        bool enabled;
        atomic_t num_waiters;
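The clocking thresholds and mode now sit in their own power
substructure under a dedicated mutex, alongside an interactive count
that display code raises around user-visible work through
intel_rps_mark_interactive() (declared further down in this header),
letting the frequency governor bias toward HIGH_POWER while, say, a
page flip is outstanding. A hedged sketch of the intended balanced
pairing (hypothetical callers):

        static void hypothetical_flip_prepare(struct drm_i915_private *i915)
        {
                intel_rps_mark_interactive(i915, true);  /* raise power.interactive */
        }

        static void hypothetical_flip_complete(struct drm_i915_private *i915)
        {
                intel_rps_mark_interactive(i915, false); /* drop it again */
        }
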
@@ -947,7 +958,7 @@ struct i915_gem_mm {
        /**
         * Small stash of WC pages
         */
-       struct pagevec wc_stash;
+       struct pagestash wc_stash;
 
        /**
         * tmpfs instance used for shmem backed objects
@@ -995,16 +1006,13 @@ struct i915_gem_mm {
 #define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
 #define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
 
-enum modeset_restore {
-       MODESET_ON_LID_OPEN,
-       MODESET_DONE,
-       MODESET_SUSPENDED,
-};
+#define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */
 
 #define DP_AUX_A 0x40
 #define DP_AUX_B 0x10
 #define DP_AUX_C 0x20
 #define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
 #define DP_AUX_F 0x60
 
 #define DDC_PIN_B  0x05
@@ -1049,9 +1057,9 @@ struct intel_vbt_data {
        /* Feature bits */
        unsigned int int_tv_support:1;
        unsigned int lvds_dither:1;
-       unsigned int lvds_vbt:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
+       unsigned int int_lvds_support:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
        unsigned int panel_type:4;
@@ -1067,7 +1075,6 @@ struct intel_vbt_data {
                int vswing;
                bool low_vswing;
                bool initialized;
-               bool support;
                int bpp;
                struct edp_power_seq pps;
        } edp;
@@ -1078,8 +1085,8 @@ struct intel_vbt_data {
                bool require_aux_wakeup;
                int idle_frames;
                enum psr_lines_to_wait lines_to_wait;
-               int tp1_wakeup_time;
-               int tp2_tp3_wakeup_time;
+               int tp1_wakeup_time_us;
+               int tp2_tp3_wakeup_time_us;
        } psr;
 
        struct {
@@ -1264,20 +1271,11 @@ enum intel_pipe_crc_source {
        INTEL_PIPE_CRC_SOURCE_MAX,
 };
 
-struct intel_pipe_crc_entry {
-       uint32_t frame;
-       uint32_t crc[5];
-};
-
 #define INTEL_PIPE_CRC_ENTRIES_NR      128
 struct intel_pipe_crc {
        spinlock_t lock;
-       bool opened;            /* exclusive access to the result file */
-       struct intel_pipe_crc_entry *entries;
-       enum intel_pipe_crc_source source;
-       int head, tail;
-       wait_queue_head_t wq;
        int skipped;
+       enum intel_pipe_crc_source source;
 };
 
 struct i915_frontbuffer_tracking {
@@ -1292,7 +1290,7 @@ struct i915_frontbuffer_tracking {
 };
 
 struct i915_wa_reg {
-       i915_reg_t addr;
+       u32 addr;
        u32 value;
        /* bitmask representing WA bits */
        u32 mask;
@@ -1732,12 +1730,9 @@ struct drm_i915_private {
 
        unsigned long quirks;
 
-       enum modeset_restore modeset_restore;
-       struct mutex modeset_restore_lock;
        struct drm_atomic_state *modeset_restore_state;
        struct drm_modeset_acquire_ctx reset_ctx;
 
-       struct list_head vm_list; /* Global list of all address spaces */
        struct i915_ggtt ggtt; /* VM representing the global address space */
 
        struct i915_gem_mm mm;
@@ -1843,6 +1838,7 @@ struct drm_i915_private {
                 */
                struct ida hw_ida;
 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
 #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
        } contexts;
 
@@ -1950,7 +1946,9 @@ struct drm_i915_private {
                         */
                        struct i915_perf_stream *exclusive_stream;
 
+                       struct intel_context *pinned_ctx;
                        u32 specific_ctx_id;
+                       u32 specific_ctx_id_mask;
 
                        struct hrtimer poll_check_timer;
                        wait_queue_head_t poll_wq;
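With GuC submission the hardware context id embeds a recycled lower
field, so the perf/OA stream can no longer demand an exact match
against its exclusive context; the new specific_ctx_id_mask narrows
the comparison to the stable bits. A sketch of the filter this
enables ("stream" stands in for the enclosing perf state; surrounding
code assumed):

        /* does an OA report belong to our pinned context? (sketch) */
        bool ours = (report_ctx_id & stream->specific_ctx_id_mask) ==
                    stream->specific_ctx_id;
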
@@ -2238,9 +2236,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
  **/
 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        return sg_is_last(sg) ? NULL : ____sg_next(sg);
 }
 
@@ -2306,6 +2301,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 }
 
 #define INTEL_INFO(dev_priv)   intel_info((dev_priv))
+#define DRIVER_CAPS(dev_priv)  (&(dev_priv)->caps)
 
 #define INTEL_GEN(dev_priv)    ((dev_priv)->info.gen)
 #define INTEL_DEVID(dev_priv)  ((dev_priv)->info.device_id)
@@ -2558,17 +2554,10 @@ intel_info(const struct drm_i915_private *dev_priv)
        (IS_CANNONLAKE(dev_priv) || \
         IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
 
-/*
- * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
- * even when in MSI mode. This results in spurious interrupt warnings if the
- * legacy irq no. is shared with another device. The kernel then disables that
- * interrupt source and so prevents the other device from working properly.
- *
- * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
- * interrupts.
- */
-#define HAS_AUX_IRQ(dev_priv)   true
 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
+#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
+                                       IS_GEMINILAKE(dev_priv) || \
+                                       IS_KABYLAKE(dev_priv))
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
@@ -2743,14 +2732,14 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
 int intel_engines_init(struct drm_i915_private *dev_priv);
 
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
+
 /* intel_hotplug.c */
 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask);
 void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
-                               enum hpd_pin pin);
 enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port);
 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
@@ -3097,9 +3086,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct i915_request *rq,
-                            unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
@@ -3164,12 +3150,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
+void i915_gem_fini(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-                          unsigned int flags);
+                          unsigned int flags, long timeout);
 int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-int i915_gem_fault(struct vm_fault *vmf);
+vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout,
@@ -3208,7 +3196,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
-       return container_of(vm, struct i915_hw_ppgtt, base);
+       return container_of(vm, struct i915_hw_ppgtt, vm);
 }
 
 /* i915_gem_fence_reg.c */
@@ -3315,7 +3303,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_shrinker_register(struct drm_i915_private *i915);
 void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
 
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3440,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
 extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
+                                      bool interactive);
 extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
 
@@ -3673,14 +3663,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
         return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
 }
 
-static inline unsigned long
-timespec_to_jiffies_timeout(const struct timespec *value)
-{
-       unsigned long j = timespec_to_jiffies(value);
-
-       return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
 /*
  * If you need to wait X milliseconds between events A and B, but event B
  * doesn't happen exactly after event A, you record the timestamp (jiffies) of
index 3704f4c0c2c970c31b0c6031050b20126e7ae2a9..fcc73a6ab503e2f4ffd5b9a5967b36d6ba5f1958 100644 (file)
@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
                      struct drm_mm_node *node, u32 size)
 {
        memset(node, 0, sizeof(*node));
-       return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+       return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                           size, 0, I915_COLOR_UNEVICTABLE,
                                           0, ggtt->mappable_end,
                                           DRM_MM_INSERT_LOW);
@@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 
 static u32 __i915_gem_park(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(i915->gt.active_requests);
        GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
@@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
 
 void i915_gem_park(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(i915->gt.active_requests);
 
@@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915)
 
 void i915_gem_unpark(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(!i915->gt.active_requests);
 
@@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        struct i915_vma *vma;
        u64 pinned;
 
-       pinned = ggtt->base.reserved;
+       pinned = ggtt->vm.reserved;
        mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
+       list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
-       list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
+       list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = ggtt->base.total;
+       args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -796,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
         * that was!).
         */
 
-       wmb();
+       i915_gem_chipset_flush(dev_priv);
 
        intel_runtime_pm_get(dev_priv);
        spin_lock_irq(&dev_priv->uncore.lock);
@@ -831,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
                }
                break;
 
+       case I915_GEM_DOMAIN_WC:
+               wmb();
+               break;
+
        case I915_GEM_DOMAIN_CPU:
                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
                break;
@@ -1217,9 +1227,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb();
-                       ggtt->base.insert_page(&ggtt->base,
-                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-                                              node.start, I915_CACHE_NONE, 0);
+                       ggtt->vm.insert_page(&ggtt->vm,
+                                            i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                            node.start, I915_CACHE_NONE, 0);
                        wmb();
                } else {
                        page_base += offset & PAGE_MASK;
@@ -1240,8 +1250,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 out_unpin:
        if (node.allocated) {
                wmb();
-               ggtt->base.clear_range(&ggtt->base,
-                                      node.start, node.size);
+               ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
@@ -1420,9 +1429,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                page_length = remain < page_length ? remain : page_length;
                if (node.allocated) {
                        wmb(); /* flush the write before we modify the GGTT */
-                       ggtt->base.insert_page(&ggtt->base,
-                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
-                                              node.start, I915_CACHE_NONE, 0);
+                       ggtt->vm.insert_page(&ggtt->vm,
+                                            i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                            node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
@@ -1449,8 +1458,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
 out_unpin:
        if (node.allocated) {
                wmb();
-               ggtt->base.clear_range(&ggtt->base,
-                                      node.start, node.size);
+               ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(&node);
        } else {
                i915_vma_unpin(vma);
@@ -1619,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto err;
        }
 
+       /* Writes not allowed into this read-only object */
+       if (i915_gem_object_is_readonly(obj)) {
+               ret = -EINVAL;
+               goto err;
+       }
+
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
        ret = -ENODEV;
@@ -1991,9 +2005,9 @@ compute_partial_view(struct drm_i915_gem_object *obj,
  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
  */
-int i915_gem_fault(struct vm_fault *vmf)
+vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 {
-#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
+#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
        struct vm_area_struct *area = vmf->vma;
        struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
        struct drm_device *dev = obj->base.dev;
@@ -2002,9 +2016,12 @@ int i915_gem_fault(struct vm_fault *vmf)
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
        struct i915_vma *vma;
        pgoff_t page_offset;
-       unsigned int flags;
        int ret;
 
+       /* Sanity check that we allow writing into this object */
+       if (i915_gem_object_is_readonly(obj) && write)
+               return VM_FAULT_SIGBUS;
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
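Together with the -EINVAL check added to the pwrite ioctl above, this
makes read-only GEM objects enforceable at the user boundary: a write
fault through a GTT mmap is refused with VM_FAULT_SIGBUS before any
binding work happens. Plausibly this is groundwork for honouring
read-only userptr objects; a hypothetical userspace fragment (buf, len
and an open DRM fd are assumed, the uapi flag is long-standing):

        int ret;
        struct drm_i915_gem_userptr arg = {
                .user_ptr = (__u64)(uintptr_t)buf,
                .user_size = len,
                .flags = I915_USERPTR_READ_ONLY,
        };

        ret = ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
        /* on success, pwrite into arg.handle now fails with -EINVAL and
         * a write through a GTT mmap of it raises SIGBUS */
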
 
@@ -2038,27 +2055,34 @@ int i915_gem_fault(struct vm_fault *vmf)
                goto err_unlock;
        }
 
-       /* If the object is smaller than a couple of partial vma, it is
-        * not worth only creating a single partial vma - we may as well
-        * clear enough space for the full object.
-        */
-       flags = PIN_MAPPABLE;
-       if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
-               flags |= PIN_NONBLOCK | PIN_NONFAULT;
 
        /* Now pin it into the GTT as needed */
-       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                      PIN_MAPPABLE |
+                                      PIN_NONBLOCK |
+                                      PIN_NONFAULT);
        if (IS_ERR(vma)) {
                /* Use a partial view if it is bigger than available space */
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+               unsigned int flags;
 
-               /* Userspace is now writing through an untracked VMA, abandon
+               flags = PIN_MAPPABLE;
+               if (view.type == I915_GGTT_VIEW_NORMAL)
+                       flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+               /*
+                * Userspace is now writing through an untracked VMA, abandon
                 * all hope that the hardware is able to track future writes.
                 */
                obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
-               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+               if (IS_ERR(vma) && !view.type) {
+                       flags = PIN_MAPPABLE;
+                       view.type = I915_GGTT_VIEW_PARTIAL;
+                       vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+               }
        }
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
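The fault handler no longer pre-computes pin flags from the object
size; it always starts with the cheapest full-object pin and degrades
stepwise. Condensed:

        /* the pin ladder above (sketch):
         * 1. whole object: PIN_MAPPABLE | PIN_NONBLOCK | PIN_NONFAULT
         * 2. on failure, a partial view around the faulting page, adding
         *    PIN_NONBLOCK only while the computed view is still
         *    I915_GGTT_VIEW_NORMAL (i.e. the object fit after all)
         * 3. last resort: force I915_GGTT_VIEW_PARTIAL with plain
         *    PIN_MAPPABLE, accepting a blocking evict
         */
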
@@ -2108,10 +2132,9 @@ err:
                 * fail). But any other -EIO isn't ours (e.g. swap in failure)
                 * and so needs to be reported.
                 */
-               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-                       ret = VM_FAULT_SIGBUS;
-                       break;
-               }
+               if (!i915_terminally_wedged(&dev_priv->gpu_error))
+                       return VM_FAULT_SIGBUS;
+               /* else: fall through */
        case -EAGAIN:
                /*
                 * EAGAIN means the gpu is hung and we'll wait for the error
@@ -2126,21 +2149,16 @@ err:
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
-               ret = VM_FAULT_NOPAGE;
-               break;
+               return VM_FAULT_NOPAGE;
        case -ENOMEM:
-               ret = VM_FAULT_OOM;
-               break;
+               return VM_FAULT_OOM;
        case -ENOSPC:
        case -EFAULT:
-               ret = VM_FAULT_SIGBUS;
-               break;
+               return VM_FAULT_SIGBUS;
        default:
                WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
-               ret = VM_FAULT_SIGBUS;
-               break;
+               return VM_FAULT_SIGBUS;
        }
-       return ret;
 }
 
 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
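Returning the vm_fault_t codes directly from each case (instead of
funnelling them through ret) pairs with the signature change to
vm_fault_t earlier in this diff. The translation, condensed from the
switch above plus its elided middle cases:

        /* errno -> vm_fault_t (sketch; middle cases elided by the hunk):
         *   -EIO: VM_FAULT_SIGBUS, unless terminally wedged, when it
         *         falls through to the retry path below
         *   -EAGAIN and friends (-ERESTARTSYS/-EINTR/-EBUSY): VM_FAULT_NOPAGE
         *   -ENOMEM: VM_FAULT_OOM
         *   -ENOSPC/-EFAULT/anything else: VM_FAULT_SIGBUS
         */
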
@@ -2259,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 
        /* Attempt to reap some mmap space from dead objects */
        do {
-               err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+               err = i915_gem_wait_for_idle(dev_priv,
+                                            I915_WAIT_INTERRUPTIBLE,
+                                            MAX_SCHEDULE_TIMEOUT);
                if (err)
                        break;
 
@@ -2404,29 +2424,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
        rcu_read_unlock();
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-                                enum i915_mm_subclass subclass)
+static struct sg_table *
+__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
 
-       if (i915_gem_object_has_pinned_pages(obj))
-               return;
-
-       GEM_BUG_ON(obj->bind_count);
-       if (!i915_gem_object_has_pages(obj))
-               return;
-
-       /* May be called by shrinker from within get_pages() (on another bo) */
-       mutex_lock_nested(&obj->mm.lock, subclass);
-       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
-               goto unlock;
-
-       /* ->put_pages might need to allocate memory for the bit17 swizzle
-        * array, hence protect them from being reaped by removing them from gtt
-        * lists early. */
        pages = fetch_and_zero(&obj->mm.pages);
-       GEM_BUG_ON(!pages);
+       if (!pages)
+               return NULL;
 
        spin_lock(&i915->mm.obj_lock);
        list_del(&obj->mm.link);
@@ -2445,12 +2451,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        }
 
        __i915_gem_object_reset_page_iter(obj);
+       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
 
+       return pages;
+}
+
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+                                enum i915_mm_subclass subclass)
+{
+       struct sg_table *pages;
+
+       if (i915_gem_object_has_pinned_pages(obj))
+               return;
+
+       GEM_BUG_ON(obj->bind_count);
+       if (!i915_gem_object_has_pages(obj))
+               return;
+
+       /* May be called by shrinker from within get_pages() (on another bo) */
+       mutex_lock_nested(&obj->mm.lock, subclass);
+       if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+               goto unlock;
+
+       /*
+        * ->put_pages might need to allocate memory for the bit17 swizzle
+        * array, hence protect them from being reaped by removing them from gtt
+        * lists early.
+        */
+       pages = __i915_gem_object_unset_pages(obj);
        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);
 
-       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
-
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
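The bookkeeping half of the teardown is now factored into
__i915_gem_object_unset_pages(), which detaches and returns the
sg_table (clearing the shrinker link and page-size state) so each
caller can dispose of it its own way: put_pages hands it to the
backend, while i915_gem_object_attach_phys() further down re-attaches
it on error. The caller contract, in short:

        pages = __i915_gem_object_unset_pages(obj);
        if (!IS_ERR(pages)) /* the earlier has-pages check keeps this from seeing NULL */
                obj->ops->put_pages(obj, pages);
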
@@ -2933,32 +2964,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        return 0;
 }
 
-static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+                                       const struct i915_gem_context *ctx)
 {
-       bool banned;
+       unsigned int score;
+       unsigned long prev_hang;
 
-       atomic_inc(&ctx->guilty_count);
+       if (i915_gem_context_is_banned(ctx))
+               score = I915_CLIENT_SCORE_CONTEXT_BAN;
+       else
+               score = 0;
 
-       banned = false;
-       if (i915_gem_context_is_bannable(ctx)) {
-               unsigned int score;
+       prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+       if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+               score += I915_CLIENT_SCORE_HANG_FAST;
 
-               score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-                                         &ctx->ban_score);
-               banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+       if (score) {
+               atomic_add(score, &file_priv->ban_score);
 
-               DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-                                ctx->name, score, yesno(banned));
+               DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+                                ctx->name, score,
+                                atomic_read(&file_priv->ban_score));
        }
-       if (!banned)
+}
+
+static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
+{
+       unsigned int score;
+       bool banned, bannable;
+
+       atomic_inc(&ctx->guilty_count);
+
+       bannable = i915_gem_context_is_bannable(ctx);
+       score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+       banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+       /* Cool contexts don't accumulate client ban score */
+       if (!bannable)
                return;
 
-       i915_gem_context_set_banned(ctx);
-       if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-               atomic_inc(&ctx->file_priv->context_bans);
-               DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-                                ctx->name, atomic_read(&ctx->file_priv->context_bans));
+       if (banned) {
+               DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+                                ctx->name, atomic_read(&ctx->guilty_count),
+                                score);
+               i915_gem_context_set_banned(ctx);
        }
+
+       if (!IS_ERR_OR_NULL(ctx->file_priv))
+               i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -3003,7 +3056,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
-       struct i915_request *request = NULL;
+       struct i915_request *request;
 
        /*
         * During the reset sequence, we must prevent the engine from
@@ -3014,52 +3067,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         */
        intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
 
-       /*
-        * Prevent the signaler thread from updating the request
-        * state (by calling dma_fence_signal) as we are processing
-        * the reset. The write from the GPU of the seqno is
-        * asynchronous and the signaler thread may see a different
-        * value to us and declare the request complete, even though
-        * the reset routine have picked that request as the active
-        * (incomplete) request. This conflict is not handled
-        * gracefully!
-        */
-       kthread_park(engine->breadcrumbs.signaler);
-
-       /*
-        * Prevent request submission to the hardware until we have
-        * completed the reset in i915_gem_reset_finish(). If a request
-        * is completed by one engine, it may then queue a request
-        * to a second via its execlists->tasklet *just* as we are
-        * calling engine->init_hw() and also writing the ELSP.
-        * Turning off the execlists->tasklet until the reset is over
-        * prevents the race.
-        *
-        * Note that this needs to be a single atomic operation on the
-        * tasklet (flush existing tasks, prevent new tasks) to prevent
-        * a race between reset and set-wedged. It is not, so we do the best
-        * we can atm and make sure we don't lock the machine up in the more
-        * common case of recursively being called from set-wedged from inside
-        * i915_reset.
-        */
-       if (!atomic_read(&engine->execlists.tasklet.count))
-               tasklet_kill(&engine->execlists.tasklet);
-       tasklet_disable(&engine->execlists.tasklet);
-
-       /*
-        * We're using worker to queue preemption requests from the tasklet in
-        * GuC submission mode.
-        * Even though tasklet was disabled, we may still have a worker queued.
-        * Let's make sure that all workers scheduled before disabling the
-        * tasklet are completed before continuing with the reset.
-        */
-       if (engine->i915->guc.preempt_wq)
-               flush_workqueue(engine->i915->guc.preempt_wq);
-
-       if (engine->irq_seqno_barrier)
-               engine->irq_seqno_barrier(engine);
-
-       request = i915_gem_find_active_request(engine);
+       request = engine->reset.prepare(engine);
        if (request && request->fence.error == -EIO)
                request = ERR_PTR(-EIO); /* Previous reset failed! */
 
@@ -3089,43 +3097,24 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
        return err;
 }
 
-static void skip_request(struct i915_request *request)
-{
-       void *vaddr = request->ring->vaddr;
-       u32 head;
-
-       /* As this request likely depends on state from the lost
-        * context, clear out all the user operations leaving the
-        * breadcrumb at the end (so we get the fence notifications).
-        */
-       head = request->head;
-       if (request->postfix < head) {
-               memset(vaddr + head, 0, request->ring->size - head);
-               head = 0;
-       }
-       memset(vaddr + head, 0, request->postfix - head);
-
-       dma_fence_set_error(&request->fence, -EIO);
-}
-
 static void engine_skip_context(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       struct i915_gem_context *hung_ctx = request->ctx;
+       struct i915_gem_context *hung_ctx = request->gem_context;
        struct i915_timeline *timeline = request->timeline;
        unsigned long flags;
 
        GEM_BUG_ON(timeline == &engine->timeline);
 
        spin_lock_irqsave(&engine->timeline.lock, flags);
-       spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
+       spin_lock(&timeline->lock);
 
        list_for_each_entry_continue(request, &engine->timeline.requests, link)
-               if (request->ctx == hung_ctx)
-                       skip_request(request);
+               if (request->gem_context == hung_ctx)
+                       i915_request_skip(request, -EIO);
 
        list_for_each_entry(request, &timeline->requests, link)
-               skip_request(request);
+               i915_request_skip(request, -EIO);
 
        spin_unlock(&timeline->lock);
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -3167,11 +3156,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
        }
 
        if (stalled) {
-               i915_gem_context_mark_guilty(request->ctx);
-               skip_request(request);
+               i915_gem_context_mark_guilty(request->gem_context);
+               i915_request_skip(request, -EIO);
 
                /* If this context is now banned, skip all pending requests. */
-               if (i915_gem_context_is_banned(request->ctx))
+               if (i915_gem_context_is_banned(request->gem_context))
                        engine_skip_context(request);
        } else {
                /*
@@ -3181,15 +3170,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
                 */
                request = i915_gem_find_active_request(engine);
                if (request) {
-                       i915_gem_context_mark_innocent(request->ctx);
+                       unsigned long flags;
+
+                       i915_gem_context_mark_innocent(request->gem_context);
                        dma_fence_set_error(&request->fence, -EAGAIN);
 
                        /* Rewind the engine to replay the incomplete rq */
-                       spin_lock_irq(&engine->timeline.lock);
+                       spin_lock_irqsave(&engine->timeline.lock, flags);
                        request = list_prev_entry(request, link);
                        if (&request->link == &engine->timeline.requests)
                                request = NULL;
-                       spin_unlock_irq(&engine->timeline.lock);
+                       spin_unlock_irqrestore(&engine->timeline.lock, flags);
                }
        }
 
@@ -3210,13 +3201,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine,
        if (request)
                request = i915_gem_reset_request(engine, request, stalled);
 
-       if (request) {
-               DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-                                engine->name, request->global_seqno);
-       }
-
        /* Setup the CS to resume from the breadcrumb of the hung request */
-       engine->reset_hw(engine, request);
+       engine->reset.reset(engine, request);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv,
@@ -3230,14 +3216,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
        i915_retire_requests(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
-               struct i915_gem_context *ctx;
+               struct intel_context *ce;
 
                i915_gem_reset_engine(engine,
                                      engine->hangcheck.active_request,
                                      stalled_mask & ENGINE_MASK(id));
-               ctx = fetch_and_zero(&engine->last_retired_context);
-               if (ctx)
-                       intel_context_unpin(ctx, engine);
+               ce = fetch_and_zero(&engine->last_retired_context);
+               if (ce)
+                       intel_context_unpin(ce);
 
                /*
                 * Ostensibly, we always want a context loaded for powersaving,
@@ -3255,7 +3241,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
                        rq = i915_request_alloc(engine,
                                                dev_priv->kernel_context);
                        if (!IS_ERR(rq))
-                               __i915_request_add(rq, false);
+                               i915_request_add(rq);
                }
        }
 
@@ -3264,8 +3250,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-       tasklet_enable(&engine->execlists.tasklet);
-       kthread_unpark(engine->breadcrumbs.signaler);
+       engine->reset.finish(engine);
 
        intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
 }
@@ -3543,6 +3528,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
                work_pending(&i915->gt.idle_work.work));
 }
 
+static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return;
+
+       GEM_BUG_ON(i915->gt.active_requests);
+       for_each_engine(engine, i915, id) {
+               GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
+               GEM_BUG_ON(engine->last_retired_context !=
+                          to_intel_context(i915->kernel_context, engine));
+       }
+}
+
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
@@ -3554,6 +3555,24 @@ i915_gem_idle_work_handler(struct work_struct *work)
        if (!READ_ONCE(dev_priv->gt.awake))
                return;
 
+       if (READ_ONCE(dev_priv->gt.active_requests))
+               return;
+
+       /*
+        * Flush out the last user context, leaving only the pinned
+        * kernel context resident. When we are idling on the kernel_context,
+        * no more new requests (with a context switch) are emitted and we
+        * can finally rest. A consequence is that the idle work handler is
+        * always called at least twice before idling (and if the system is
+        * idle that implies a round trip through the retire worker).
+        */
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i915_gem_switch_to_kernel_context(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
+                 READ_ONCE(dev_priv->gt.active_requests));
+
        /*
         * Wait for last execlists context complete, but bail out in case a
         * new request is submitted. As we don't trust the hardware, we
@@ -3587,6 +3606,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
 
        epoch = __i915_gem_park(dev_priv);
 
+       assert_kernel_context_is_current(dev_priv);
+
        rearm_hangcheck = false;
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3733,9 +3754,31 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        return ret;
 }
 
-static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
+static long wait_for_timeline(struct i915_timeline *tl,
+                             unsigned int flags, long timeout)
 {
-       return i915_gem_active_wait(&tl->last_request, flags);
+       struct i915_request *rq;
+
+       rq = i915_gem_active_get_unlocked(&tl->last_request);
+       if (!rq)
+               return timeout;
+
+       /*
+        * "Race-to-idle".
+        *
+        * Switching to the kernel context is often used as a synchronous
+        * step prior to idling, e.g. in suspend for flushing all
+        * current operations to memory before sleeping. These we
+        * want to complete as quickly as possible to avoid prolonged
+        * stalls, so allow the gpu to boost to maximum clocks.
+        */
+       if (flags & I915_WAIT_FOR_IDLE_BOOST)
+               gen6_rps_boost(rq, NULL);
+
+       timeout = i915_request_wait(rq, flags, timeout);
+       i915_request_put(rq);
+
+       return timeout;
 }
 
 static int wait_for_engines(struct drm_i915_private *i915)
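wait_for_timeline() now consumes and returns a jiffies budget, so
i915_gem_wait_for_idle() can bound the total wait across every
timeline and propagate a negative return as the error.
I915_WAIT_FOR_IDLE_BOOST implements the race-to-idle described above;
the suspend path later in this diff combines them like so (sketch of
that call):

        static int hypothetical_flush_before_suspend(struct drm_i915_private *i915)
        {
                return i915_gem_wait_for_idle(i915,
                                              I915_WAIT_INTERRUPTIBLE |
                                              I915_WAIT_LOCKED |
                                              I915_WAIT_FOR_IDLE_BOOST,
                                              MAX_SCHEDULE_TIMEOUT);
        }
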
@@ -3751,8 +3794,13 @@ static int wait_for_engines(struct drm_i915_private *i915)
        return 0;
 }
 
-int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915,
+                          unsigned int flags, long timeout)
 {
+       GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
+                 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
+                 timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
+
        /* If the device is asleep, we have no requests outstanding */
        if (!READ_ONCE(i915->gt.awake))
                return 0;
@@ -3764,26 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
                lockdep_assert_held(&i915->drm.struct_mutex);
 
                list_for_each_entry(tl, &i915->gt.timelines, link) {
-                       err = wait_for_timeline(tl, flags);
-                       if (err)
-                               return err;
+                       timeout = wait_for_timeline(tl, flags, timeout);
+                       if (timeout < 0)
+                               return timeout;
                }
-               i915_retire_requests(i915);
 
-               return wait_for_engines(i915);
+               err = wait_for_engines(i915);
+               if (err)
+                       return err;
+
+               i915_retire_requests(i915);
+               GEM_BUG_ON(i915->gt.active_requests);
        } else {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;
-               int err;
 
                for_each_engine(engine, i915, id) {
-                       err = wait_for_timeline(&engine->timeline, flags);
-                       if (err)
-                               return err;
-               }
+                       struct i915_timeline *tl = &engine->timeline;
 
-               return 0;
+                       timeout = wait_for_timeline(tl, flags, timeout);
+                       if (timeout < 0)
+                               return timeout;
+               }
        }
+
+       return 0;
 }
 
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
@@ -4357,7 +4410,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         u64 flags)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_address_space *vm = &dev_priv->ggtt.base;
+       struct i915_address_space *vm = &dev_priv->ggtt.vm;
        struct i915_vma *vma;
        int ret;
 
@@ -4945,25 +4998,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
                i915_gem_object_put(obj);
 }
 
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+void i915_gem_sanitize(struct drm_i915_private *i915)
 {
-       struct i915_gem_context *kernel_context = i915->kernel_context;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       int err;
 
-       for_each_engine(engine, i915, id) {
-               GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
-               GEM_BUG_ON(engine->last_retired_context != kernel_context);
-       }
-}
+       GEM_TRACE("\n");
 
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
-       if (i915_terminally_wedged(&i915->gpu_error)) {
-               mutex_lock(&i915->drm.struct_mutex);
+       mutex_lock(&i915->drm.struct_mutex);
+
+       intel_runtime_pm_get(i915);
+       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+       /*
+        * As we have just resumed the machine and woken the device up from
+        * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+        * back to defaults, recovering from whatever wedged state we left it
+        * in and so worth trying to use the device once more.
+        */
+       if (i915_terminally_wedged(&i915->gpu_error))
                i915_gem_unset_wedged(i915);
-               mutex_unlock(&i915->drm.struct_mutex);
-       }
 
        /*
         * If we inherit context state from the BIOS or earlier occupants
@@ -4973,60 +5026,94 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
         * it may impact the display and we are uncertain about the stability
         * of the reset, so this could be applied to even earlier gen.
         */
+       err = -ENODEV;
        if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
-               WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+               err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
+       if (!err)
+               intel_engines_sanitize(i915);
+
+       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+       intel_runtime_pm_put(i915);
+
+       i915_gem_contexts_lost(i915);
+       mutex_unlock(&i915->drm.struct_mutex);
 }
 
-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
 {
-       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
-       intel_runtime_pm_get(dev_priv);
-       intel_suspend_gt_powersave(dev_priv);
+       GEM_TRACE("\n");
 
-       mutex_lock(&dev->struct_mutex);
+       intel_runtime_pm_get(i915);
+       intel_suspend_gt_powersave(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
 
-       /* We have to flush all the executing contexts to main memory so
+       /*
+        * We have to flush all the executing contexts to main memory so
         * that they can be saved in the hibernation image. To ensure the last
         * context image is coherent, we have to switch away from it. That
-        * leaves the dev_priv->kernel_context still active when
+        * leaves the i915->kernel_context still active when
         * we actually suspend, and its image in memory may not match the GPU
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-               ret = i915_gem_switch_to_kernel_context(dev_priv);
+       if (!i915_terminally_wedged(&i915->gpu_error)) {
+               ret = i915_gem_switch_to_kernel_context(i915);
                if (ret)
                        goto err_unlock;
 
-               ret = i915_gem_wait_for_idle(dev_priv,
+               ret = i915_gem_wait_for_idle(i915,
                                             I915_WAIT_INTERRUPTIBLE |
-                                            I915_WAIT_LOCKED);
+                                            I915_WAIT_LOCKED |
+                                            I915_WAIT_FOR_IDLE_BOOST,
+                                            MAX_SCHEDULE_TIMEOUT);
                if (ret && ret != -EIO)
                        goto err_unlock;
 
-               assert_kernel_context_is_current(dev_priv);
+               assert_kernel_context_is_current(i915);
        }
-       i915_gem_contexts_lost(dev_priv);
-       mutex_unlock(&dev->struct_mutex);
+       i915_retire_requests(i915); /* ensure we flush after wedging */
+
+       mutex_unlock(&i915->drm.struct_mutex);
 
-       intel_uc_suspend(dev_priv);
+       intel_uc_suspend(i915);
 
-       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+       cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+       cancel_delayed_work_sync(&i915->gt.retire_work);
 
-       /* As the idle_work is rearming if it detects a race, play safe and
+       /*
+        * As the idle_work rearms itself if it detects a race, play safe and
         * repeat the flush until it is definitely idle.
         */
-       drain_delayed_work(&dev_priv->gt.idle_work);
+       drain_delayed_work(&i915->gt.idle_work);
 
-       /* Assert that we sucessfully flushed all the work and
+       /*
+        * Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
-       WARN_ON(dev_priv->gt.awake);
-       if (WARN_ON(!intel_engines_are_idle(dev_priv)))
-               i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+       WARN_ON(i915->gt.awake);
+       if (WARN_ON(!intel_engines_are_idle(i915)))
+               i915_gem_set_wedged(i915); /* no hope, discard everything */
+
+       intel_runtime_pm_put(i915);
+       return 0;
+
+err_unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+       intel_runtime_pm_put(i915);
+       return ret;
+}
+
+void i915_gem_suspend_late(struct drm_i915_private *i915)
+{
+       struct drm_i915_gem_object *obj;
+       struct list_head *phases[] = {
+               &i915->mm.unbound_list,
+               &i915->mm.bound_list,
+               NULL
+       }, **phase;
 
        /*
          * Neither the BIOS, ourselves, nor any other kernel
@@ -5047,20 +5134,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
         * machines is a good idea, we don't - just in case it leaves the
         * machine in an unusable condition.
         */
-       intel_uc_sanitize(dev_priv);
-       i915_gem_sanitize(dev_priv);
 
-       intel_runtime_pm_put(dev_priv);
-       return 0;
+       mutex_lock(&i915->drm.struct_mutex);
+       for (phase = phases; *phase; phase++) {
+               list_for_each_entry(obj, *phase, mm.link)
+                       WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+       }
+       mutex_unlock(&i915->drm.struct_mutex);
 
-err_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       intel_runtime_pm_put(dev_priv);
-       return ret;
+       intel_uc_sanitize(i915);
+       i915_gem_sanitize(i915);
 }
 
 void i915_gem_resume(struct drm_i915_private *i915)
 {
+       GEM_TRACE("\n");
+
        WARN_ON(i915->gt.awake);
 
        mutex_lock(&i915->drm.struct_mutex);
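Suspend is now two-phase: i915_gem_suspend() quiesces the GPU while
the machine is still fully awake, and the new i915_gem_suspend_late()
runs later in the PM sequence to push every bound and unbound object
back to the GTT domain before sanitizing the hardware. Presumed hook
ordering (the callers live in i915_drv.c, outside this excerpt):

        /* assumed PM ordering after the split:
         *   .suspend      -> ... -> i915_gem_suspend(i915)
         *   .suspend_late -> ... -> i915_gem_suspend_late(i915)
         */
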
@@ -5234,8 +5323,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 
        /* Only when the HW is re-initialised, can we replay the requests */
        ret = __i915_gem_restart_engines(dev_priv);
+       if (ret)
+               goto cleanup_uc;
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+       return 0;
+
+cleanup_uc:
+       intel_uc_fini_hw(dev_priv);
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
        return ret;
 }
 
@@ -5272,7 +5371,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
                if (engine->init_context)
                        err = engine->init_context(rq);
 
-               __i915_request_add(rq, true);
+               i915_request_add(rq);
                if (err)
                        goto err_active;
        }
@@ -5281,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
        if (err)
                goto err_active;
 
-       err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
-       if (err)
+       if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
+               i915_gem_set_wedged(i915);
+               err = -EIO; /* Caller will declare us wedged */
                goto err_active;
+       }
 
        assert_kernel_context_is_current(i915);
 
@@ -5346,7 +5447,9 @@ err_active:
        if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
                goto out_ctx;
 
-       if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
+       if (WARN_ON(i915_gem_wait_for_idle(i915,
+                                          I915_WAIT_LOCKED,
+                                          MAX_SCHEDULE_TIMEOUT)))
                goto out_ctx;
 
        i915_gem_contexts_lost(i915);
@@ -5357,12 +5460,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       /*
-        * We need to fallback to 4K pages since gvt gtt handling doesn't
-        * support huge page entries - we will need to check either hypervisor
-        * mm can support huge guest page or just do emulation in gvt.
-        */
-       if (intel_vgpu_active(dev_priv))
+       /* We need to fall back to 4K pages if the host doesn't support huge gtt. */
+       if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                mkwrite_device_info(dev_priv)->page_sizes =
                        I915_GTT_PAGE_SIZE_4K;
 
@@ -5380,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
        if (ret)
                return ret;
 
-       ret = intel_wopcm_init(&dev_priv->wopcm);
+       ret = intel_uc_init_misc(dev_priv);
        if (ret)
                return ret;
 
-       ret = intel_uc_init_misc(dev_priv);
+       ret = intel_wopcm_init(&dev_priv->wopcm);
        if (ret)
-               return ret;
+               goto err_uc_misc;
 
        /* This is just a security blanket to placate dragons.
         * On some systems, we very sporadically observe that the first TLBs
@@ -5462,8 +5561,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
         * driver doesn't explode during runtime.
         */
 err_init_hw:
-       i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
-       i915_gem_contexts_lost(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       WARN_ON(i915_gem_suspend(dev_priv));
+       i915_gem_suspend_late(dev_priv);
+
+       i915_gem_drain_workqueue(dev_priv);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
        intel_uc_fini_hw(dev_priv);
 err_uc_init:
        intel_uc_fini(dev_priv);
@@ -5480,6 +5585,7 @@ err_unlock:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+err_uc_misc:
        intel_uc_fini_misc(dev_priv);
 
        if (ret != -EIO)
@@ -5492,7 +5598,8 @@ err_unlock:
                 * for all other failure, such as an allocation failure, bail.
                 */
                if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-                       DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+                       i915_load_error(dev_priv,
+                                       "Failed to initialize GPU, declaring it wedged!\n");
                        i915_gem_set_wedged(dev_priv);
                }
                ret = 0;
@@ -5502,6 +5609,28 @@ err_unlock:
        return ret;
 }
 
+void i915_gem_fini(struct drm_i915_private *dev_priv)
+{
+       i915_gem_suspend_late(dev_priv);
+
+       /* Flush any outstanding unpin_work. */
+       i915_gem_drain_workqueue(dev_priv);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       intel_uc_fini_hw(dev_priv);
+       intel_uc_fini(dev_priv);
+       i915_gem_cleanup_engines(dev_priv);
+       i915_gem_contexts_fini(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       intel_uc_fini_misc(dev_priv);
+       i915_gem_cleanup_userptr(dev_priv);
+
+       i915_gem_drain_freed_objects(dev_priv);
+
+       WARN_ON(!list_empty(&dev_priv->contexts.list));
+}
+
 void i915_gem_init_mmio(struct drm_i915_private *i915)
 {
        i915_gem_sanitize(i915);
@@ -5666,16 +5795,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+int i915_gem_freeze_late(struct drm_i915_private *i915)
 {
        struct drm_i915_gem_object *obj;
        struct list_head *phases[] = {
-               &dev_priv->mm.unbound_list,
-               &dev_priv->mm.bound_list,
+               &i915->mm.unbound_list,
+               &i915->mm.bound_list,
                NULL
-       }, **p;
+       }, **phase;
 
-       /* Called just before we write the hibernation image.
+       /*
+        * Called just before we write the hibernation image.
         *
         * We need to update the domain tracking to reflect that the CPU
         * will be accessing all the pages to create and restore from the
@@ -5689,15 +5819,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
         * the objects as well, see i915_gem_freeze()
         */
 
-       i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
-       i915_gem_drain_freed_objects(dev_priv);
+       i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
+       i915_gem_drain_freed_objects(i915);
 
-       spin_lock(&dev_priv->mm.obj_lock);
-       for (p = phases; *p; p++) {
-               list_for_each_entry(obj, *p, mm.link)
-                       __start_cpu_write(obj);
+       mutex_lock(&i915->drm.struct_mutex);
+       for (phase = phases; *phase; phase++) {
+               list_for_each_entry(obj, *phase, mm.link)
+                       WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
        }
-       spin_unlock(&dev_priv->mm.obj_lock);
+       mutex_unlock(&i915->drm.struct_mutex);
 
        return 0;
 }
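
i915_gem_freeze_late() now walks both object lists through a single NULL-terminated array of list heads. A small standalone C sketch of that "phases" idiom, with plain arrays standing in for the kernel's list_heads:

#include <stdio.h>

int main(void)
{
	int unbound[] = { 1, 2 }, bound[] = { 3 };
	struct phase { int *objs; int n; } phases[] = {
		{ unbound, 2 },
		{ bound, 1 },
		{ NULL, 0 }	/* sentinel, like the NULL in the driver */
	}, *phase;

	for (phase = phases; phase->objs; phase++)
		for (int i = 0; i < phase->n; i++)
			printf("flush object %d to CPU domain\n", phase->objs[i]);

	return 0;
}
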
@@ -5736,6 +5866,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
        file_priv->bsd_engine = -1;
+       file_priv->hang_timestamp = jiffies;
 
        ret = i915_gem_context_open(i915, file);
        if (ret)
@@ -6016,16 +6147,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
                goto err_unlock;
        }
 
-       pages = fetch_and_zero(&obj->mm.pages);
-       if (pages) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
-               __i915_gem_object_reset_page_iter(obj);
-
-               spin_lock(&i915->mm.obj_lock);
-               list_del(&obj->mm.link);
-               spin_unlock(&i915->mm.obj_lock);
-       }
+       pages = __i915_gem_object_unset_pages(obj);
 
        obj->ops = &i915_gem_phys_ops;
 
@@ -6043,7 +6165,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 
 err_xfer:
        obj->ops = &i915_gem_object_ops;
-       obj->mm.pages = pages;
+       if (!IS_ERR_OR_NULL(pages)) {
+               unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+
+               __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+       }
 err_unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
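
The err_xfer path above only reinstalls the pages when the fetched pointer is neither NULL nor an encoded errno. A minimal model of the ERR_PTR/IS_ERR_OR_NULL convention it relies on (simplified, not the kernel headers):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	/* small negative errnos live in the top 4095 pointer values */
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *pages = ERR_PTR(-12);	/* -ENOMEM */

	if (!IS_ERR_OR_NULL(pages))
		puts("reinstall pages");
	else
		printf("nothing to restore (err=%ld)\n",
		       pages ? PTR_ERR(pages) : 0L);
	return 0;
}
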
index 525920404ede44b18da84d7947ecee8345bf7f6b..e465929568726c31a39dfb6037da594c1a93f3ac 100644 (file)
@@ -26,6 +26,7 @@
 #define __I915_GEM_H__
 
 #include <linux/bug.h>
+#include <linux/interrupt.h>
 
 struct drm_i915_private;
 
@@ -62,9 +63,12 @@ struct drm_i915_private;
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
 #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
 #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
+#define GEM_TRACE_DUMP_ON(expr) \
+       do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
 #else
 #define GEM_TRACE(...) do { } while (0)
 #define GEM_TRACE_DUMP() do { } while (0)
+#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
 #define I915_NUM_ENGINES 8
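
When tracing is compiled out, GEM_TRACE_DUMP_ON(expr) must still type-check its argument while generating no code, which is what BUILD_BUG_ON_INVALID boils down to. A standalone demo of that "evaluate-for-type-only" trick, with illustrative macro names:

#include <stdio.h>

#ifdef ENABLE_TRACE
#define TRACE_DUMP_ON(expr) do { if (expr) puts("dump!"); } while (0)
#else
/* sizeof() validates the expression without emitting any code */
#define TRACE_DUMP_ON(expr) ((void)sizeof(!!(expr)))
#endif

int main(void)
{
	int errors = 1;

	TRACE_DUMP_ON(errors > 0);	/* no-op unless built with -DENABLE_TRACE */
	return 0;
}
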
@@ -72,4 +76,21 @@ struct drm_i915_private;
 void i915_gem_park(struct drm_i915_private *i915);
 void i915_gem_unpark(struct drm_i915_private *i915);
 
+static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
+{
+       if (atomic_inc_return(&t->count) == 1)
+               tasklet_unlock_wait(t);
+}
+
+static inline void __tasklet_enable_sync_once(struct tasklet_struct *t)
+{
+       if (atomic_dec_return(&t->count) == 0)
+               tasklet_kill(t);
+}
+
+static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
+{
+       return !atomic_read(&t->count);
+}
+
 #endif /* __I915_GEM_H__ */
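
In the helpers above, only the first disable waits for the tasklet to stop running and only the last enable re-arms it. A userspace model of those 0->1 and 1->0 edges using C11 atomics (the shape of the API, not the kernel's tasklet code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int count;

static void disable_sync_once(void)
{
	/* fetch_add returns the old value: old+1 == 1 means first disabler */
	if (atomic_fetch_add(&count, 1) + 1 == 1)
		puts("first disabler: wait until handler finishes");
}

static void enable_sync_once(void)
{
	/* old-1 == 0 means last enabler */
	if (atomic_fetch_sub(&count, 1) - 1 == 0)
		puts("last enabler: tasklet may run again");
}

int main(void)
{
	disable_sync_once();
	disable_sync_once();	/* nested: no extra wait */
	enable_sync_once();	/* still disabled */
	enable_sync_once();	/* now enabled */
	printf("enabled=%d\n", atomic_load(&count) == 0);
	return 0;
}
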
index 33f8a4b3c98170f2857e15255e4fc23ae8bbb49e..b10770cfccd24bedd80a7fd67ac06d78dde695c1 100644 (file)
@@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];
 
-               if (!ce->state)
-                       continue;
-
-               WARN_ON(ce->pin_count);
-               if (ce->ring)
-                       intel_ring_free(ce->ring);
-
-               __i915_gem_object_release_unless_active(ce->state->obj);
+               if (ce->ops)
+                       ce->ops->destroy(ce);
        }
 
        kfree(ctx->name);
@@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
         */
        lut_close(ctx);
        if (ctx->ppgtt)
-               i915_ppgtt_close(&ctx->ppgtt->base);
+               i915_ppgtt_close(&ctx->ppgtt->vm);
 
        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_put(ctx);
@@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
        int ret;
        unsigned int max;
 
-       if (INTEL_GEN(dev_priv) >= 11)
+       if (INTEL_GEN(dev_priv) >= 11) {
                max = GEN11_MAX_CONTEXT_HW_ID;
-       else
-               max = MAX_CONTEXT_HW_ID;
+       } else {
+               /*
+                * When using GuC in proxy submission, GuC consumes the
+                * highest bit in the context id to indicate proxy submission.
+                */
+               if (USES_GUC_SUBMISSION(dev_priv))
+                       max = MAX_GUC_CONTEXT_HW_ID;
+               else
+                       max = MAX_CONTEXT_HW_ID;
+       }
 
        ret = ida_simple_get(&dev_priv->contexts.hw_ida,
                             0, max, GFP_KERNEL);
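
A worked example of the id-space split described in the comment above: if the hardware context id field is N bits and GuC proxy submission claims the top bit, only half the ids remain for ida_simple_get(). The field width below is an assumption for illustration, not the driver's actual constant:

#include <stdio.h>

int main(void)
{
	unsigned int hw_id_bits = 21;			/* assumed field width */
	unsigned int max_ctx = 1u << hw_id_bits;	/* full range */
	unsigned int max_guc = 1u << (hw_id_bits - 1);	/* top bit reserved */

	printf("full: %u ids, with GuC proxy bit: %u ids\n",
	       max_ctx, max_guc);
	return 0;
}
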
@@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
        desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 
        address_mode = INTEL_LEGACY_32B_CONTEXT;
-       if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+       if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
@@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv)
 {
        struct i915_gem_context *ctx;
+       unsigned int n;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        ctx->i915 = dev_priv;
        ctx->sched.priority = I915_PRIORITY_NORMAL;
 
+       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+               struct intel_context *ce = &ctx->__engine[n];
+
+               ce->gem_context = ctx;
+       }
+
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
 
@@ -364,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
        if (USES_FULL_PPGTT(dev_priv)) {
                struct i915_hw_ppgtt *ppgtt;
 
-               ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
+               ppgtt = i915_ppgtt_create(dev_priv, file_priv);
                if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
@@ -502,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
        }
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
-                        dev_priv->engine[RCS]->context_size ? "logical" :
-                        "fake");
+                        DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+                        "logical" : "fake");
        return 0;
 }
 
@@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-       for_each_engine(engine, dev_priv, id) {
-               engine->legacy_active_context = NULL;
-               engine->legacy_active_ppgtt = NULL;
-
-               if (!engine->last_retired_context)
-                       continue;
-
-               intel_context_unpin(engine->last_retired_context, engine);
-               engine->last_retired_context = NULL;
-       }
+       for_each_engine(engine, dev_priv, id)
+               intel_engine_lost_context(engine);
 }
 
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
@@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline,
 {
        struct i915_request *rq;
 
-       if (timeline == &engine->timeline)
-               return NULL;
+       GEM_BUG_ON(timeline == &engine->timeline);
 
        rq = i915_gem_active_raw(&timeline->last_request,
                                 &engine->i915->drm.struct_mutex);
-       if (rq && rq->engine == engine)
+       if (rq && rq->engine == engine) {
+               GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+                         timeline->name, engine->name,
+                         rq->fence.context, rq->fence.seqno);
+               GEM_BUG_ON(rq->timeline != timeline);
                return rq;
+       }
 
        return NULL;
 }
 
-static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
+static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
 {
-       struct i915_timeline *timeline;
+       struct drm_i915_private *i915 = engine->i915;
+       const struct intel_context * const ce =
+               to_intel_context(i915->kernel_context, engine);
+       struct i915_timeline *barrier = ce->ring->timeline;
+       struct intel_ring *ring;
+       bool any_active = false;
 
-       list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
-               if (last_request_on_engine(timeline, engine))
+       lockdep_assert_held(&i915->drm.struct_mutex);
+       list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
+               struct i915_request *rq;
+
+               rq = last_request_on_engine(ring->timeline, engine);
+               if (!rq)
+                       continue;
+
+               any_active = true;
+
+               if (rq->hw_context == ce)
+                       continue;
+
+               /*
+                * Was this request submitted after the previous
+                * switch-to-kernel-context?
+                */
+               if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
+                       GEM_TRACE("%s needs barrier for %llx:%d\n",
+                                 ring->timeline->name,
+                                 rq->fence.context,
+                                 rq->fence.seqno);
                        return false;
+               }
+
+               GEM_TRACE("%s has barrier after %llx:%d\n",
+                         ring->timeline->name,
+                         rq->fence.context,
+                         rq->fence.seqno);
        }
 
-       return intel_engine_has_kernel_context(engine);
+       /*
+        * If any other timeline was still active and behind the last barrier,
+        * then our last switch-to-kernel-context must still be queued and
+        * will run last (leaving the engine in the kernel context when it
+        * eventually idles).
+        */
+       if (any_active)
+               return true;
+
+       /* The engine is idle; check that it is idling in the kernel context. */
+       return engine->last_retired_context == ce;
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
-       struct i915_timeline *timeline;
        enum intel_engine_id id;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
 
-       i915_retire_requests(dev_priv);
+       lockdep_assert_held(&i915->drm.struct_mutex);
+       GEM_BUG_ON(!i915->kernel_context);
+
+       i915_retire_requests(i915);
 
-       for_each_engine(engine, dev_priv, id) {
+       for_each_engine(engine, i915, id) {
+               struct intel_ring *ring;
                struct i915_request *rq;
 
-               if (engine_has_idle_kernel_context(engine))
+               GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
+               if (engine_has_kernel_context_barrier(engine))
                        continue;
 
-               rq = i915_request_alloc(engine, dev_priv->kernel_context);
+               GEM_TRACE("emit barrier on %s\n", engine->name);
+
+               rq = i915_request_alloc(engine, i915->kernel_context);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);
 
                /* Queue this switch after all other activity */
-               list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+               list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
                        struct i915_request *prev;
 
-                       prev = last_request_on_engine(timeline, engine);
-                       if (prev)
-                               i915_sw_fence_await_sw_fence_gfp(&rq->submit,
-                                                                &prev->submit,
-                                                                I915_FENCE_GFP);
+                       prev = last_request_on_engine(ring->timeline, engine);
+                       if (!prev)
+                               continue;
+
+                       if (prev->gem_context == i915->kernel_context)
+                               continue;
+
+                       GEM_TRACE("add barrier on %s for %llx:%d\n",
+                                 engine->name,
+                                 prev->fence.context,
+                                 prev->fence.seqno);
+                       i915_sw_fence_await_sw_fence_gfp(&rq->submit,
+                                                        &prev->submit,
+                                                        I915_FENCE_GFP);
+                       i915_timeline_sync_set(rq->timeline, &prev->fence);
                }
 
-               /*
-                * Force a flush after the switch to ensure that all rendering
-                * and operations prior to switching to the kernel context hits
-                * memory. This should be guaranteed by the previous request,
-                * but an extra layer of paranoia before we declare the system
-                * idle (on suspend etc) is advisable!
-                */
-               __i915_request_add(rq, true);
+               i915_request_add(rq);
        }
 
        return 0;
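
The barrier check above asks, per timeline, "has this request already been ordered behind the last switch-to-kernel-context?" A toy model of the i915_timeline_sync_set()/_is_later() pair, ignoring seqno wraparound for simplicity:

#include <stdio.h>
#include <string.h>

#define MAX_CTX 8

static unsigned int sync_seqno[MAX_CTX];	/* 0 = never synced */

static void sync_set(unsigned int ctx, unsigned int seqno)
{
	if (seqno > sync_seqno[ctx])
		sync_seqno[ctx] = seqno;
}

static int sync_is_later(unsigned int ctx, unsigned int seqno)
{
	/* barrier already ordered after this fence? */
	return sync_seqno[ctx] >= seqno;
}

int main(void)
{
	memset(sync_seqno, 0, sizeof(sync_seqno));
	sync_set(3, 10);	/* barrier ordered after fence 3:10 */

	printf("3:9 covered? %d\n", sync_is_later(3, 9));	/* 1 */
	printf("3:11 covered? %d\n", sync_is_later(3, 11));	/* 0: emit barrier */
	return 0;
}
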
@@ -652,7 +708,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-       return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+       return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -664,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        struct i915_gem_context *ctx;
        int ret;
 
-       if (!dev_priv->engine[RCS]->context_size)
+       if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
                return -ENODEV;
 
        if (args->pad != 0)
@@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                break;
        case I915_CONTEXT_PARAM_GTT_SIZE:
                if (ctx->ppgtt)
-                       args->value = ctx->ppgtt->base.total;
+                       args->value = ctx->ppgtt->vm.total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
-                       args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
+                       args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
                else
-                       args->value = to_i915(dev)->ggtt.base.total;
+                       args->value = to_i915(dev)->ggtt.vm.total;
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                args->value = i915_gem_context_no_error_capture(ctx);
index ace3b129c18966f9c76d88f11bc75b5b463e9f6c..b116e4942c10d13eb7e9771a4f50e737e47d185a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/radix-tree.h>
 
 #include "i915_gem.h"
+#include "i915_scheduler.h"
 
 struct pid;
 
@@ -45,6 +46,13 @@ struct intel_ring;
 
 #define DEFAULT_CONTEXT_HANDLE 0
 
+struct intel_context;
+
+struct intel_context_ops {
+       void (*unpin)(struct intel_context *ce);
+       void (*destroy)(struct intel_context *ce);
+};
+
 /**
  * struct i915_gem_context - client state
  *
@@ -144,11 +152,14 @@ struct i915_gem_context {
 
        /** engine: per-engine logical HW state */
        struct intel_context {
+               struct i915_gem_context *gem_context;
                struct i915_vma *state;
                struct intel_ring *ring;
                u32 *lrc_reg_state;
                u64 lrc_desc;
                int pin_count;
+
+               const struct intel_context_ops *ops;
        } __engine[I915_NUM_ENGINES];
 
        /** ring_size: size for allocating the per-engine ring buffer */
@@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx,
        return &ctx->__engine[engine->id];
 }
 
-static inline struct intel_ring *
+static inline struct intel_context *
 intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 {
        return engine->context_pin(engine, ctx);
 }
 
-static inline void __intel_context_pin(struct i915_gem_context *ctx,
-                                      const struct intel_engine_cs *engine)
+static inline void __intel_context_pin(struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
-
        GEM_BUG_ON(!ce->pin_count);
        ce->pin_count++;
 }
 
-static inline void intel_context_unpin(struct i915_gem_context *ctx,
-                                      struct intel_engine_cs *engine)
+static inline void intel_context_unpin(struct intel_context *ce)
 {
-       engine->context_unpin(engine, ctx);
+       GEM_BUG_ON(!ce->pin_count);
+       if (--ce->pin_count)
+               return;
+
+       GEM_BUG_ON(!ce->ops);
+       ce->ops->unpin(ce);
 }
 
 /* i915_gem_context.c */
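
intel_context_unpin() now releases through a per-backend vtable only on the final pin reference. A sketch of that refcount-plus-ops shape, with illustrative names:

#include <stdio.h>

struct ctx;
struct ctx_ops { void (*unpin)(struct ctx *); };
struct ctx { int pin_count; const struct ctx_ops *ops; };

static void backend_unpin(struct ctx *ce) { puts("backend releases HW state"); }
static const struct ctx_ops ops = { .unpin = backend_unpin };

static void context_unpin(struct ctx *ce)
{
	if (--ce->pin_count)
		return;		/* still pinned by someone else */
	ce->ops->unpin(ce);	/* last reference: release for real */
}

int main(void)
{
	struct ctx ce = { .pin_count = 2, .ops = &ops };

	context_unpin(&ce);	/* nothing happens */
	context_unpin(&ce);	/* backend hook fires */
	return 0;
}
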
index 69a7aec49e84e06d70b5800af653d951d522049a..82e2ca17a441eed4c9f562b9d9ee9aa45f2e740a 100644 (file)
@@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
        i915_gem_object_unpin_map(obj);
 }
 
-static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-       return NULL;
-}
-
-static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-
-}
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
@@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = i915_gem_dmabuf_kmap,
-       .map_atomic = i915_gem_dmabuf_kmap_atomic,
        .unmap = i915_gem_dmabuf_kunmap,
-       .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
index 54814a196ee4d3fe99d16a72083a926595eda6d6..02b83a5ed96c9ec7b539bec4bdc88ed3ac1946cd 100644 (file)
@@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915)
 
        err = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED);
+                                    I915_WAIT_LOCKED,
+                                    MAX_SCHEDULE_TIMEOUT);
        if (err)
                return err;
 
index f627a8c47c58a36f6ff92f17a4d6d672b28bc00b..3f0c612d42e786d44cff5c86b59bb1da27c0fea9 100644 (file)
@@ -66,6 +66,15 @@ enum {
 #define __I915_EXEC_ILLEGAL_FLAGS \
        (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
 
+/* Catch emission of unexpected errors for CI! */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#undef EINVAL
+#define EINVAL ({ \
+       DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
+       22; \
+})
+#endif
+
 /**
  * DOC: User command execution
  *
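
The EINVAL redefinition above uses a GNU C statement expression so the macro both logs the call site and still evaluates to the numeric errno, leaving every "return -EINVAL;" unchanged. A standalone demo (builds with gcc or clang; macro name is illustrative):

#include <stdio.h>

#define MY_EINVAL ({ \
	fprintf(stderr, "EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})

static int validate(int flags)
{
	if (flags != 0)
		return -MY_EINVAL;	/* logs, then returns -22 */
	return 0;
}

int main(void)
{
	printf("ret=%d\n", validate(1));
	return 0;
}
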
@@ -489,7 +498,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
 }
 
 static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+          unsigned int i, unsigned batch_idx,
+          struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
        int err;
@@ -522,6 +533,25 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
        eb->flags[i] = entry->flags;
        vma->exec_flags = &eb->flags[i];
 
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocated address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       if (i == batch_idx) {
+               if (entry->relocation_count &&
+                   !(eb->flags[i] & EXEC_OBJECT_PINNED))
+                       eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+               if (eb->reloc_cache.has_fence)
+                       eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+               eb->batch = vma;
+       }
+
        err = 0;
        if (eb_pin_vma(eb, entry, vma)) {
                if (entry->offset != vma->node.start) {
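
A worked example of the failure mode the __EXEC_OBJECT_NEEDS_BIAS flag guards against: with a negative relocation delta, a batch placed too low in the GTT yields a relocated address that wraps below zero. Offsets below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t delta = -0x2000;		/* SNA-style negative delta */
	uint64_t low_batch = 0x1000;		/* batch near GTT start: bad */
	uint64_t biased_batch = 0x100000;	/* batch biased upwards: ok */

	printf("low:    %#llx\n",
	       (unsigned long long)(low_batch + delta));	/* wraps */
	printf("biased: %#llx\n",
	       (unsigned long long)(biased_batch + delta));	/* sane */
	return 0;
}
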
@@ -703,7 +733,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
                return -ENOENT;
 
        eb->ctx = ctx;
-       eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base;
+       eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
 
        eb->context_flags = 0;
        if (ctx->flags & CONTEXT_NO_ZEROMAP)
@@ -716,7 +746,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
        struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
        struct drm_i915_gem_object *obj;
-       unsigned int i;
+       unsigned int i, batch;
        int err;
 
        if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +758,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
        INIT_LIST_HEAD(&eb->relocs);
        INIT_LIST_HEAD(&eb->unbound);
 
+       batch = eb_batch_index(eb);
+
        for (i = 0; i < eb->buffer_count; i++) {
                u32 handle = eb->exec[i].handle;
                struct i915_lut_handle *lut;
@@ -770,33 +802,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                lut->handle = handle;
 
 add_vma:
-               err = eb_add_vma(eb, i, vma);
+               err = eb_add_vma(eb, i, batch, vma);
                if (unlikely(err))
                        goto err_vma;
 
                GEM_BUG_ON(vma != eb->vma[i]);
                GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+               GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+                          eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
        }
 
-       /* take note of the batch buffer before we might reorder the lists */
-       i = eb_batch_index(eb);
-       eb->batch = eb->vma[i];
-       GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
-       /*
-        * SNA is doing fancy tricks with compressing batch buffers, which leads
-        * to negative relocation deltas. Usually that works out ok since the
-        * relocate address is still positive, except when the batch is placed
-        * very low in the GTT. Ensure this doesn't happen.
-        *
-        * Note that actual hangs have only been observed on gen7, but for
-        * paranoia do it everywhere.
-        */
-       if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
-               eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
-       if (eb->reloc_cache.has_fence)
-               eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
        eb->args->flags |= __EXEC_VALIDATED;
        return eb_reserve(eb);
 
@@ -916,7 +931,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
        i915_gem_object_unpin_map(cache->rq->batch->obj);
        i915_gem_chipset_flush(cache->rq->i915);
 
-       __i915_request_add(cache->rq, true);
+       i915_request_add(cache->rq);
        cache->rq = NULL;
 }
 
@@ -943,9 +958,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
                if (cache->node.allocated) {
                        struct i915_ggtt *ggtt = cache_to_ggtt(cache);
 
-                       ggtt->base.clear_range(&ggtt->base,
-                                              cache->node.start,
-                                              cache->node.size);
+                       ggtt->vm.clear_range(&ggtt->vm,
+                                            cache->node.start,
+                                            cache->node.size);
                        drm_mm_remove_node(&cache->node);
                } else {
                        i915_vma_unpin((struct i915_vma *)cache->node.mm);
@@ -1016,7 +1031,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                if (IS_ERR(vma)) {
                        memset(&cache->node, 0, sizeof(cache->node));
                        err = drm_mm_insert_node_in_range
-                               (&ggtt->base.mm, &cache->node,
+                               (&ggtt->vm.mm, &cache->node,
                                 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                 0, ggtt->mappable_end,
                                 DRM_MM_INSERT_LOW);
@@ -1037,9 +1052,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
        offset = cache->node.start;
        if (cache->node.allocated) {
                wmb();
-               ggtt->base.insert_page(&ggtt->base,
-                                      i915_gem_object_get_dma_address(obj, page),
-                                      offset, I915_CACHE_NONE, 0);
+               ggtt->vm.insert_page(&ggtt->vm,
+                                    i915_gem_object_get_dma_address(obj, page),
+                                    offset, I915_CACHE_NONE, 0);
        } else {
                offset += page << PAGE_SHIFT;
        }
@@ -1150,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto err_request;
 
        GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-       i915_vma_move_to_active(batch, rq, 0);
-       reservation_object_lock(batch->resv, NULL);
-       reservation_object_add_excl_fence(batch->resv, &rq->fence);
-       reservation_object_unlock(batch->resv);
-       i915_vma_unpin(batch);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto skip_request;
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       reservation_object_lock(vma->resv, NULL);
-       reservation_object_add_excl_fence(vma->resv, &rq->fence);
-       reservation_object_unlock(vma->resv);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto skip_request;
 
        rq->batch = batch;
+       i915_vma_unpin(batch);
 
        cache->rq = rq;
        cache->rq_cmd = cmd;
@@ -1170,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        /* Return with batch mapping (cmd) still pinned */
        return 0;
 
+skip_request:
+       i915_request_skip(rq, err);
 err_request:
        i915_request_add(rq);
 err_unpin:
@@ -1756,25 +1771,6 @@ slow:
        return eb_relocate_slow(eb);
 }
 
-static void eb_export_fence(struct i915_vma *vma,
-                           struct i915_request *rq,
-                           unsigned int flags)
-{
-       struct reservation_object *resv = vma->resv;
-
-       /*
-        * Ignore errors from failing to allocate the new fence, we can't
-        * handle an error right now. Worst case should be missed
-        * synchronisation leading to rendering corruption.
-        */
-       reservation_object_lock(resv, NULL);
-       if (flags & EXEC_OBJECT_WRITE)
-               reservation_object_add_excl_fence(resv, &rq->fence);
-       else if (reservation_object_reserve_shared(resv) == 0)
-               reservation_object_add_shared_fence(resv, &rq->fence);
-       reservation_object_unlock(resv);
-}
-
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
        const unsigned int count = eb->buffer_count;
@@ -1828,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                unsigned int flags = eb->flags[i];
                struct i915_vma *vma = eb->vma[i];
 
-               i915_vma_move_to_active(vma, eb->request, flags);
-               eb_export_fence(vma, eb->request, flags);
+               err = i915_vma_move_to_active(vma, eb->request, flags);
+               if (unlikely(err)) {
+                       i915_request_skip(eb->request, err);
+                       return err;
+               }
 
                __eb_unreserve_vma(vma, flags);
                vma->exec_flags = NULL;
@@ -1869,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
        return true;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct i915_request *rq,
-                            unsigned int flags)
-{
-       struct drm_i915_gem_object *obj = vma->obj;
-       const unsigned int idx = rq->engine->id;
-
-       lockdep_assert_held(&rq->i915->drm.struct_mutex);
-       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
-       /*
-        * Add a reference if we're newly entering the active list.
-        * The order in which we add operations to the retirement queue is
-        * vital here: mark_active adds to the start of the callback list,
-        * such that subsequent callbacks are called first. Therefore we
-        * add the active reference first and queue for it to be dropped
-        * *last*.
-        */
-       if (!i915_vma_is_active(vma))
-               obj->active_count++;
-       i915_vma_set_active(vma, idx);
-       i915_gem_active_set(&vma->last_read[idx], rq);
-       list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
-       obj->write_domain = 0;
-       if (flags & EXEC_OBJECT_WRITE) {
-               obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
-               if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-                       i915_gem_active_set(&obj->frontbuffer_write, rq);
-
-               obj->read_domains = 0;
-       }
-       obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
-       if (flags & EXEC_OBJECT_NEEDS_FENCE)
-               i915_gem_active_set(&vma->last_fence, rq);
-}
-
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 {
        u32 *cs;
@@ -2433,7 +2393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        trace_i915_request_queue(eb.request, eb.batch_flags);
        err = eb_submit(&eb);
 err_request:
-       __i915_request_add(eb.request, err == 0);
+       i915_request_add(eb.request);
        add_to_client(eb.request, file);
 
        if (fences)
index 996ab2ad6c45cade8d7e20a1d28e9bffddfac8ef..f00c7fbef79efc6886116e01dc023fe9008c78f4 100644 (file)
@@ -42,7 +42,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 
-#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
+#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
 
 /**
  * DOC: Global GTT views
@@ -195,18 +195,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
                          u32 unused)
 {
        u32 pte_flags;
-       int ret;
+       int err;
 
        if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
-               ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
-                                                vma->size);
-               if (ret)
-                       return ret;
+               err = vma->vm->allocate_va_range(vma->vm,
+                                                vma->node.start, vma->size);
+               if (err)
+                       return err;
        }
 
-       /* Currently applicable only to VLV */
+       /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
 }
 
 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-                                 enum i915_cache_level level)
+                                 enum i915_cache_level level,
+                                 u32 flags)
 {
-       gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
-       pte |= addr;
+       gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+       if (unlikely(flags & PTE_READ_ONLY))
+               pte &= ~_PAGE_RW;
 
        switch (level) {
        case I915_CACHE_NONE:
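
The gen8_pte_encode() change starts every PTE as present+writable and strips the RW bit when the caller passes a read-only flag. A standalone sketch of that bit manipulation, using x86-style bit positions as in the driver's _PAGE_* macros (flag value is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_PRESENT	(1ull << 0)
#define PAGE_RW		(1ull << 1)
#define PTE_READ_ONLY	(1u << 0)	/* caller-side flag, illustrative */

static uint64_t pte_encode(uint64_t addr, uint32_t flags)
{
	uint64_t pte = addr | PAGE_PRESENT | PAGE_RW;

	if (flags & PTE_READ_ONLY)
		pte &= ~PAGE_RW;	/* GPU may read but not write */

	return pte;
}

int main(void)
{
	printf("rw: %#llx, ro: %#llx\n",
	       (unsigned long long)pte_encode(0x1000, 0),
	       (unsigned long long)pte_encode(0x1000, PTE_READ_ONLY));
	return 0;
}
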
@@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
        return pte;
 }
 
+static void stash_init(struct pagestash *stash)
+{
+       pagevec_init(&stash->pvec);
+       spin_lock_init(&stash->lock);
+}
+
+static struct page *stash_pop_page(struct pagestash *stash)
+{
+       struct page *page = NULL;
+
+       spin_lock(&stash->lock);
+       if (likely(stash->pvec.nr))
+               page = stash->pvec.pages[--stash->pvec.nr];
+       spin_unlock(&stash->lock);
+
+       return page;
+}
+
+static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
+{
+       int nr;
+
+       spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
+
+       nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+       memcpy(stash->pvec.pages + stash->pvec.nr,
+              pvec->pages + pvec->nr - nr,
+              sizeof(pvec->pages[0]) * nr);
+       stash->pvec.nr += nr;
+
+       spin_unlock(&stash->lock);
+
+       pvec->nr -= nr;
+}
+
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
-       struct pagevec *pvec = &vm->free_pages;
-       struct pagevec stash;
+       struct pagevec stack;
+       struct page *page;
 
        if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
                i915_gem_shrink_all(vm->i915);
 
-       if (likely(pvec->nr))
-               return pvec->pages[--pvec->nr];
+       page = stash_pop_page(&vm->free_pages);
+       if (page)
+               return page;
 
        if (!vm->pt_kmap_wc)
                return alloc_page(gfp);
 
-       /* A placeholder for a specific mutex to guard the WC stash */
-       lockdep_assert_held(&vm->i915->drm.struct_mutex);
-
        /* Look in our global stash of WC pages... */
-       pvec = &vm->i915->mm.wc_stash;
-       if (likely(pvec->nr))
-               return pvec->pages[--pvec->nr];
+       page = stash_pop_page(&vm->i915->mm.wc_stash);
+       if (page)
+               return page;
 
        /*
-        * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+        * Otherwise batch allocate pages to amortize cost of set_pages_wc.
         *
         * We have to be careful as page allocation may trigger the shrinker
         * (via direct reclaim) which will fill up the WC stash underneath us.
         * So we add our WB pages into a temporary pvec on the stack and merge
         * them into the WC stash after all the allocations are complete.
         */
-       pagevec_init(&stash);
+       pagevec_init(&stack);
        do {
                struct page *page;
 
@@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
                if (unlikely(!page))
                        break;
 
-               stash.pages[stash.nr++] = page;
-       } while (stash.nr < pagevec_space(pvec));
+               stack.pages[stack.nr++] = page;
+       } while (pagevec_space(&stack));
 
-       if (stash.nr) {
-               int nr = min_t(int, stash.nr, pagevec_space(pvec));
-               struct page **pages = stash.pages + stash.nr - nr;
+       if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
+               page = stack.pages[--stack.nr];
 
-               if (nr && !set_pages_array_wc(pages, nr)) {
-                       memcpy(pvec->pages + pvec->nr,
-                              pages, sizeof(pages[0]) * nr);
-                       pvec->nr += nr;
-                       stash.nr -= nr;
-               }
+               /* Merge spare WC pages to the global stash */
+               stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 
-               pagevec_release(&stash);
+               /* Push any surplus WC pages onto the local VM stash */
+               if (stack.nr)
+                       stash_push_pagevec(&vm->free_pages, &stack);
        }
 
-       return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
+       /* Return unwanted leftovers */
+       if (unlikely(stack.nr)) {
+               WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
+               __pagevec_release(&stack);
+       }
+
+       return page;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,
                                  bool immediate)
 {
-       struct pagevec *pvec = &vm->free_pages;
+       struct pagevec *pvec = &vm->free_pages.pvec;
+       struct pagevec stack;
 
+       lockdep_assert_held(&vm->free_pages.lock);
        GEM_BUG_ON(!pagevec_count(pvec));
 
        if (vm->pt_kmap_wc) {
-               struct pagevec *stash = &vm->i915->mm.wc_stash;
-
-               /* When we use WC, first fill up the global stash and then
+               /*
+                * When we use WC, first fill up the global stash and then
                 * only if full immediately free the overflow.
                 */
+               stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
 
-               lockdep_assert_held(&vm->i915->drm.struct_mutex);
-               if (pagevec_space(stash)) {
-                       do {
-                               stash->pages[stash->nr++] =
-                                       pvec->pages[--pvec->nr];
-                               if (!pvec->nr)
-                                       return;
-                       } while (pagevec_space(stash));
-
-                       /* As we have made some room in the VM's free_pages,
-                        * we can wait for it to fill again. Unless we are
-                        * inside i915_address_space_fini() and must
-                        * immediately release the pages!
-                        */
-                       if (!immediate)
-                               return;
-               }
+               /*
+                * As we have made some room in the VM's free_pages,
+                * we can wait for it to fill again. Unless we are
+                * inside i915_address_space_fini() and must
+                * immediately release the pages!
+                */
+               if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
+                       return;
+
+               /*
+                * We have to drop the lock to allow ourselves to sleep,
+                * so take a copy of the pvec and clear the stash for
+                * others to use it as we sleep.
+                */
+               stack = *pvec;
+               pagevec_reinit(pvec);
+               spin_unlock(&vm->free_pages.lock);
 
+               pvec = &stack;
                set_pages_array_wb(pvec->pages, pvec->nr);
+
+               spin_lock(&vm->free_pages.lock);
        }
 
        __pagevec_release(pvec);
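
The stash now has its own small lock, so pops and bulk pushes no longer need struct_mutex, and surplus pages are merged from a stack-local batch. A userspace model of that locked-stash shape, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

#define STASH_SIZE 15	/* roughly a pagevec */

static struct {
	pthread_mutex_t lock;
	int nr;
	int pages[STASH_SIZE];
} stash = { .lock = PTHREAD_MUTEX_INITIALIZER };

static int stash_pop(void)
{
	int page = -1;	/* -1 = empty, like returning NULL */

	pthread_mutex_lock(&stash.lock);
	if (stash.nr)
		page = stash.pages[--stash.nr];
	pthread_mutex_unlock(&stash.lock);
	return page;
}

static void stash_push_batch(const int *batch, int n)
{
	pthread_mutex_lock(&stash.lock);
	while (n-- && stash.nr < STASH_SIZE)
		stash.pages[stash.nr++] = batch[n];
	pthread_mutex_unlock(&stash.lock);
}

int main(void)
{
	int batch[] = { 101, 102, 103 };

	stash_push_batch(batch, 3);
	printf("popped %d\n", stash_pop());
	return 0;
}
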
@@ -481,20 +525,60 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
         * unconditional might_sleep() for everybody.
         */
        might_sleep();
-       if (!pagevec_add(&vm->free_pages, page))
+       spin_lock(&vm->free_pages.lock);
+       if (!pagevec_add(&vm->free_pages.pvec, page))
                vm_free_pages_release(vm, false);
+       spin_unlock(&vm->free_pages.lock);
+}
+
+static void i915_address_space_init(struct i915_address_space *vm,
+                                   struct drm_i915_private *dev_priv)
+{
+       /*
+        * The vm->mutex must be reclaim safe (for use in the shrinker).
+        * Do a dummy acquire now under fs_reclaim so that any allocation
+        * attempt holding the lock is immediately reported by lockdep.
+        */
+       mutex_init(&vm->mutex);
+       i915_gem_shrinker_taints_mutex(&vm->mutex);
+
+       GEM_BUG_ON(!vm->total);
+       drm_mm_init(&vm->mm, 0, vm->total);
+       vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
+       stash_init(&vm->free_pages);
+
+       INIT_LIST_HEAD(&vm->active_list);
+       INIT_LIST_HEAD(&vm->inactive_list);
+       INIT_LIST_HEAD(&vm->unbound_list);
+}
+
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+       spin_lock(&vm->free_pages.lock);
+       if (pagevec_count(&vm->free_pages.pvec))
+               vm_free_pages_release(vm, true);
+       GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+       spin_unlock(&vm->free_pages.lock);
+
+       drm_mm_takedown(&vm->mm);
+
+       mutex_destroy(&vm->mutex);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
                            struct i915_page_dma *p,
                            gfp_t gfp)
 {
-       p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+       p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
        if (unlikely(!p->page))
                return -ENOMEM;
 
-       p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL);
+       p->daddr = dma_map_page_attrs(vm->dma,
+                                     p->page, 0, PAGE_SIZE,
+                                     PCI_DMA_BIDIRECTIONAL,
+                                     DMA_ATTR_SKIP_CPU_SYNC |
+                                     DMA_ATTR_NO_WARN);
        if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
                vm_free_page(vm, p->page);
                return -ENOMEM;
@@ -506,7 +590,7 @@ static int __setup_page_dma(struct i915_address_space *vm,
 static int setup_page_dma(struct i915_address_space *vm,
                          struct i915_page_dma *p)
 {
-       return __setup_page_dma(vm, p, I915_GFP_DMA);
+       return __setup_page_dma(vm, p, __GFP_HIGHMEM);
 }
 
 static void cleanup_page_dma(struct i915_address_space *vm,
@@ -520,8 +604,8 @@ static void cleanup_page_dma(struct i915_address_space *vm,
 
 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
 
 static void fill_page_dma(struct i915_address_space *vm,
                          struct i915_page_dma *p,
@@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
                if (unlikely(!page))
                        goto skip;
 
-               addr = dma_map_page(vm->dma, page, 0, size,
-                                   PCI_DMA_BIDIRECTIONAL);
+               addr = dma_map_page_attrs(vm->dma,
+                                         page, 0, size,
+                                         PCI_DMA_BIDIRECTIONAL,
+                                         DMA_ATTR_SKIP_CPU_SYNC |
+                                         DMA_ATTR_NO_WARN);
                if (unlikely(dma_mapping_error(vm->dma, addr)))
                        goto free_page;
 
@@ -614,7 +701,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
 {
        struct i915_page_table *pt;
 
-       pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+       pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pt))
                return ERR_PTR(-ENOMEM);
 
@@ -637,21 +724,20 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
                               struct i915_page_table *pt)
 {
        fill_px(vm, pt,
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
 }
 
-static void gen6_initialize_pt(struct i915_address_space *vm,
+static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
                               struct i915_page_table *pt)
 {
-       fill32_px(vm, pt,
-                 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+       fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
 {
        struct i915_page_directory *pd;
 
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+       pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);
 
@@ -685,7 +771,7 @@ static int __pdp_init(struct i915_address_space *vm,
        const unsigned int pdpes = i915_pdpes_per_pdp(vm);
 
        pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
-                                           GFP_KERNEL | __GFP_NOWARN);
+                                           I915_GFP_ALLOW_FAIL);
        if (unlikely(!pdp->page_directory))
                return -ENOMEM;
 
@@ -765,53 +851,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
        memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
 }
 
-/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct i915_request *rq,
-                         unsigned entry,
-                         dma_addr_t addr)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 *cs;
-
-       BUG_ON(entry >= 4);
-
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(1);
-       *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
-       *cs++ = upper_32_bits(addr);
-       *cs++ = MI_LOAD_REGISTER_IMM(1);
-       *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
-       *cs++ = lower_32_bits(addr);
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
-static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
-                              struct i915_request *rq)
-{
-       int i, ret;
-
-       for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
-               const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
-               ret = gen8_write_pdp(rq, i, pd_daddr);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
-                              struct i915_request *rq)
-{
-       return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
-}
-
 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
  * the page table structures, we mark them dirty so that
  * context switching/execlist queuing code takes extra steps
@@ -819,7 +858,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
  */
 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 {
-       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
+       ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
 }
 
 /* Removes entries from a single page table, releasing it if it's empty.
@@ -833,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
        unsigned int pte = gen8_pte_index(start);
        unsigned int pte_end = pte + num_entries;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t *vaddr;
 
        GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1005,14 +1044,15 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
                              struct i915_page_directory_pointer *pdp,
                              struct sgt_dma *iter,
                              struct gen8_insert_pte *idx,
-                             enum i915_cache_level cache_level)
+                             enum i915_cache_level cache_level,
+                             u32 flags)
 {
        struct i915_page_directory *pd;
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        gen8_pte_t *vaddr;
        bool ret;
 
-       GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+       GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
        pd = pdp->page_directory[idx->pdpe];
        vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
        do {
@@ -1043,7 +1083,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
                                        break;
                                }
 
-                               GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+                               GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
                                pd = pdp->page_directory[idx->pdpe];
                        }
 
@@ -1059,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
-                                  u32 unused)
+                                  u32 flags)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
        gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
-                                     cache_level);
+                                     cache_level, flags);
 
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }
@@ -1074,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
                                           struct i915_page_directory_pointer **pdps,
                                           struct sgt_dma *iter,
-                                          enum i915_cache_level cache_level)
+                                          enum i915_cache_level cache_level,
+                                          u32 flags)
 {
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        u64 start = vma->node.start;
        dma_addr_t rem = iter->sg->length;
 
@@ -1192,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
                                   struct i915_vma *vma,
                                   enum i915_cache_level cache_level,
-                                  u32 unused)
+                                  u32 flags)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);
        struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
 
        if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
-               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+               gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+                                              flags);
        } else {
                struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
 
                while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
-                                                    &iter, &idx, cache_level))
+                                                    &iter, &idx, cache_level,
+                                                    flags))
                        GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
 
                vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1229,7 +1272,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 {
        int ret;
 
-       ret = setup_scratch_page(vm, I915_GFP_DMA);
+       ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;
 
@@ -1272,7 +1315,7 @@ free_scratch_page:
 
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        struct drm_i915_private *dev_priv = vm->i915;
        enum vgt_g2v_type msg;
        int i;
@@ -1333,13 +1376,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
        int i;
 
        for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
-               if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
+               if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
                        continue;
 
-               gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
+               gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
        }
 
-       cleanup_px(&ppgtt->base, &ppgtt->pml4);
+       cleanup_px(&ppgtt->vm, &ppgtt->pml4);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1353,7 +1396,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
        if (use_4lvl(vm))
                gen8_ppgtt_cleanup_4lvl(ppgtt);
        else
-               gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
+               gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
 
        gen8_free_scratch(vm);
 }
@@ -1489,7 +1532,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                          gen8_pte_t scratch_pte,
                          struct seq_file *m)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory *pd;
        u32 pdpe;
 
@@ -1499,7 +1542,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                u64 pd_start = start;
                u32 pde;
 
-               if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
+               if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
                        continue;
 
                seq_printf(m, "\tPDPE #%d\n", pdpe);
@@ -1507,7 +1550,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
                        u32 pte;
                        gen8_pte_t *pt_vaddr;
 
-                       if (pd->page_table[pde] == ppgtt->base.scratch_pt)
+                       if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
                                continue;
 
                        pt_vaddr = kmap_atomic_px(pt);
@@ -1540,10 +1583,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
-       u64 start = 0, length = ppgtt->base.total;
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       u64 start = 0, length = ppgtt->vm.total;
 
        if (use_4lvl(vm)) {
                u64 pml4e;
@@ -1551,7 +1594,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
                struct i915_page_directory_pointer *pdp;
 
                gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-                       if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
+                       if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
                                continue;
 
                        seq_printf(m, "    PML4E #%llu\n", pml4e);
@@ -1564,10 +1607,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 
 static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
 {
-       struct i915_address_space *vm = &ppgtt->base;
+       struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
        struct i915_page_directory *pd;
-       u64 start = 0, length = ppgtt->base.total;
+       u64 start = 0, length = ppgtt->vm.total;
        u64 from = start;
        unsigned int pdpe;
 
@@ -1601,211 +1644,153 @@ unwind:
  * space.
  *
  */
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
+static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 {
-       struct i915_address_space *vm = &ppgtt->base;
-       struct drm_i915_private *dev_priv = vm->i915;
-       int ret;
+       struct i915_hw_ppgtt *ppgtt;
+       int err;
+
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
+
+       kref_init(&ppgtt->ref);
+
+       ppgtt->vm.i915 = i915;
+       ppgtt->vm.dma = &i915->drm.pdev->dev;
 
-       ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+       ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
                1ULL << 48 :
                1ULL << 32;
 
+       /*
+        * From bdw, there is support for read-only pages in the PPGTT.
+        *
+        * XXX GVT is not honouring the lack of RW in the PTE bits.
+        */
+       ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
+       i915_address_space_init(&ppgtt->vm, i915);
+
        /* There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play safe for now.
         */
-       if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
-               ppgtt->base.pt_kmap_wc = true;
+       if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
+               ppgtt->vm.pt_kmap_wc = true;
 
-       ret = gen8_init_scratch(&ppgtt->base);
-       if (ret) {
-               ppgtt->base.total = 0;
-               return ret;
-       }
+       err = gen8_init_scratch(&ppgtt->vm);
+       if (err)
+               goto err_free;
 
-       if (use_4lvl(vm)) {
-               ret = setup_px(&ppgtt->base, &ppgtt->pml4);
-               if (ret)
-                       goto free_scratch;
+       if (use_4lvl(&ppgtt->vm)) {
+               err = setup_px(&ppgtt->vm, &ppgtt->pml4);
+               if (err)
+                       goto err_scratch;
 
-               gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
+               gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
 
-               ppgtt->switch_mm = gen8_mm_switch_4lvl;
-               ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
-               ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
-               ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
+               ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+               ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
+               ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
        } else {
-               ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
-               if (ret)
-                       goto free_scratch;
+               err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
+               if (err)
+                       goto err_scratch;
 
-               if (intel_vgpu_active(dev_priv)) {
-                       ret = gen8_preallocate_top_level_pdp(ppgtt);
-                       if (ret) {
+               if (intel_vgpu_active(i915)) {
+                       err = gen8_preallocate_top_level_pdp(ppgtt);
+                       if (err) {
                                __pdp_fini(&ppgtt->pdp);
-                               goto free_scratch;
+                               goto err_scratch;
                        }
                }
 
-               ppgtt->switch_mm = gen8_mm_switch_3lvl;
-               ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
-               ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
-               ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
+               ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+               ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
+               ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
        }
 
-       if (intel_vgpu_active(dev_priv))
+       if (intel_vgpu_active(i915))
                gen8_ppgtt_notify_vgt(ppgtt, true);
 
-       ppgtt->base.cleanup = gen8_ppgtt_cleanup;
-       ppgtt->base.unbind_vma = ppgtt_unbind_vma;
-       ppgtt->base.bind_vma = ppgtt_bind_vma;
-       ppgtt->base.set_pages = ppgtt_set_pages;
-       ppgtt->base.clear_pages = clear_pages;
+       ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
        ppgtt->debug_dump = gen8_dump_ppgtt;
 
-       return 0;
+       ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
+       ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
 
-free_scratch:
-       gen8_free_scratch(&ppgtt->base);
-       return ret;
+       return ppgtt;
+
+err_scratch:
+       gen8_free_scratch(&ppgtt->vm);
+err_free:
+       kfree(ppgtt);
+       return ERR_PTR(err);
 }
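
With the constructor now returning ERR_PTR() instead of initialising a
caller-allocated struct, callers follow the usual IS_ERR() dance. A minimal
caller sketch (the function name is hypothetical, not part of this patch):

static struct i915_hw_ppgtt *
example_get_gen8_ppgtt(struct drm_i915_private *i915)
{
        struct i915_hw_ppgtt *ppgtt;

        /* gen8_ppgtt_create() allocates, refs and initialises the vm */
        ppgtt = gen8_ppgtt_create(i915);
        if (IS_ERR(ppgtt))
                return ppgtt;   /* propagate -ENOMEM etc. unchanged */

        /* ppgtt->vm.allocate_va_range/insert_entries are now set up */
        return ppgtt;
}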
 
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
-       struct i915_address_space *vm = &ppgtt->base;
-       struct i915_page_table *unused;
-       gen6_pte_t scratch_pte;
-       u32 pd_entry, pte, pde;
-       u32 start = 0, length = ppgtt->base.total;
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+       struct i915_page_table *pt;
+       u32 pte, pde;
 
-       scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-                                    I915_CACHE_LLC, 0);
+       gen6_for_all_pdes(pt, &base->pd, pde) {
+               gen6_pte_t *vaddr;
+
+               if (pt == base->vm.scratch_pt)
+                       continue;
+
+               if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+                       u32 expected =
+                               GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
+                               GEN6_PDE_VALID;
+                       u32 pd_entry = readl(ppgtt->pd_addr + pde);
+
+                       if (pd_entry != expected)
+                               seq_printf(m,
+                                          "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+                                          pde,
+                                          pd_entry,
+                                          expected);
+
+                       seq_printf(m, "\tPDE: %x\n", pd_entry);
+               }
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
-               u32 expected;
-               gen6_pte_t *pt_vaddr;
-               const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
-               pd_entry = readl(ppgtt->pd_addr + pde);
-               expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
-
-               if (pd_entry != expected)
-                       seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
-                                  pde,
-                                  pd_entry,
-                                  expected);
-               seq_printf(m, "\tPDE: %x\n", pd_entry);
-
-               pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
-
-               for (pte = 0; pte < GEN6_PTES; pte+=4) {
-                       unsigned long va =
-                               (pde * PAGE_SIZE * GEN6_PTES) +
-                               (pte * PAGE_SIZE);
+               vaddr = kmap_atomic_px(base->pd.page_table[pde]);
+               for (pte = 0; pte < GEN6_PTES; pte += 4) {
                        int i;
-                       bool found = false;
+
                        for (i = 0; i < 4; i++)
-                               if (pt_vaddr[pte + i] != scratch_pte)
-                                       found = true;
-                       if (!found)
+                               if (vaddr[pte + i] != scratch_pte)
+                                       break;
+                       if (i == 4)
                                continue;
 
-                       seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+                       seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+                                  pde, pte,
+                                  (pde * GEN6_PTES + pte) * PAGE_SIZE);
                        for (i = 0; i < 4; i++) {
-                               if (pt_vaddr[pte + i] != scratch_pte)
-                                       seq_printf(m, " %08x", pt_vaddr[pte + i]);
+                               if (vaddr[pte + i] != scratch_pte)
+                                       seq_printf(m, " %08x", vaddr[pte + i]);
                                else
-                                       seq_puts(m, "  SCRATCH ");
+                                       seq_puts(m, "  SCRATCH");
                        }
                        seq_puts(m, "\n");
                }
-               kunmap_atomic(pt_vaddr);
+               kunmap_atomic(vaddr);
        }
 }
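
The rewritten dump loop drops the bool found flag in favour of the for/break
idiom: a group of four PTEs is skipped only when the scan runs to completion
(i == 4). The same test expressed as a hypothetical helper, for clarity:

static bool pte_quad_is_scratch(const gen6_pte_t *vaddr, u32 pte,
                                gen6_pte_t scratch_pte)
{
        int i;

        for (i = 0; i < 4; i++)
                if (vaddr[pte + i] != scratch_pte)
                        return false;   /* at least one live entry */

        return true;                    /* whole quad is still scratch */
}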
 
 /* Write pde (index) from the page directory @pd to the page table @pt */
-static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
                                  const unsigned int pde,
                                  const struct i915_page_table *pt)
 {
        /* Caller needs to make sure the write completes if necessary */
-       writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
-                      ppgtt->pd_addr + pde);
-}
-
-/* Write all the page tables found in the ppgtt structure to incrementing page
- * directories. */
-static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
-                                 u32 start, u32 length)
-{
-       struct i915_page_table *pt;
-       unsigned int pde;
-
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
-               gen6_write_pde(ppgtt, pde, pt);
-
-       mark_tlbs_dirty(ppgtt);
-       wmb();
-}
-
-static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
-{
-       GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
-       return ppgtt->pd.base.ggtt_offset << 10;
-}
-
-static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 *cs;
-
-       /* NB: TLBs must be flushed and invalidated before a switch */
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(2);
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
-       *cs++ = PP_DIR_DCLV_2G;
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-       *cs++ = get_pd_offset(ppgtt);
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
-static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       u32 *cs;
-
-       /* NB: TLBs must be flushed and invalidated before a switch */
-       cs = intel_ring_begin(rq, 6);
-       if (IS_ERR(cs))
-               return PTR_ERR(cs);
-
-       *cs++ = MI_LOAD_REGISTER_IMM(2);
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
-       *cs++ = PP_DIR_DCLV_2G;
-       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
-       *cs++ = get_pd_offset(ppgtt);
-       *cs++ = MI_NOOP;
-       intel_ring_advance(rq, cs);
-
-       return 0;
-}
-
-static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = rq->engine;
-       struct drm_i915_private *dev_priv = rq->i915;
-
-       I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
-       I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-       return 0;
+       iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+                 ppgtt->pd_addr + pde);
 }
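
gen6_write_pde() still leaves write completion to its caller (the helper
merely switches from writel_relaxed() to iowrite32()). A sketch of the
publish pattern the callers in this patch follow — compare pd_vma_bind()
further down; the helper name here is hypothetical:

static void example_publish_pdes(struct gen6_hw_ppgtt *ppgtt)
{
        struct i915_page_table *pt;
        unsigned int pde;

        /* batch the PDE writes ... */
        gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
                gen6_write_pde(ppgtt, pde, pt);

        /* ... then make them visible to the GPU in one go */
        mark_tlbs_dirty(&ppgtt->base);
        gen6_ggtt_invalidate(ppgtt->base.vm.i915);
}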
 
 static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
@@ -1867,22 +1852,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   u64 start, u64 length)
 {
-       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
        unsigned int first_entry = start >> PAGE_SHIFT;
        unsigned int pde = first_entry / GEN6_PTES;
        unsigned int pte = first_entry % GEN6_PTES;
        unsigned int num_entries = length >> PAGE_SHIFT;
-       gen6_pte_t scratch_pte =
-               vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+       const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
 
        while (num_entries) {
-               struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
-               unsigned int end = min(pte + num_entries, GEN6_PTES);
+               struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
+               const unsigned int end = min(pte + num_entries, GEN6_PTES);
+               const unsigned int count = end - pte;
                gen6_pte_t *vaddr;
 
-               num_entries -= end - pte;
+               GEM_BUG_ON(pt == vm->scratch_pt);
+
+               num_entries -= count;
+
+               GEM_BUG_ON(count > pt->used_ptes);
+               pt->used_ptes -= count;
+               if (!pt->used_ptes)
+                       ppgtt->scan_for_unused_pt = true;
 
-               /* Note that the hw doesn't support removing PDE on the fly
+               /*
+                * Note that the hw doesn't support removing PDE on the fly
                 * (they are cached inside the context with no means to
                 * invalidate the cache), so we can only reset the PTE
                 * entries back to scratch.
@@ -1911,6 +1904,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
        struct sgt_dma iter = sgt_dma(vma);
        gen6_pte_t *vaddr;
 
+       GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);
+
        vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
        do {
                vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -1939,218 +1934,280 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 static int gen6_alloc_va_range(struct i915_address_space *vm,
                               u64 start, u64 length)
 {
-       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
        struct i915_page_table *pt;
        u64 from = start;
        unsigned int pde;
        bool flush = false;
 
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
+       gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
+               const unsigned int count = gen6_pte_count(start, length);
+
                if (pt == vm->scratch_pt) {
                        pt = alloc_pt(vm);
                        if (IS_ERR(pt))
                                goto unwind_out;
 
-                       gen6_initialize_pt(vm, pt);
-                       ppgtt->pd.page_table[pde] = pt;
-                       gen6_write_pde(ppgtt, pde, pt);
-                       flush = true;
+                       gen6_initialize_pt(ppgtt, pt);
+                       ppgtt->base.pd.page_table[pde] = pt;
+
+                       if (i915_vma_is_bound(ppgtt->vma,
+                                             I915_VMA_GLOBAL_BIND)) {
+                               gen6_write_pde(ppgtt, pde, pt);
+                               flush = true;
+                       }
+
+                       GEM_BUG_ON(pt->used_ptes);
                }
+
+               pt->used_ptes += count;
        }
 
        if (flush) {
-               mark_tlbs_dirty(ppgtt);
-               wmb();
+               mark_tlbs_dirty(&ppgtt->base);
+               gen6_ggtt_invalidate(ppgtt->base.vm.i915);
        }
 
        return 0;
 
 unwind_out:
-       gen6_ppgtt_clear_range(vm, from, start);
+       gen6_ppgtt_clear_range(vm, from, start - from);
        return -ENOMEM;
 }
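
gen6_alloc_va_range() now tracks per-table occupancy. A condensed sketch of
the accounting added in this hunk and its counterpart in
gen6_ppgtt_clear_range() (helper name hypothetical):

static void example_account_ptes(struct gen6_hw_ppgtt *ppgtt,
                                 struct i915_page_table *pt,
                                 unsigned int count, bool allocating)
{
        if (allocating) {
                pt->used_ptes += count;
        } else {
                GEM_BUG_ON(count > pt->used_ptes);
                pt->used_ptes -= count;
                if (!pt->used_ptes)
                        /* empty tables are reaped in pd_vma_unbind() */
                        ppgtt->scan_for_unused_pt = true;
        }
}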
 
-static int gen6_init_scratch(struct i915_address_space *vm)
+static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 {
+       struct i915_address_space * const vm = &ppgtt->base.vm;
+       struct i915_page_table *unused;
+       u32 pde;
        int ret;
 
-       ret = setup_scratch_page(vm, I915_GFP_DMA);
+       ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;
 
+       ppgtt->scratch_pte =
+               vm->pte_encode(vm->scratch_page.daddr,
+                              I915_CACHE_NONE, PTE_READ_ONLY);
+
        vm->scratch_pt = alloc_pt(vm);
        if (IS_ERR(vm->scratch_pt)) {
                cleanup_scratch_page(vm);
                return PTR_ERR(vm->scratch_pt);
        }
 
-       gen6_initialize_pt(vm, vm->scratch_pt);
+       gen6_initialize_pt(ppgtt, vm->scratch_pt);
+       gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
+               ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
        return 0;
 }
 
-static void gen6_free_scratch(struct i915_address_space *vm)
+static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
 {
        free_pt(vm, vm->scratch_pt);
        cleanup_scratch_page(vm);
 }
 
-static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
 {
-       struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-       struct i915_page_directory *pd = &ppgtt->pd;
        struct i915_page_table *pt;
        u32 pde;
 
-       drm_mm_remove_node(&ppgtt->node);
+       gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+               if (pt != ppgtt->base.vm.scratch_pt)
+                       free_pt(&ppgtt->base.vm, pt);
+}
+
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
+{
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
 
-       gen6_for_all_pdes(pt, pd, pde)
-               if (pt != vm->scratch_pt)
-                       free_pt(vm, pt);
+       i915_vma_destroy(ppgtt->vma);
 
-       gen6_free_scratch(vm);
+       gen6_ppgtt_free_pd(ppgtt);
+       gen6_ppgtt_free_scratch(vm);
 }
 
-static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
+static int pd_vma_set_pages(struct i915_vma *vma)
 {
-       struct i915_address_space *vm = &ppgtt->base;
-       struct drm_i915_private *dev_priv = ppgtt->base.i915;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       int ret;
+       vma->pages = ERR_PTR(-ENODEV);
+       return 0;
+}
 
-       /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
-        * allocator works in address space sizes, so it's multiplied by page
-        * size. We allocate at the top of the GTT to avoid fragmentation.
-        */
-       BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
+static void pd_vma_clear_pages(struct i915_vma *vma)
+{
+       GEM_BUG_ON(!vma->pages);
 
-       ret = gen6_init_scratch(vm);
-       if (ret)
-               return ret;
+       vma->pages = NULL;
+}
 
-       ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
-                                 GEN6_PD_SIZE, GEN6_PD_ALIGN,
-                                 I915_COLOR_UNEVICTABLE,
-                                 0, ggtt->base.total,
-                                 PIN_HIGH);
-       if (ret)
-               goto err_out;
+static int pd_vma_bind(struct i915_vma *vma,
+                      enum i915_cache_level cache_level,
+                      u32 unused)
+{
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+       struct gen6_hw_ppgtt *ppgtt = vma->private;
+       u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE;
+       struct i915_page_table *pt;
+       unsigned int pde;
 
-       if (ppgtt->node.start < ggtt->mappable_end)
-               DRM_DEBUG("Forced to use aperture for PDEs\n");
+       ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+       ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
 
-       ppgtt->pd.base.ggtt_offset =
-               ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+       gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
+               gen6_write_pde(ppgtt, pde, pt);
 
-       ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
-               ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+       mark_tlbs_dirty(&ppgtt->base);
+       gen6_ggtt_invalidate(ppgtt->base.vm.i915);
 
        return 0;
-
-err_out:
-       gen6_free_scratch(vm);
-       return ret;
 }
 
-static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
+static void pd_vma_unbind(struct i915_vma *vma)
 {
-       return gen6_ppgtt_allocate_page_directories(ppgtt);
-}
+       struct gen6_hw_ppgtt *ppgtt = vma->private;
+       struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+       struct i915_page_table *pt;
+       unsigned int pde;
 
-static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
-                                 u64 start, u64 length)
-{
-       struct i915_page_table *unused;
-       u32 pde;
+       if (!ppgtt->scan_for_unused_pt)
+               return;
+
+       /* Free all no longer used page tables */
+       gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
+               if (pt->used_ptes || pt == scratch_pt)
+                       continue;
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
-               ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
+               free_pt(&ppgtt->base.vm, pt);
+               ppgtt->base.pd.page_table[pde] = scratch_pt;
+       }
+
+       ppgtt->scan_for_unused_pt = false;
 }
 
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
-{
-       struct drm_i915_private *dev_priv = ppgtt->base.i915;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       int ret;
+static const struct i915_vma_ops pd_vma_ops = {
+       .set_pages = pd_vma_set_pages,
+       .clear_pages = pd_vma_clear_pages,
+       .bind_vma = pd_vma_bind,
+       .unbind_vma = pd_vma_unbind,
+};
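
pd_vma_ops is the first user of the new per-VMA dispatch: bind, unbind,
set_pages and clear_pages move off i915_address_space onto vma->ops (see the
i915_vma_ops hunk in i915_gem_gtt.h below). A sketch of the assumed call
path, not the driver's literal code:

static int example_bind_vma(struct i915_vma *vma,
                            enum i915_cache_level cache_level,
                            u32 flags)
{
        /* before this patch: vma->vm->bind_vma(vma, ...) */
        return vma->ops->bind_vma(vma, cache_level, flags);
}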
 
-       ppgtt->base.pte_encode = ggtt->base.pte_encode;
-       if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
-               ppgtt->switch_mm = gen6_mm_switch;
-       else if (IS_HASWELL(dev_priv))
-               ppgtt->switch_mm = hsw_mm_switch;
-       else if (IS_GEN7(dev_priv))
-               ppgtt->switch_mm = gen7_mm_switch;
-       else
-               BUG();
+static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
+{
+       struct drm_i915_private *i915 = ppgtt->base.vm.i915;
+       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct i915_vma *vma;
 
-       ret = gen6_ppgtt_alloc(ppgtt);
-       if (ret)
-               return ret;
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(size > ggtt->vm.total);
 
-       ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+       vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+       if (!vma)
+               return ERR_PTR(-ENOMEM);
 
-       gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
-       gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+       init_request_active(&vma->last_fence, NULL);
 
-       ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
-       if (ret) {
-               gen6_ppgtt_cleanup(&ppgtt->base);
-               return ret;
-       }
+       vma->vm = &ggtt->vm;
+       vma->ops = &pd_vma_ops;
+       vma->private = ppgtt;
 
-       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
-       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->base.unbind_vma = ppgtt_unbind_vma;
-       ppgtt->base.bind_vma = ppgtt_bind_vma;
-       ppgtt->base.set_pages = ppgtt_set_pages;
-       ppgtt->base.clear_pages = clear_pages;
-       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
-       ppgtt->debug_dump = gen6_dump_ppgtt;
+       vma->active = RB_ROOT;
 
-       DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
-                        ppgtt->node.size >> 20,
-                        ppgtt->node.start / PAGE_SIZE);
+       vma->size = size;
+       vma->fence_size = size;
+       vma->flags = I915_VMA_GGTT;
+       vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
 
-       DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
-                        ppgtt->pd.base.ggtt_offset << 10);
+       INIT_LIST_HEAD(&vma->obj_link);
+       list_add(&vma->vm_link, &vma->vm->unbound_list);
 
-       return 0;
+       return vma;
 }
 
-static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
-                          struct drm_i915_private *dev_priv)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
-       ppgtt->base.i915 = dev_priv;
-       ppgtt->base.dma = &dev_priv->drm.pdev->dev;
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
 
-       if (INTEL_GEN(dev_priv) < 8)
-               return gen6_ppgtt_init(ppgtt);
-       else
-               return gen8_ppgtt_init(ppgtt);
+       /*
+        * Work around the limited maximum vma->pin_count and the
+        * aliasing_ppgtt, which will be pinned into every active context.
+        * (When vma->pin_count becomes atomic, I expect we will naturally
+        * need a larger, unpacked, type and kill this redundancy.)
+        */
+       if (ppgtt->pin_count++)
+               return 0;
+
+       /*
+        * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
+        * allocator works in address space sizes, so it's multiplied by page
+        * size. We allocate at the top of the GTT to avoid fragmentation.
+        */
+       return i915_vma_pin(ppgtt->vma,
+                           0, GEN6_PD_ALIGN,
+                           PIN_GLOBAL | PIN_HIGH);
 }
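
pin_count makes the PD pinning reference counted: only the first pin binds
the page-directory VMA into the GGTT, and only the last unpin releases it.
A hypothetical usage sketch:

static int example_use_ppgtt(struct i915_hw_ppgtt *base)
{
        int err;

        err = gen6_ppgtt_pin(base);    /* first caller binds the PD */
        if (err)
                return err;

        /* ... the PD stays resident in the GGTT for this section ... */

        gen6_ppgtt_unpin(base);        /* last caller unbinds it */
        return 0;
}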
 
-static void i915_address_space_init(struct i915_address_space *vm,
-                                   struct drm_i915_private *dev_priv,
-                                   const char *name)
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
 {
-       drm_mm_init(&vm->mm, 0, vm->total);
-       vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+       struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
 
-       INIT_LIST_HEAD(&vm->active_list);
-       INIT_LIST_HEAD(&vm->inactive_list);
-       INIT_LIST_HEAD(&vm->unbound_list);
+       GEM_BUG_ON(!ppgtt->pin_count);
+       if (--ppgtt->pin_count)
+               return;
 
-       list_add_tail(&vm->global_link, &dev_priv->vm_list);
-       pagevec_init(&vm->free_pages);
+       i915_vma_unpin(ppgtt->vma);
 }
 
-static void i915_address_space_fini(struct i915_address_space *vm)
+static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
 {
-       if (pagevec_count(&vm->free_pages))
-               vm_free_pages_release(vm, true);
+       struct i915_ggtt * const ggtt = &i915->ggtt;
+       struct gen6_hw_ppgtt *ppgtt;
+       int err;
 
-       drm_mm_takedown(&vm->mm);
-       list_del(&vm->global_link);
+       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+       if (!ppgtt)
+               return ERR_PTR(-ENOMEM);
+
+       kref_init(&ppgtt->base.ref);
+
+       ppgtt->base.vm.i915 = i915;
+       ppgtt->base.vm.dma = &i915->drm.pdev->dev;
+
+       ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+
+       i915_address_space_init(&ppgtt->base.vm, i915);
+
+       ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
+       ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.debug_dump = gen6_dump_ppgtt;
+
+       ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
+       ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
+       ppgtt->base.vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
+
+       ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
+
+       err = gen6_ppgtt_init_scratch(ppgtt);
+       if (err)
+               goto err_free;
+
+       ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
+       if (IS_ERR(ppgtt->vma)) {
+               err = PTR_ERR(ppgtt->vma);
+               goto err_scratch;
+       }
+
+       return &ppgtt->base;
+
+err_scratch:
+       gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+err_free:
+       kfree(ppgtt);
+       return ERR_PTR(err);
 }
 
 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
@@ -2212,29 +2269,28 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
        return 0;
 }
 
+static struct i915_hw_ppgtt *
+__hw_ppgtt_create(struct drm_i915_private *i915)
+{
+       if (INTEL_GEN(i915) < 8)
+               return gen6_ppgtt_create(i915);
+       else
+               return gen8_ppgtt_create(i915);
+}
+
 struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *dev_priv,
-                 struct drm_i915_file_private *fpriv,
-                 const char *name)
+i915_ppgtt_create(struct drm_i915_private *i915,
+                 struct drm_i915_file_private *fpriv)
 {
        struct i915_hw_ppgtt *ppgtt;
-       int ret;
-
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return ERR_PTR(-ENOMEM);
 
-       ret = __hw_ppgtt_init(ppgtt, dev_priv);
-       if (ret) {
-               kfree(ppgtt);
-               return ERR_PTR(ret);
-       }
+       ppgtt = __hw_ppgtt_create(i915);
+       if (IS_ERR(ppgtt))
+               return ppgtt;
 
-       kref_init(&ppgtt->ref);
-       i915_address_space_init(&ppgtt->base, dev_priv, name);
-       ppgtt->base.file = fpriv;
+       ppgtt->vm.file = fpriv;
 
-       trace_i915_ppgtt_create(&ppgtt->base);
+       trace_i915_ppgtt_create(&ppgtt->vm);
 
        return ppgtt;
 }
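
i915_ppgtt_create() now simply wraps the gen-specific constructors, and
lifetime stays kref-based. A minimal caller sketch (function name
hypothetical; i915_ppgtt_put() is the existing kref helper used elsewhere in
this patch):

static int example_new_ppgtt(struct drm_i915_private *i915,
                             struct drm_i915_file_private *file_priv)
{
        struct i915_hw_ppgtt *ppgtt;

        ppgtt = i915_ppgtt_create(i915, file_priv);
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        /* ... use ppgtt->vm ... */

        i915_ppgtt_put(ppgtt);  /* last ref ends in i915_ppgtt_release() */
        return 0;
}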
@@ -2268,16 +2324,16 @@ void i915_ppgtt_release(struct kref *kref)
        struct i915_hw_ppgtt *ppgtt =
                container_of(kref, struct i915_hw_ppgtt, ref);
 
-       trace_i915_ppgtt_release(&ppgtt->base);
+       trace_i915_ppgtt_release(&ppgtt->vm);
 
-       ppgtt_destroy_vma(&ppgtt->base);
+       ppgtt_destroy_vma(&ppgtt->vm);
 
-       GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
-       GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
-       GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list));
+       GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));
 
-       ppgtt->base.cleanup(&ppgtt->base);
-       i915_address_space_fini(&ppgtt->base);
+       ppgtt->vm.cleanup(&ppgtt->vm);
+       i915_address_space_fini(&ppgtt->vm);
        kfree(ppgtt);
 }
 
@@ -2373,7 +2429,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
 
        i915_check_and_clear_faults(dev_priv);
 
-       ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+       ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
        i915_ggtt_invalidate(dev_priv);
 }
@@ -2419,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
        gen8_pte_t __iomem *pte =
                (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
 
-       gen8_set_pte(pte, gen8_pte_encode(addr, level));
+       gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
 
        ggtt->invalidate(vm->i915);
 }
@@ -2427,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct i915_vma *vma,
                                     enum i915_cache_level level,
-                                    u32 unused)
+                                    u32 flags)
 {
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen8_pte_t __iomem *gtt_entries;
-       const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+       const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
        dma_addr_t addr;
 
+       /*
+        * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+        * not to allow the user to override access to a read-only page.
+        */
+
        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
        gtt_entries += vma->node.start >> PAGE_SHIFT;
        for_each_sgt_dma(addr, sgt_iter, vma->pages)
@@ -2500,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        const gen8_pte_t scratch_pte =
-               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+               gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
        gen8_pte_t __iomem *gtt_base =
                (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
        const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2561,13 +2622,14 @@ struct insert_entries {
        struct i915_address_space *vm;
        struct i915_vma *vma;
        enum i915_cache_level level;
+       u32 flags;
 };
 
 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 {
        struct insert_entries *arg = _arg;
 
-       gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+       gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
        bxt_vtd_ggtt_wa(arg->vm);
 
        return 0;
@@ -2576,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
                                             struct i915_vma *vma,
                                             enum i915_cache_level level,
-                                            u32 unused)
+                                            u32 flags)
 {
-       struct insert_entries arg = { vm, vma, level };
+       struct insert_entries arg = { vm, vma, level, flags };
 
        stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
 }
@@ -2669,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
        struct drm_i915_gem_object *obj = vma->obj;
        u32 pte_flags;
 
-       /* Currently applicable only to VLV */
+       /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
-       if (obj->gt_ro)
+       if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
        intel_runtime_pm_get(i915);
@@ -2709,23 +2771,22 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
        /* Currently applicable only to VLV */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        if (flags & I915_VMA_LOCAL_BIND) {
                struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
 
-               if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
-                   appgtt->base.allocate_va_range) {
-                       ret = appgtt->base.allocate_va_range(&appgtt->base,
-                                                            vma->node.start,
-                                                            vma->size);
+               if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+                       ret = appgtt->vm.allocate_va_range(&appgtt->vm,
+                                                          vma->node.start,
+                                                          vma->size);
                        if (ret)
                                return ret;
                }
 
-               appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
-                                           pte_flags);
+               appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
+                                         pte_flags);
        }
 
        if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2748,7 +2809,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
        }
 
        if (vma->flags & I915_VMA_LOCAL_BIND) {
-               struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+               struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
 
                vm->clear_range(vm, vma->node.start, vma->size);
        }
@@ -2762,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, 0)) {
+               if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
@@ -2811,34 +2872,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
        struct i915_hw_ppgtt *ppgtt;
        int err;
 
-       ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+       ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);
 
-       if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+       if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
                err = -ENODEV;
                goto err_ppgtt;
        }
 
-       if (ppgtt->base.allocate_va_range) {
-               /* Note we only pre-allocate as far as the end of the global
-                * GTT. On 48b / 4-level page-tables, the difference is very,
-                * very significant! We have to preallocate as GVT/vgpu does
-                * not like the page directory disappearing.
-                */
-               err = ppgtt->base.allocate_va_range(&ppgtt->base,
-                                                   0, ggtt->base.total);
-               if (err)
-                       goto err_ppgtt;
-       }
+       /*
+        * Note we only pre-allocate as far as the end of the global
+        * GTT. On 48b / 4-level page-tables, the difference is very,
+        * very significant! We have to preallocate as GVT/vgpu does
+        * not like the page directory disappearing.
+        */
+       err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
+       if (err)
+               goto err_ppgtt;
 
        i915->mm.aliasing_ppgtt = ppgtt;
 
-       GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
-       ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+       GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+       ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
 
-       GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
-       ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+       GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+       ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
 
        return 0;
 
@@ -2858,8 +2917,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
 
        i915_ppgtt_put(ppgtt);
 
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2883,7 +2942,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
                return ret;
 
        /* Reserve a mappable slot for our lockless error capture */
-       ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+       ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
                                          PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
@@ -2891,16 +2950,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
                return ret;
 
        /* Clear any non-preallocated blocks */
-       drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
+       drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt->base.clear_range(&ggtt->base, hole_start,
-                                      hole_end - hole_start);
+               ggtt->vm.clear_range(&ggtt->vm, hole_start,
+                                    hole_end - hole_start);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt->base.clear_range(&ggtt->base,
-                              ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
+       ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
 
        if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
                ret = i915_gem_init_aliasing_ppgtt(dev_priv);
@@ -2925,30 +2983,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        struct i915_vma *vma, *vn;
        struct pagevec *pvec;
 
-       ggtt->base.closed = true;
-
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
-       list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
-               WARN_ON(i915_vma_unbind(vma));
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-
-       i915_gem_cleanup_stolen(&dev_priv->drm);
+       ggtt->vm.closed = true;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_fini_aliasing_ppgtt(dev_priv);
 
+       GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link)
+               WARN_ON(i915_vma_unbind(vma));
+
        if (drm_mm_node_allocated(&ggtt->error_capture))
                drm_mm_remove_node(&ggtt->error_capture);
 
-       if (drm_mm_initialized(&ggtt->base.mm)) {
+       if (drm_mm_initialized(&ggtt->vm.mm)) {
                intel_vgt_deballoon(dev_priv);
-               i915_address_space_fini(&ggtt->base);
+               i915_address_space_fini(&ggtt->vm);
        }
 
-       ggtt->base.cleanup(&ggtt->base);
+       ggtt->vm.cleanup(&ggtt->vm);
 
-       pvec = &dev_priv->mm.wc_stash;
+       pvec = &dev_priv->mm.wc_stash.pvec;
        if (pvec->nr) {
                set_pages_array_wb(pvec->pages, pvec->nr);
                __pagevec_release(pvec);
@@ -2958,6 +3012,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 
        arch_phys_wc_del(ggtt->mtrr);
        io_mapping_fini(&ggtt->iomap);
+
+       i915_gem_cleanup_stolen(&dev_priv->drm);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2996,7 +3052,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
 
 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        phys_addr_t phys_addr;
        int ret;
@@ -3020,7 +3076,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
                return -ENOMEM;
        }
 
-       ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
+       ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
        if (ret) {
                DRM_ERROR("Scratch setup failed\n");
                /* iounmap will also get called at remove, but meh */
@@ -3326,7 +3382,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv)
 
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
@@ -3350,29 +3406,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        else
                size = gen8_get_total_gtt_size(snb_gmch_ctl);
 
-       ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
-       ggtt->base.cleanup = gen6_gmch_remove;
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.insert_page = gen8_ggtt_insert_page;
-       ggtt->base.clear_range = nop_clear_range;
+       ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+       ggtt->vm.cleanup = gen6_gmch_remove;
+       ggtt->vm.insert_page = gen8_ggtt_insert_page;
+       ggtt->vm.clear_range = nop_clear_range;
        if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
-               ggtt->base.clear_range = gen8_ggtt_clear_range;
+               ggtt->vm.clear_range = gen8_ggtt_clear_range;
 
-       ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+       ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
 
        /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
        if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
-               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
-               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
-               if (ggtt->base.clear_range != nop_clear_range)
-                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+               ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->vm.clear_range != nop_clear_range)
+                       ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
        }
 
        ggtt->invalidate = gen6_ggtt_invalidate;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        setup_private_pat(dev_priv);
 
        return ggtt_probe_common(ggtt, size);
@@ -3380,7 +3437,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
@@ -3407,29 +3464,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
        size = gen6_get_total_gtt_size(snb_gmch_ctl);
-       ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+       ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
 
-       ggtt->base.clear_range = gen6_ggtt_clear_range;
-       ggtt->base.insert_page = gen6_ggtt_insert_page;
-       ggtt->base.insert_entries = gen6_ggtt_insert_entries;
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.cleanup = gen6_gmch_remove;
+       ggtt->vm.clear_range = gen6_ggtt_clear_range;
+       ggtt->vm.insert_page = gen6_ggtt_insert_page;
+       ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+       ggtt->vm.cleanup = gen6_gmch_remove;
 
        ggtt->invalidate = gen6_ggtt_invalidate;
 
        if (HAS_EDRAM(dev_priv))
-               ggtt->base.pte_encode = iris_pte_encode;
+               ggtt->vm.pte_encode = iris_pte_encode;
        else if (IS_HASWELL(dev_priv))
-               ggtt->base.pte_encode = hsw_pte_encode;
+               ggtt->vm.pte_encode = hsw_pte_encode;
        else if (IS_VALLEYVIEW(dev_priv))
-               ggtt->base.pte_encode = byt_pte_encode;
+               ggtt->vm.pte_encode = byt_pte_encode;
        else if (INTEL_GEN(dev_priv) >= 7)
-               ggtt->base.pte_encode = ivb_pte_encode;
+               ggtt->vm.pte_encode = ivb_pte_encode;
        else
-               ggtt->base.pte_encode = snb_pte_encode;
+               ggtt->vm.pte_encode = snb_pte_encode;
+
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
 
        return ggtt_probe_common(ggtt, size);
 }
@@ -3441,7 +3499,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
-       struct drm_i915_private *dev_priv = ggtt->base.i915;
+       struct drm_i915_private *dev_priv = ggtt->vm.i915;
        phys_addr_t gmadr_base;
        int ret;
 
@@ -3451,26 +3509,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
                return -EIO;
        }
 
-       intel_gtt_get(&ggtt->base.total,
-                     &gmadr_base,
-                     &ggtt->mappable_end);
+       intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
 
        ggtt->gmadr =
                (struct resource) DEFINE_RES_MEM(gmadr_base,
                                                 ggtt->mappable_end);
 
        ggtt->do_idle_maps = needs_idle_maps(dev_priv);
-       ggtt->base.insert_page = i915_ggtt_insert_page;
-       ggtt->base.insert_entries = i915_ggtt_insert_entries;
-       ggtt->base.clear_range = i915_ggtt_clear_range;
-       ggtt->base.bind_vma = ggtt_bind_vma;
-       ggtt->base.unbind_vma = ggtt_unbind_vma;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.cleanup = i915_gmch_remove;
+       ggtt->vm.insert_page = i915_ggtt_insert_page;
+       ggtt->vm.insert_entries = i915_ggtt_insert_entries;
+       ggtt->vm.clear_range = i915_ggtt_clear_range;
+       ggtt->vm.cleanup = i915_gmch_remove;
 
        ggtt->invalidate = gmch_ggtt_invalidate;
 
+       ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
+       ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
        if (unlikely(ggtt->do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
@@ -3486,8 +3543,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
-       ggtt->base.i915 = dev_priv;
-       ggtt->base.dma = &dev_priv->drm.pdev->dev;
+       ggtt->vm.i915 = dev_priv;
+       ggtt->vm.dma = &dev_priv->drm.pdev->dev;
 
        if (INTEL_GEN(dev_priv) <= 5)
                ret = i915_gmch_probe(ggtt);
@@ -3504,27 +3561,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
         * restriction!
         */
        if (USES_GUC(dev_priv)) {
-               ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
-               ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+               ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
+               ggtt->mappable_end =
+                       min_t(u64, ggtt->mappable_end, ggtt->vm.total);
        }
 
-       if ((ggtt->base.total - 1) >> 32) {
+       if ((ggtt->vm.total - 1) >> 32) {
                DRM_ERROR("We never expected a Global GTT with more than 32bits"
                          " of address space! Found %lldM!\n",
-                         ggtt->base.total >> 20);
-               ggtt->base.total = 1ULL << 32;
-               ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
+                         ggtt->vm.total >> 20);
+               ggtt->vm.total = 1ULL << 32;
+               ggtt->mappable_end =
+                       min_t(u64, ggtt->mappable_end, ggtt->vm.total);
        }
 
-       if (ggtt->mappable_end > ggtt->base.total) {
+       if (ggtt->mappable_end > ggtt->vm.total) {
                DRM_ERROR("mappable aperture extends past end of GGTT,"
                          " aperture=%pa, total=%llx\n",
-                         &ggtt->mappable_end, ggtt->base.total);
-               ggtt->mappable_end = ggtt->base.total;
+                         &ggtt->mappable_end, ggtt->vm.total);
+               ggtt->mappable_end = ggtt->vm.total;
        }
 
        /* GMADR is the PCI mmio aperture into the global GTT. */
-       DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+       DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("DSM size = %lluM\n",
                         (u64)resource_size(&intel_graphics_stolen_res) >> 20);
@@ -3543,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        int ret;
 
-       INIT_LIST_HEAD(&dev_priv->vm_list);
+       stash_init(&dev_priv->mm.wc_stash);
 
        /* Note that we use page colouring to enforce a guard page at the
         * end of the address space. This is required as the CS may prefetch
@@ -3551,9 +3610,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
         * and beyond the end of the GTT if we do not provide a guard.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
-       i915_address_space_init(&ggtt->base, dev_priv, "[global]");
+       i915_address_space_init(&ggtt->vm, dev_priv);
+
+       /* Only VLV supports read-only GGTT mappings */
+       ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
        if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
-               ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+               ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
@@ -3576,7 +3639,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        return 0;
 
 out_gtt_cleanup:
-       ggtt->base.cleanup(&ggtt->base);
+       ggtt->vm.cleanup(&ggtt->vm);
        return ret;
 }
 
@@ -3610,34 +3673,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       struct drm_i915_gem_object *obj, *on;
+       struct i915_vma *vma, *vn;
 
        i915_check_and_clear_faults(dev_priv);
 
        /* First fill our portion of the GTT with scratch pages */
-       ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
+       ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
 
-       ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
+       ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
 
        /* clflush objects bound into the GGTT and rebind them. */
-       list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
-               bool ggtt_bound = false;
-               struct i915_vma *vma;
+       GEM_BUG_ON(!list_empty(&ggtt->vm.active_list));
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) {
+               struct drm_i915_gem_object *obj = vma->obj;
 
-               for_each_ggtt_vma(vma, obj) {
-                       if (!i915_vma_unbind(vma))
-                               continue;
+               if (!(vma->flags & I915_VMA_GLOBAL_BIND))
+                       continue;
 
-                       WARN_ON(i915_vma_bind(vma, obj->cache_level,
-                                             PIN_UPDATE));
-                       ggtt_bound = true;
-               }
+               if (!i915_vma_unbind(vma))
+                       continue;
 
-               if (ggtt_bound)
+               WARN_ON(i915_vma_bind(vma,
+                                     obj ? obj->cache_level : 0,
+                                     PIN_UPDATE));
+               if (obj)
                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
        }
 
-       ggtt->base.closed = false;
+       ggtt->vm.closed = false;
+       i915_ggtt_invalidate(dev_priv);
 
        if (INTEL_GEN(dev_priv) >= 8) {
                struct intel_ppat *ppat = &dev_priv->ppat;
@@ -3646,23 +3710,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
                dev_priv->ppat.update_hw(dev_priv);
                return;
        }
-
-       if (USES_PPGTT(dev_priv)) {
-               struct i915_address_space *vm;
-
-               list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-                       struct i915_hw_ppgtt *ppgtt;
-
-                       if (i915_is_ggtt(vm))
-                               ppgtt = dev_priv->mm.aliasing_ppgtt;
-                       else
-                               ppgtt = i915_vm_to_ppgtt(vm);
-
-                       gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
-               }
-       }
-
-       i915_ggtt_invalidate(dev_priv);
 }
 
 static struct scatterlist *
@@ -3880,7 +3927,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(range_overflows(offset, size, vm->total));
-       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));
 
        node->size = size;
@@ -3977,7 +4024,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
        GEM_BUG_ON(start >= end);
        GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
-       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));
 
        if (unlikely(range_overflows(start, size, end)))
@@ -3988,7 +4035,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
 
        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
-               mode = DRM_MM_INSERT_HIGH;
+               mode = DRM_MM_INSERT_HIGHEST;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;
 
@@ -4008,6 +4055,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
        if (err != -ENOSPC)
                return err;
 
+       if (mode & DRM_MM_INSERT_ONCE) {
+               err = drm_mm_insert_node_in_range(&vm->mm, node,
+                                                 size, alignment, color,
+                                                 start, end,
+                                                 DRM_MM_INSERT_BEST);
+               if (err != -ENOSPC)
+                       return err;
+       }
+
        if (flags & PIN_NOEVICT)
                return -ENOSPC;
 
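
The hunk above gives i915_gem_gtt_insert() a second allocation pass:
DRM_MM_INSERT_HIGHEST carries the DRM_MM_INSERT_ONCE flag, so only the
single topmost hole is probed; on -ENOSPC the code retries with an
exhaustive DRM_MM_INSERT_BEST search before falling back to eviction.
A minimal standalone model of that control flow, where the helper and
flag values are invented stand-ins rather than the drm_mm API:

    #include <errno.h>
    #include <stdio.h>

    #define INSERT_BEST    0u
    #define INSERT_ONCE    (1u << 31)        /* probe one candidate hole only */
    #define INSERT_HIGHEST (1u | INSERT_ONCE)

    static int try_insert(unsigned int mode)
    {
            /* pretend the topmost hole is too small but a best-fit exists */
            return (mode & INSERT_ONCE) ? -ENOSPC : 0;
    }

    int main(void)
    {
            unsigned int mode = INSERT_HIGHEST;
            int err = try_insert(mode);

            if (err == -ENOSPC && (mode & INSERT_ONCE))
                    err = try_insert(INSERT_BEST);   /* the fallback pass */

            printf("err = %d\n", err);
            return 0;
    }
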
index aec4f73574f4de684be02e0687b4da763344435e..2a116a91420bc6fab6b31c3638a596bb64b61a33 100644 (file)
@@ -58,6 +58,7 @@
 
 struct drm_i915_file_private;
 struct drm_i915_fence_reg;
+struct i915_vma;
 
 typedef u32 gen6_pte_t;
 typedef u64 gen8_pte_t;
@@ -65,7 +66,7 @@ typedef u64 gen8_pde_t;
 typedef u64 gen8_ppgtt_pdpe_t;
 typedef u64 gen8_ppgtt_pml4e_t;
 
-#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
 
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
@@ -254,6 +255,26 @@ struct i915_pml4 {
        struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
 };
 
+struct i915_vma_ops {
+       /* Map an object into an address space with the given cache flags. */
+       int (*bind_vma)(struct i915_vma *vma,
+                       enum i915_cache_level cache_level,
+                       u32 flags);
+       /*
+        * Unmap an object from an address space. This usually consists of
+        * setting the valid PTE entries to a reserved scratch page.
+        */
+       void (*unbind_vma)(struct i915_vma *vma);
+
+       int (*set_pages)(struct i915_vma *vma);
+       void (*clear_pages)(struct i915_vma *vma);
+};
+
+struct pagestash {
+       spinlock_t lock;
+       struct pagevec pvec;
+};
+
 struct i915_address_space {
        struct drm_mm mm;
        struct drm_i915_private *i915;
@@ -267,12 +288,13 @@ struct i915_address_space {
         * assign blame.
         */
        struct drm_i915_file_private *file;
-       struct list_head global_link;
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
        u64 reserved;           /* size addr space reserved */
 
        bool closed;
 
+       struct mutex mutex; /* protects vma and our lists */
+
        struct i915_page_dma scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
@@ -308,8 +330,13 @@ struct i915_address_space {
         */
        struct list_head unbound_list;
 
-       struct pagevec free_pages;
-       bool pt_kmap_wc;
+       struct pagestash free_pages;
+
+       /* Some systems require uncached updates of the page directories */
+       bool pt_kmap_wc:1;
+
+       /* Some systems support read-only mappings for GGTT and/or PPGTT */
+       bool has_read_only:1;
 
        /* FIXME: Need a more generic return type */
        gen6_pte_t (*pte_encode)(dma_addr_t addr,
@@ -331,15 +358,8 @@ struct i915_address_space {
                               enum i915_cache_level cache_level,
                               u32 flags);
        void (*cleanup)(struct i915_address_space *vm);
-       /** Unmap an object from an address space. This usually consists of
-        * setting the valid PTE entries to a reserved scratch page. */
-       void (*unbind_vma)(struct i915_vma *vma);
-       /* Map an object into an address space with the given cache flags. */
-       int (*bind_vma)(struct i915_vma *vma,
-                       enum i915_cache_level cache_level,
-                       u32 flags);
-       int (*set_pages)(struct i915_vma *vma);
-       void (*clear_pages)(struct i915_vma *vma);
+
+       struct i915_vma_ops vma_ops;
 
        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
        I915_SELFTEST_DECLARE(bool scrub_64K);
@@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
  * the spec.
  */
 struct i915_ggtt {
-       struct i915_address_space base;
+       struct i915_address_space vm;
 
        struct io_mapping iomap;        /* Mapping to our CPU mappable region */
        struct resource gmadr;          /* GMADR resource */
@@ -385,9 +405,9 @@ struct i915_ggtt {
 };
 
 struct i915_hw_ppgtt {
-       struct i915_address_space base;
+       struct i915_address_space vm;
        struct kref ref;
-       struct drm_mm_node node;
+
        unsigned long pd_dirty_rings;
        union {
                struct i915_pml4 pml4;          /* GEN8+ & 48b PPGTT */
@@ -395,13 +415,28 @@ struct i915_hw_ppgtt {
                struct i915_page_directory pd;          /* GEN6-7 */
        };
 
+       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
+struct gen6_hw_ppgtt {
+       struct i915_hw_ppgtt base;
+
+       struct i915_vma *vma;
        gen6_pte_t __iomem *pd_addr;
+       gen6_pte_t scratch_pte;
 
-       int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-                        struct i915_request *rq);
-       void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+       unsigned int pin_count;
+       bool scan_for_unused_pt;
 };
 
+#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base)
+
+static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base)
+{
+       BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base));
+       return __to_gen6_ppgtt(base);
+}
+
 /*
  * gen6_for_each_pde() iterates over every pde from start until start+length.
  * If start and start+length are not perfectly divisible, the macro will round
@@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
        const u64 mask = ~((1ULL << pde_shift) - 1);
        u64 end;
 
-       WARN_ON(length == 0);
-       WARN_ON(offset_in_page(addr|length));
+       GEM_BUG_ON(length == 0);
+       GEM_BUG_ON(offset_in_page(addr | length));
 
        end = addr + length;
 
@@ -543,7 +578,7 @@ static inline struct i915_ggtt *
 i915_vm_to_ggtt(struct i915_address_space *vm)
 {
        GEM_BUG_ON(!i915_is_ggtt(vm));
-       return container_of(vm, struct i915_ggtt, base);
+       return container_of(vm, struct i915_ggtt, vm);
 }
 
 #define INTEL_MAX_PPAT_ENTRIES 8
@@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
-                                       struct drm_i915_file_private *fpriv,
-                                       const char *name);
+                                       struct drm_i915_file_private *fpriv);
 void i915_ppgtt_close(struct i915_address_space *vm);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
@@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
                kref_put(&ppgtt->ref, i915_ppgtt_release);
 }
 
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
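
i915_vm_to_ggtt() and the new to_gen6_ppgtt() above are both
container_of() downcasts from the embedded i915_address_space or
i915_hw_ppgtt, and the BUILD_BUG_ON(offsetof(...)) pins the embedded
base at offset zero so the conversion is a no-op cast. The pattern as a
self-contained program with stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base    { int total; };
    struct derived { struct base base; int extra; };

    int main(void)
    {
            struct derived d = { .base = { .total = 42 }, .extra = 7 };
            struct base *vm = &d.base;

            /* recover the containing object, as to_gen6_ppgtt() does */
            struct derived *back = container_of(vm, struct derived, base);
            printf("%d %d\n", back->base.total, back->extra);
            return 0;
    }
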
index 54f00b350779733d41a0f3ca0a79445a00595eab..83e5e01fa9eaa9c8586445329089959290f2f32d 100644 (file)
@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
         */
-       unsigned long gt_ro:1;
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -268,7 +267,6 @@ struct drm_i915_gem_object {
        union {
                struct i915_gem_userptr {
                        uintptr_t ptr;
-                       unsigned read_only :1;
 
                        struct i915_mm_struct *mm;
                        struct i915_mmu_object *mmu_object;
@@ -337,26 +335,17 @@ __attribute__((nonnull))
 static inline struct drm_i915_gem_object *
 i915_gem_object_get(struct drm_i915_gem_object *obj)
 {
-       drm_gem_object_reference(&obj->base);
+       drm_gem_object_get(&obj->base);
        return obj;
 }
 
-__deprecated
-extern void drm_gem_object_reference(struct drm_gem_object *);
-
 __attribute__((nonnull))
 static inline void
 i915_gem_object_put(struct drm_i915_gem_object *obj)
 {
-       __drm_gem_object_unreference(&obj->base);
+       __drm_gem_object_put(&obj->base);
 }
 
-__deprecated
-extern void drm_gem_object_unreference(struct drm_gem_object *);
-
-__deprecated
-extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-
 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 {
        reservation_object_lock(obj->resv, NULL);
@@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
        reservation_object_unlock(obj->resv);
 }
 
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+       obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+       return obj->base.vma_node.readonly;
+}
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
index 1036e8686916578accdb30f850401b4b384a6ba0..90baf9086d0a49f0fd2667a20ec337f520361051 100644 (file)
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
        if (IS_ERR(so.obj))
                return PTR_ERR(so.obj);
 
-       so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL);
+       so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(so.vma)) {
                err = PTR_ERR(so.vma);
                goto err_obj;
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
                        goto err_unpin;
        }
 
-       i915_vma_move_to_active(so.vma, rq, 0);
+       err = i915_vma_move_to_active(so.vma, rq, 0);
 err_unpin:
        i915_vma_unpin(so.vma);
 err_vma:
index 5757fb7c4b5af567d6841f128887875f03d96487..ea90d3a0d51143dc4a189b0e15a3ecf7c37c214c 100644 (file)
@@ -23,6 +23,7 @@
  */
 
 #include <linux/oom.h>
+#include <linux/sched/mm.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
         * we will free as much as we can and hope to get a second chance.
         */
        if (flags & I915_SHRINK_ACTIVE)
-               i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+               i915_gem_wait_for_idle(i915,
+                                      I915_WAIT_LOCKED,
+                                      MAX_SCHEDULE_TIMEOUT);
 
        trace_i915_gem_shrink(i915, target, flags);
        i915_retire_requests(i915);
@@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
        unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
 
        do {
-               if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+               if (i915_gem_wait_for_idle(i915,
+                                          0, MAX_SCHEDULE_TIMEOUT) == 0 &&
                    shrinker_lock(i915, unlock))
                        break;
 
@@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
                return NOTIFY_DONE;
 
        /* Force everything onto the inactive lists */
-       ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+       ret = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_LOCKED,
+                                    MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;
 
@@ -480,7 +486,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
 
        /* We also want to clear any cached iomaps as they wrap vmap */
        list_for_each_entry_safe(vma, next,
-                                &i915->ggtt.base.inactive_list, vm_link) {
+                                &i915->ggtt.vm.inactive_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;
                if (vma->iomap && i915_vma_unbind(vma) == 0)
                        freed_pages += count;
@@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
        WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
        unregister_shrinker(&i915->mm.shrinker);
 }
+
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+{
+       if (!IS_ENABLED(CONFIG_LOCKDEP))
+               return;
+
+       fs_reclaim_acquire(GFP_KERNEL);
+       mutex_lock(mutex);
+       mutex_unlock(mutex);
+       fs_reclaim_release(GFP_KERNEL);
+}
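
The new i915_gem_shrinker_taints_mutex() is a lockdep priming helper:
taking the mutex once inside an fs_reclaim_acquire()/fs_reclaim_release()
pair records a reclaim-then-mutex ordering, so lockdep can immediately
flag any later allocation attempted while the mutex is held, since such
an allocation could recurse into the shrinker and deadlock. A plausible
call site, shown only as a hypothetical sketch (not part of this diff):

    /* Hypothetical init-time priming of a lock the shrinker may take. */
    mutex_init(&vm->mutex);
    i915_gem_shrinker_taints_mutex(&vm->mutex);
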
index ad949cc3092816cc123d8109d80151dfcc9e6ac9..53440bf876501ced342bf8d931af8b64fc8e564e 100644 (file)
@@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv,
        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        default:
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
+               /* fall through */
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
@@ -343,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
        *size = stolen_top - *base;
 }
 
+static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+                                   resource_size_t *base,
+                                   resource_size_t *size)
+{
+       u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+
+       DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+
+       *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
+
+       switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+       case GEN8_STOLEN_RESERVED_1M:
+               *size = 1024 * 1024;
+               break;
+       case GEN8_STOLEN_RESERVED_2M:
+               *size = 2 * 1024 * 1024;
+               break;
+       case GEN8_STOLEN_RESERVED_4M:
+               *size = 4 * 1024 * 1024;
+               break;
+       case GEN8_STOLEN_RESERVED_8M:
+               *size = 8 * 1024 * 1024;
+               break;
+       default:
+               *size = 8 * 1024 * 1024;
+               MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+       }
+}
+
 int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
        resource_size_t reserved_base, stolen_top;
@@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
                        gen7_get_stolen_reserved(dev_priv,
                                                 &reserved_base, &reserved_size);
                break;
-       default:
+       case 8:
+       case 9:
+       case 10:
                if (IS_LP(dev_priv))
                        chv_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
@@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
                        bdw_get_stolen_reserved(dev_priv,
                                                &reserved_base, &reserved_size);
                break;
+       case 11:
+       default:
+               icl_get_stolen_reserved(dev_priv, &reserved_base,
+                                       &reserved_size);
+               break;
        }
 
        /*
@@ -642,7 +679,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
        if (ret)
                goto err;
 
-       vma = i915_vma_instance(obj, &ggtt->base, NULL);
+       vma = i915_vma_instance(obj, &ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_pages;
@@ -653,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
-       ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+       ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
                                   size, gtt_offset, obj->cache_level,
                                   0);
        if (ret) {
@@ -666,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
-       list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+       list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
 
        spin_lock(&dev_priv->mm.obj_lock);
        list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
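
icl_get_stolen_reserved() above decodes the reserved-size field into 1,
2, 4 or 8 MiB, and the MISSING_CASE default picks the largest value,
presumably so an unknown encoding never under-reserves. The decode as a
standalone program, with symbolic stand-ins for the GEN8_* field values:

    #include <stdio.h>

    enum { RSVD_1M, RSVD_2M, RSVD_4M, RSVD_8M }; /* stand-ins, not the real bits */

    static unsigned long decode_reserved_size(int field)
    {
            switch (field) {
            case RSVD_1M: return 1UL << 20;
            case RSVD_2M: return 2UL << 20;
            case RSVD_4M: return 4UL << 20;
            case RSVD_8M: return 8UL << 20;
            default:      return 8UL << 20; /* mirrors the MISSING_CASE fallback */
            }
    }

    int main(void)
    {
            printf("%lu bytes\n", decode_reserved_size(RSVD_4M));
            return 0;
    }
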
index 854bd51b9478a59d6b0c18820c85498674842a66..dcd6e230d16aa7905c1ae075fab469e333f7ef8b 100644 (file)
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
                struct mm_struct *mm = obj->userptr.mm->mm;
                unsigned int flags = 0;
 
-               if (!obj->userptr.read_only)
+               if (!i915_gem_object_is_readonly(obj))
                        flags |= FOLL_WRITE;
 
                ret = -EFAULT;
@@ -643,7 +643,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                if (pvec) /* defer to worker if malloc fails */
                        pinned = __get_user_pages_fast(obj->userptr.ptr,
                                                       num_pages,
-                                                      !obj->userptr.read_only,
+                                                      !i915_gem_object_is_readonly(obj),
                                                       pvec);
        }
 
@@ -789,10 +789,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
                return -EFAULT;
 
        if (args->flags & I915_USERPTR_READ_ONLY) {
-               /* On almost all of the current hw, we cannot tell the GPU that a
-                * page is readonly, so this is just a placeholder in the uAPI.
+               struct i915_hw_ppgtt *ppgtt;
+
+               /*
+                * On almost all of the older hw, we cannot tell the GPU that
+                * a page is readonly.
                 */
-               return -ENODEV;
+               ppgtt = dev_priv->kernel_context->ppgtt;
+               if (!ppgtt || !ppgtt->vm.has_read_only)
+                       return -ENODEV;
        }
 
        obj = i915_gem_object_alloc(dev_priv);
@@ -806,7 +811,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
 
        obj->userptr.ptr = args->user_ptr;
-       obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+       if (args->flags & I915_USERPTR_READ_ONLY)
+               i915_gem_object_set_readonly(obj);
 
        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
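
With the vm.has_read_only check above, I915_USERPTR_READ_ONLY stops
being a rejected placeholder and works on hardware whose PPGTT supports
read-only PTEs. From userspace the request looks roughly like this
sketch, assuming the usual libdrm/kernel uAPI headers and with error
handling trimmed:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Returns a GEM handle, or -1. On hardware without read-only PTEs
     * the ioctl still fails with ENODEV; previously it always did. */
    static int create_ro_userptr(int drm_fd, void *ptr, uint64_t size)
    {
            struct drm_i915_gem_userptr arg;

            memset(&arg, 0, sizeof(arg));
            arg.user_ptr = (uint64_t)(uintptr_t)ptr;
            arg.user_size = size;
            arg.flags = I915_USERPTR_READ_ONLY;

            if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
                    return -1;
            return arg.handle;
    }
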
index df234dc23274fe254c9210183ec8dd7f02de59c4..f7f2aa71d8d99f1c4fa4e4d4632adeb48ecd50f3 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/stop_machine.h>
 #include <linux/zlib.h>
 #include <drm/drm_print.h>
+#include <linux/ascii85.h>
 
 #include "i915_gpu_error.h"
 #include "i915_drv.h"
@@ -335,21 +336,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
                                struct drm_i915_error_buffer *err,
                                int count)
 {
-       int i;
-
        err_printf(m, "%s [%d]:\n", name, count);
 
        while (count--) {
-               err_printf(m, "    %08x_%08x %8u %02x %02x ",
+               err_printf(m, "    %08x_%08x %8u %02x %02x %02x",
                           upper_32_bits(err->gtt_offset),
                           lower_32_bits(err->gtt_offset),
                           err->size,
                           err->read_domains,
-                          err->write_domain);
-               for (i = 0; i < I915_NUM_ENGINES; i++)
-                       err_printf(m, "%02x ", err->rseqno[i]);
-
-               err_printf(m, "] %02x", err->wseqno);
+                          err->write_domain,
+                          err->wseqno);
                err_puts(m, tiling_flag(err->tiling));
                err_puts(m, dirty_flag(err->dirty));
                err_puts(m, purgeable_flag(err->purgeable));
@@ -522,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
        va_end(args);
 }
 
-static int
-ascii85_encode_len(int len)
-{
-       return DIV_ROUND_UP(len, 4);
-}
-
-static bool
-ascii85_encode(u32 in, char *out)
-{
-       int i;
-
-       if (in == 0)
-               return false;
-
-       out[5] = '\0';
-       for (i = 5; i--; ) {
-               out[i] = '!' + in % 85;
-               in /= 85;
-       }
-
-       return true;
-}
-
 static void print_error_obj(struct drm_i915_error_state_buf *m,
                            struct intel_engine_cs *engine,
                            const char *name,
                            struct drm_i915_error_object *obj)
 {
-       char out[6];
+       char out[ASCII85_BUFSZ];
        int page;
 
        if (!obj)
@@ -572,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
                        len -= obj->unused;
                len = ascii85_encode_len(len);
 
-               for (i = 0; i < len; i++) {
-                       if (ascii85_encode(obj->pages[page][i], out))
-                               err_puts(m, out);
-                       else
-                               err_puts(m, "z");
-               }
+               for (i = 0; i < len; i++)
+                       err_puts(m, ascii85_encode(obj->pages[page][i], out));
        }
        err_puts(m, "\n");
 }
@@ -973,8 +942,7 @@ i915_error_object_create(struct drm_i915_private *i915,
                void __iomem *s;
                int ret;
 
-               ggtt->base.insert_page(&ggtt->base, dma, slot,
-                                      I915_CACHE_NONE, 0);
+               ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
                s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
@@ -993,7 +961,7 @@ unwind:
 
 out:
        compress_fini(&compress, dst);
-       ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
+       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
 }
 
@@ -1022,13 +990,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
                       struct i915_vma *vma)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       int i;
 
        err->size = obj->base.size;
        err->name = obj->base.name;
 
-       for (i = 0; i < I915_NUM_ENGINES; i++)
-               err->rseqno[i] = __active_get_seqno(&vma->last_read[i]);
        err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
        err->engine = __active_get_engine_id(&obj->frontbuffer_write);
 
@@ -1051,6 +1016,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
        int i = 0;
 
        list_for_each_entry(vma, head, vm_link) {
+               if (!vma->obj)
+                       continue;
+
                if (pinned_only && !i915_vma_is_pinned(vma))
                        continue;
 
@@ -1287,9 +1255,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(struct i915_request *request,
                           struct drm_i915_error_request *erq)
 {
-       erq->context = request->ctx->hw_id;
+       struct i915_gem_context *ctx = request->gem_context;
+
+       erq->context = ctx->hw_id;
        erq->sched_attr = request->sched.attr;
-       erq->ban_score = atomic_read(&request->ctx->ban_score);
+       erq->ban_score = atomic_read(&ctx->ban_score);
        erq->seqno = request->global_seqno;
        erq->jiffies = request->emitted_jiffies;
        erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1267,7 @@ static void record_request(struct i915_request *request,
        erq->tail = request->tail;
 
        rcu_read_lock();
-       erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+       erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
        rcu_read_unlock();
 }
 
@@ -1461,12 +1431,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                request = i915_gem_find_active_request(engine);
                if (request) {
+                       struct i915_gem_context *ctx = request->gem_context;
                        struct intel_ring *ring;
 
-                       ee->vm = request->ctx->ppgtt ?
-                               &request->ctx->ppgtt->base : &ggtt->base;
+                       ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
 
-                       record_context(&ee->context, request->ctx);
+                       record_context(&ee->context, ctx);
 
                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
@@ -1483,11 +1453,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                        ee->ctx =
                                i915_error_object_create(i915,
-                                                        to_intel_context(request->ctx,
-                                                                         engine)->state);
+                                                        request->hw_context->state);
 
                        error->simulated |=
-                               i915_gem_context_no_error_capture(request->ctx);
+                               i915_gem_context_no_error_capture(ctx);
 
                        ee->rq_head = request->head;
                        ee->rq_post = request->postfix;
@@ -1563,17 +1532,17 @@ static void capture_active_buffers(struct i915_gpu_state *error)
 
 static void capture_pinned_buffers(struct i915_gpu_state *error)
 {
-       struct i915_address_space *vm = &error->i915->ggtt.base;
+       struct i915_address_space *vm = &error->i915->ggtt.vm;
        struct drm_i915_error_buffer *bo;
        struct i915_vma *vma;
        int count_inactive, count_active;
 
        count_inactive = 0;
-       list_for_each_entry(vma, &vm->active_list, vm_link)
+       list_for_each_entry(vma, &vm->inactive_list, vm_link)
                count_inactive++;
 
        count_active = 0;
-       list_for_each_entry(vma, &vm->inactive_list, vm_link)
+       list_for_each_entry(vma, &vm->active_list, vm_link)
                count_active++;
 
        bo = NULL;
@@ -1667,7 +1636,16 @@ static void capture_reg_state(struct i915_gpu_state *error)
        }
 
        /* 4: Everything else */
-       if (INTEL_GEN(dev_priv) >= 8) {
+       if (INTEL_GEN(dev_priv) >= 11) {
+               error->ier = I915_READ(GEN8_DE_MISC_IER);
+               error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE);
+               error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE);
+               error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE);
+               error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+               error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE);
+               error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE);
+               error->ngtier = 6;
+       } else if (INTEL_GEN(dev_priv) >= 8) {
                error->ier = I915_READ(GEN8_DE_MISC_IER);
                for (i = 0; i < 4; i++)
                        error->gtier[i] = I915_READ(GEN8_GT_IER(i));
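
The open-coded ascii85 encoder deleted above now comes from
<linux/ascii85.h>, whose helper returns the literal string "z" for an
all-zero word and otherwise fills a 6-byte (ASCII85_BUFSZ) buffer with
the 5-character encoding. The same behaviour, reproduced as a plain
userspace program:

    #include <stdint.h>
    #include <stdio.h>

    #define ASCII85_BUFSZ 6

    static const char *ascii85_encode(uint32_t in, char *out)
    {
            int i;

            if (in == 0)
                    return "z"; /* compressed form for an all-zero word */

            out[5] = '\0';
            for (i = 5; i--; ) {
                    out[i] = '!' + in % 85;
                    in /= 85;
            }
            return out;
    }

    int main(void)
    {
            char buf[ASCII85_BUFSZ];

            printf("%s %s\n", ascii85_encode(0, buf),
                   ascii85_encode(0x12345678, buf));
            return 0;
    }
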
index dac0f8c4c1cfa776d3e79845a9e036361b511afd..f893a4e8b7831d7b214cf60ab7bcf9b058070714 100644 (file)
@@ -58,7 +58,7 @@ struct i915_gpu_state {
        u32 eir;
        u32 pgtbl_er;
        u32 ier;
-       u32 gtier[4], ngtier;
+       u32 gtier[6], ngtier;
        u32 ccid;
        u32 derrmr;
        u32 forcewake;
@@ -177,7 +177,7 @@ struct i915_gpu_state {
        struct drm_i915_error_buffer {
                u32 size;
                u32 name;
-               u32 rseqno[I915_NUM_ENGINES], wseqno;
+               u32 wseqno;
                u64 gtt_offset;
                u32 read_domains;
                u32 write_domain;
index f9bc3aaa90d0f5de110e893415be0a6ee1c40448..90628a47ae17f81312dff51ddbc89aff4af55654 100644 (file)
@@ -115,6 +115,22 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
        [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
 };
 
+static const u32 hpd_gen11[HPD_NUM_PINS] = {
+       [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
+       [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
+       [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
+       [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+};
+
+static const u32 hpd_icp[HPD_NUM_PINS] = {
+       [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
+       [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
+       [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
+       [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
+       [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
+       [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
+};
+
 /* IIR can theoretically queue up two events. Be paranoid. */
 #define GEN8_IRQ_RESET_NDX(type, which) do { \
        I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -247,9 +263,9 @@ static u32
 gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);
 
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
-                        const unsigned int bank,
-                        const unsigned int bit)
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+                               const unsigned int bank,
+                               const unsigned int bit)
 {
        void __iomem * const regs = i915->regs;
        u32 dw;
@@ -1138,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
+       const u32 seqno = intel_engine_get_seqno(engine);
        struct i915_request *rq = NULL;
+       struct task_struct *tsk = NULL;
        struct intel_wait *wait;
 
-       if (!engine->breadcrumbs.irq_armed)
+       if (unlikely(!engine->breadcrumbs.irq_armed))
                return;
 
-       atomic_inc(&engine->irq_count);
-       set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+       rcu_read_lock();
 
        spin_lock(&engine->breadcrumbs.irq_lock);
        wait = engine->breadcrumbs.irq_wait;
        if (wait) {
-               bool wakeup = engine->irq_seqno_barrier;
-
-               /* We use a callback from the dma-fence to submit
+               /*
+                * We use a callback from the dma-fence to submit
                 * requests after waiting on our own requests. To
                 * ensure minimum delay in queuing the next request to
                 * hardware, signal the fence now rather than wait for
@@ -1163,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine)
                 * and to handle coalescing of multiple seqno updates
                 * and many waiters.
                 */
-               if (i915_seqno_passed(intel_engine_get_seqno(engine),
-                                     wait->seqno)) {
+               if (i915_seqno_passed(seqno, wait->seqno)) {
                        struct i915_request *waiter = wait->request;
 
-                       wakeup = true;
-                       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                       if (waiter &&
+                           !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &waiter->fence.flags) &&
                            intel_wait_check_request(wait, waiter))
                                rq = i915_request_get(waiter);
+
+                       tsk = wait->tsk;
+               } else {
+                       if (engine->irq_seqno_barrier &&
+                           i915_seqno_passed(seqno, wait->seqno - 1)) {
+                               set_bit(ENGINE_IRQ_BREADCRUMB,
+                                       &engine->irq_posted);
+                               tsk = wait->tsk;
+                       }
                }
 
-               if (wakeup)
-                       wake_up_process(wait->tsk);
+               engine->breadcrumbs.irq_count++;
        } else {
                if (engine->breadcrumbs.irq_armed)
                        __intel_engine_disarm_breadcrumbs(engine);
@@ -1183,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine)
        spin_unlock(&engine->breadcrumbs.irq_lock);
 
        if (rq) {
-               dma_fence_signal(&rq->fence);
+               spin_lock(&rq->lock);
+               dma_fence_signal_locked(&rq->fence);
                GEM_BUG_ON(!i915_request_completed(rq));
+               spin_unlock(&rq->lock);
+
                i915_request_put(rq);
        }
 
+       if (tsk && tsk->state & TASK_NORMAL)
+               wake_up_process(tsk);
+
+       rcu_read_unlock();
+
        trace_intel_engine_notify(engine, wait);
 }
 
@@ -1234,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
                c0 = max(render, media);
                c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
 
-               if (c0 > time * rps->up_threshold)
+               if (c0 > time * rps->power.up_threshold)
                        events = GEN6_PM_RP_UP_THRESHOLD;
-               else if (c0 < time * rps->down_threshold)
+               else if (c0 < time * rps->power.down_threshold)
                        events = GEN6_PM_RP_DOWN_THRESHOLD;
        }
 
@@ -1462,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void
 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
        bool tasklet = false;
 
-       if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
-               if (READ_ONCE(engine->execlists.active))
-                       tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
-                                                   &engine->irq_posted);
-       }
+       if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+               tasklet = true;
 
        if (iir & GT_RENDER_USER_INTERRUPT) {
                notify_ring(engine);
@@ -1477,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
        }
 
        if (tasklet)
-               tasklet_hi_schedule(&execlists->tasklet);
+               tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
 static void gen8_gt_irq_ack(struct drm_i915_private *i915,
@@ -1549,78 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
        }
 }
 
-static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-       switch (port) {
-       case PORT_A:
+       switch (pin) {
+       case HPD_PORT_C:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
+       case HPD_PORT_D:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
+       case HPD_PORT_E:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
+       case HPD_PORT_F:
+               return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
+       default:
+               return false;
+       }
+}
+
+static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+       switch (pin) {
+       case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
-       case PORT_B:
+       case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
-       case PORT_C:
+       case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
 }
 
-static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+       switch (pin) {
+       case HPD_PORT_A:
+               return val & ICP_DDIA_HPD_LONG_DETECT;
+       case HPD_PORT_B:
+               return val & ICP_DDIB_HPD_LONG_DETECT;
+       default:
+               return false;
+       }
+}
+
+static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
+{
+       switch (pin) {
+       case HPD_PORT_C:
+               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
+       case HPD_PORT_D:
+               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
+       case HPD_PORT_E:
+               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
+       case HPD_PORT_F:
+               return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
+       default:
+               return false;
+       }
+}
+
+static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
 {
-       switch (port) {
-       case PORT_E:
+       switch (pin) {
+       case HPD_PORT_E:
                return val & PORTE_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
 }
 
-static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-       switch (port) {
-       case PORT_A:
+       switch (pin) {
+       case HPD_PORT_A:
                return val & PORTA_HOTPLUG_LONG_DETECT;
-       case PORT_B:
+       case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
-       case PORT_C:
+       case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
-       case PORT_D:
+       case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
 }
 
-static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-       switch (port) {
-       case PORT_A:
+       switch (pin) {
+       case HPD_PORT_A:
                return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
 }
 
-static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-       switch (port) {
-       case PORT_B:
+       switch (pin) {
+       case HPD_PORT_B:
                return val & PORTB_HOTPLUG_LONG_DETECT;
-       case PORT_C:
+       case HPD_PORT_C:
                return val & PORTC_HOTPLUG_LONG_DETECT;
-       case PORT_D:
+       case HPD_PORT_D:
                return val & PORTD_HOTPLUG_LONG_DETECT;
        default:
                return false;
        }
 }
 
-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-       switch (port) {
-       case PORT_B:
+       switch (pin) {
+       case HPD_PORT_B:
                return val & PORTB_HOTPLUG_INT_LONG_PULSE;
-       case PORT_C:
+       case HPD_PORT_C:
                return val & PORTC_HOTPLUG_INT_LONG_PULSE;
-       case PORT_D:
+       case HPD_PORT_D:
                return val & PORTD_HOTPLUG_INT_LONG_PULSE;
        default:
                return false;
@@ -1638,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
                               u32 *pin_mask, u32 *long_mask,
                               u32 hotplug_trigger, u32 dig_hotplug_reg,
                               const u32 hpd[HPD_NUM_PINS],
-                              bool long_pulse_detect(enum port port, u32 val))
+                              bool long_pulse_detect(enum hpd_pin pin, u32 val))
 {
-       enum port port;
-       int i;
+       enum hpd_pin pin;
 
-       for_each_hpd_pin(i) {
-               if ((hpd[i] & hotplug_trigger) == 0)
+       for_each_hpd_pin(pin) {
+               if ((hpd[pin] & hotplug_trigger) == 0)
                        continue;
 
-               *pin_mask |= BIT(i);
-
-               port = intel_hpd_pin_to_port(dev_priv, i);
-               if (port == PORT_NONE)
-                       continue;
+               *pin_mask |= BIT(pin);
 
-               if (long_pulse_detect(port, dig_hotplug_reg))
-                       *long_mask |= BIT(i);
+               if (long_pulse_detect(pin, dig_hotplug_reg))
+                       *long_mask |= BIT(pin);
        }
 
-       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
-                        hotplug_trigger, dig_hotplug_reg, *pin_mask);
+       DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+                        hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
 
 }
 
@@ -1680,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                         uint32_t crc4)
 {
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-       struct intel_pipe_crc_entry *entry;
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
-       struct drm_driver *driver = dev_priv->drm.driver;
        uint32_t crcs[5];
-       int head, tail;
 
        spin_lock(&pipe_crc->lock);
-       if (pipe_crc->source && !crtc->base.crc.opened) {
-               if (!pipe_crc->entries) {
-                       spin_unlock(&pipe_crc->lock);
-                       DRM_DEBUG_KMS("spurious interrupt\n");
-                       return;
-               }
-
-               head = pipe_crc->head;
-               tail = pipe_crc->tail;
-
-               if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
-                       spin_unlock(&pipe_crc->lock);
-                       DRM_ERROR("CRC buffer overflowing\n");
-                       return;
-               }
-
-               entry = &pipe_crc->entries[head];
-
-               entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
-               entry->crc[0] = crc0;
-               entry->crc[1] = crc1;
-               entry->crc[2] = crc2;
-               entry->crc[3] = crc3;
-               entry->crc[4] = crc4;
-
-               head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-               pipe_crc->head = head;
-
-               spin_unlock(&pipe_crc->lock);
-
-               wake_up_interruptible(&pipe_crc->wq);
-       } else {
-               /*
-                * For some not yet identified reason, the first CRC is
-                * bonkers. So let's just wait for the next vblank and read
-                * out the buggy result.
-                *
-                * On GEN8+ sometimes the second CRC is bonkers as well, so
-                * don't trust that one either.
-                */
-               if (pipe_crc->skipped <= 0 ||
-                   (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
-                       pipe_crc->skipped++;
-                       spin_unlock(&pipe_crc->lock);
-                       return;
-               }
+       /*
+        * For some not yet identified reason, the first CRC is
+        * bonkers. So let's just wait for the next vblank and read
+        * out the buggy result.
+        *
+        * On GEN8+ sometimes the second CRC is bonkers as well, so
+        * don't trust that one either.
+        */
+       if (pipe_crc->skipped <= 0 ||
+           (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
+               pipe_crc->skipped++;
                spin_unlock(&pipe_crc->lock);
-               crcs[0] = crc0;
-               crcs[1] = crc1;
-               crcs[2] = crc2;
-               crcs[3] = crc3;
-               crcs[4] = crc4;
-               drm_crtc_add_crc_entry(&crtc->base, true,
-                                      drm_crtc_accurate_vblank_count(&crtc->base),
-                                      crcs);
+               return;
        }
+       spin_unlock(&pipe_crc->lock);
+
+       crcs[0] = crc0;
+       crcs[1] = crc1;
+       crcs[2] = crc2;
+       crcs[3] = crc3;
+       crcs[4] = crc4;
+       drm_crtc_add_crc_entry(&crtc->base, true,
+                               drm_crtc_accurate_vblank_count(&crtc->base),
+                               crcs);
 }
 #else
 static inline void
@@ -1893,9 +1924,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 
                /*
                 * Clear the PIPE*STAT regs before the IIR
+                *
+                * Toggle the enable bits to make sure we get an
+                * edge in the ISR pipe event bit if we don't clear
+                * all the enabled status bits. Otherwise the edge
+                * triggered IIR on i965/g4x wouldn't notice that
+                * an interrupt is still pending.
                 */
-               if (pipe_stats[pipe])
-                       I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+               if (pipe_stats[pipe]) {
+                       I915_WRITE(reg, pipe_stats[pipe]);
+                       I915_WRITE(reg, enable_mask);
+               }
        }
        spin_unlock(&dev_priv->irq_lock);
 }
@@ -1990,10 +2029,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-       u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+       u32 hotplug_status = 0, hotplug_status_mask;
+       int i;
+
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+                       DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+       else
+               hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+       /*
+        * We absolutely have to clear all the pending interrupt
+        * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+        * interrupt bit won't have an edge, and the i965/g4x
+        * edge triggered IIR will not notice that an interrupt
+        * is still pending. We can't use PORT_HOTPLUG_EN to
+        * guarantee the edge as the act of toggling the enable
+        * bits can itself generate a new hotplug interrupt :(
+        */
+       for (i = 0; i < 10; i++) {
+               u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+               if (tmp == 0)
+                       return hotplug_status;
 
-       if (hotplug_status)
+               hotplug_status |= tmp;
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+       }
+
+       WARN_ONCE(1,
+                 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+                 I915_READ(PORT_HOTPLUG_STAT));
 
        return hotplug_status;
 }
@@ -2100,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
                I915_WRITE(VLV_IER, ier);
                I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-               POSTING_READ(VLV_MASTER_IER);
 
                if (gt_iir)
                        snb_gt_irq_handler(dev_priv, gt_iir);
@@ -2185,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
                I915_WRITE(VLV_IER, ier);
                I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-               POSTING_READ(GEN8_MASTER_IRQ);
 
                gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
 
@@ -2354,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
                cpt_serr_int_handler(dev_priv);
 }
 
+static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
+{
+       u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
+       u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
+       u32 pin_mask = 0, long_mask = 0;
+
+       if (ddi_hotplug_trigger) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
+               I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
+
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                                  ddi_hotplug_trigger,
+                                  dig_hotplug_reg, hpd_icp,
+                                  icp_ddi_port_hotplug_long_detect);
+       }
+
+       if (tc_hotplug_trigger) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
+               I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
+
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
+                                  tc_hotplug_trigger,
+                                  dig_hotplug_reg, hpd_icp,
+                                  icp_tc_port_hotplug_long_detect);
+       }
+
+       if (pin_mask)
+               intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+
+       if (pch_iir & SDE_GMBUS_ICP)
+               gmbus_irq_handler(dev_priv);
+}
+
 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
@@ -2517,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-       POSTING_READ(DEIER);
 
        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
@@ -2527,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        if (!HAS_PCH_NOP(dev_priv)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
-               POSTING_READ(SDEIER);
        }
 
        /* Find, clear, then process each source of interrupt */
@@ -2562,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        }
 
        I915_WRITE(DEIER, de_ier);
-       POSTING_READ(DEIER);
-       if (!HAS_PCH_NOP(dev_priv)) {
+       if (!HAS_PCH_NOP(dev_priv))
                I915_WRITE(SDEIER, sde_ier);
-               POSTING_READ(SDEIER);
-       }
 
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        enable_rpm_wakeref_asserts(dev_priv);
@@ -2590,6 +2687,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
        intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
+static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
+{
+       u32 pin_mask = 0, long_mask = 0;
+       u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
+       u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
+
+       if (trigger_tc) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
+               I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
+
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
+                                  dig_hotplug_reg, hpd_gen11,
+                                  gen11_port_hotplug_long_detect);
+       }
+
+       if (trigger_tbt) {
+               u32 dig_hotplug_reg;
+
+               dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+               I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
+
+               intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
+                                  dig_hotplug_reg, hpd_gen11,
+                                  gen11_port_hotplug_long_detect);
+       }
+
+       if (pin_mask)
+               intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
+       else
+               DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+}
+
 static irqreturn_t
 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
@@ -2625,6 +2756,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
        }
 
+       if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
+               iir = I915_READ(GEN11_DE_HPD_IIR);
+               if (iir) {
+                       I915_WRITE(GEN11_DE_HPD_IIR, iir);
+                       ret = IRQ_HANDLED;
+                       gen11_hpd_irq_handler(dev_priv, iir);
+               } else {
+                       DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
+               }
+       }
+
        if (master_ctl & GEN8_DE_PORT_IRQ) {
                iir = I915_READ(GEN8_DE_PORT_IIR);
                if (iir) {
@@ -2640,7 +2782,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                                            GEN9_AUX_CHANNEL_C |
                                            GEN9_AUX_CHANNEL_D;
 
-                       if (IS_CNL_WITH_PORT_F(dev_priv))
+                       if (INTEL_GEN(dev_priv) >= 11)
+                               tmp_mask |= ICL_AUX_CHANNEL_E;
+
+                       if (IS_CNL_WITH_PORT_F(dev_priv) ||
+                           INTEL_GEN(dev_priv) >= 11)
                                tmp_mask |= CNL_AUX_CHANNEL_F;
 
                        if (iir & tmp_mask) {
@@ -2724,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        I915_WRITE(SDEIIR, iir);
                        ret = IRQ_HANDLED;
 
-                       if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
-                           HAS_PCH_CNP(dev_priv))
+                       if (HAS_PCH_ICP(dev_priv))
+                               icp_irq_handler(dev_priv, iir);
+                       else if (HAS_PCH_SPT(dev_priv) ||
+                                HAS_PCH_KBP(dev_priv) ||
+                                HAS_PCH_CNP(dev_priv))
                                spt_irq_handler(dev_priv, iir);
                        else
                                cpt_irq_handler(dev_priv, iir);
@@ -2942,11 +3091,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
        spin_unlock(&i915->irq_lock);
 }
 
+static void
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
+                     u32 *iir)
+{
+       void __iomem * const regs = dev_priv->regs;
+
+       if (!(master_ctl & GEN11_GU_MISC_IRQ))
+               return;
+
+       *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+       if (likely(*iir))
+               raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+}
+
+static void
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
+                         const u32 master_ctl, const u32 iir)
+{
+       if (!(master_ctl & GEN11_GU_MISC_IRQ))
+               return;
+
+       if (unlikely(!iir)) {
+               DRM_ERROR("GU_MISC iir blank!\n");
+               return;
+       }
+
+       if (iir & GEN11_GU_MISC_GSE)
+               intel_opregion_asle_intr(dev_priv);
+       else
+               DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
+}
+
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
        struct drm_i915_private * const i915 = to_i915(arg);
        void __iomem * const regs = i915->regs;
        u32 master_ctl;
+       u32 gu_misc_iir;
 
        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;
@@ -2975,9 +3157,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
                enable_rpm_wakeref_asserts(i915);
        }
 
+       gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+
        /* Acknowledge and enable interrupts. */
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
+       gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+
        return IRQ_HANDLED;
 }
 
@@ -3053,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
                 */
                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
                I915_WRITE(EMR, I915_READ(EMR) | eir);
-               I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+               I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
        }
 }
 
@@ -3464,7 +3650,12 @@ static void gen11_irq_reset(struct drm_device *dev)
 
        GEN3_IRQ_RESET(GEN8_DE_PORT_);
        GEN3_IRQ_RESET(GEN8_DE_MISC_);
+       GEN3_IRQ_RESET(GEN11_DE_HPD_);
+       GEN3_IRQ_RESET(GEN11_GU_MISC_);
        GEN3_IRQ_RESET(GEN8_PCU_);
+
+       if (HAS_PCH_ICP(dev_priv))
+               GEN3_IRQ_RESET(SDE);
 }
 
 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3581,6 +3772,73 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
        ibx_hpd_detection_setup(dev_priv);
 }
 
+static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+       u32 hotplug;
+
+       hotplug = I915_READ(SHOTPLUG_CTL_DDI);
+       hotplug |= ICP_DDIA_HPD_ENABLE |
+                  ICP_DDIB_HPD_ENABLE;
+       I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
+
+       hotplug = I915_READ(SHOTPLUG_CTL_TC);
+       hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) |
+                  ICP_TC_HPD_ENABLE(PORT_TC2) |
+                  ICP_TC_HPD_ENABLE(PORT_TC3) |
+                  ICP_TC_HPD_ENABLE(PORT_TC4);
+       I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
+}
+
+static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+       u32 hotplug_irqs, enabled_irqs;
+
+       hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP;
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp);
+
+       ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+       icp_hpd_detection_setup(dev_priv);
+}
+
+static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+       u32 hotplug;
+
+       hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
+       hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+       I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
+
+       hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
+       hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
+                  GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
+       I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
+}
+
+static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+       u32 hotplug_irqs, enabled_irqs;
+       u32 val;
+
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11);
+       hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
+
+       val = I915_READ(GEN11_DE_HPD_IMR);
+       val &= ~hotplug_irqs;
+       I915_WRITE(GEN11_DE_HPD_IMR, val);
+       POSTING_READ(GEN11_DE_HPD_IMR);
+
+       gen11_hpd_detection_setup(dev_priv);
+
+       if (HAS_PCH_ICP(dev_priv))
+               icp_hpd_irq_setup(dev_priv);
+}
+
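
Both icp_hpd_irq_setup() and gen11_hpd_irq_setup() above follow the same two-step recipe: first clear the relevant bits in the interrupt mask register so the events can fire, then set the per-port enable bits in the hotplug-control register so the pins are actually sensed. A minimal sketch over plain variables, with PORT_HPD_ENABLE() as a hypothetical stand-in for ICP_TC_HPD_ENABLE()/GEN11_HOTPLUG_CTL_ENABLE():

    #include <stdint.h>

    typedef uint32_t u32;

    /* Hypothetical layout: 4 bits per port, enable in the top bit. */
    #define PORT_HPD_ENABLE(port) (0x8u << ((port) * 4))

    static void hpd_setup(u32 *imr, u32 *hotplug_ctl, u32 hotplug_irqs)
    {
            /* Step 1: unmask the hotplug interrupts (a clear IMR bit = unmasked). */
            *imr &= ~hotplug_irqs;

            /* Step 2: enable detection for each port in the hotplug control reg. */
            *hotplug_ctl |= PORT_HPD_ENABLE(0) | PORT_HPD_ENABLE(1) |
                            PORT_HPD_ENABLE(2) | PORT_HPD_ENABLE(3);
    }
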
 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 {
        u32 val, hotplug;
@@ -3907,9 +4165,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        uint32_t de_pipe_enables;
        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
        u32 de_port_enables;
-       u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
+       u32 de_misc_masked = GEN8_DE_EDP_PSR;
        enum pipe pipe;
 
+       if (INTEL_GEN(dev_priv) <= 10)
+               de_misc_masked |= GEN8_DE_MISC_GSE;
+
        if (INTEL_GEN(dev_priv) >= 9) {
                de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
                de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
@@ -3920,7 +4181,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
                de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
        }
 
-       if (IS_CNL_WITH_PORT_F(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 11)
+               de_port_masked |= ICL_AUX_CHANNEL_E;
+
+       if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
                de_port_masked |= CNL_AUX_CHANNEL_F;
 
        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
@@ -3948,10 +4212,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
        GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
 
-       if (IS_GEN9_LP(dev_priv))
+       if (INTEL_GEN(dev_priv) >= 11) {
+               u32 de_hpd_masked = 0;
+               u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
+                                    GEN11_DE_TBT_HOTPLUG_MASK;
+
+               GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+               gen11_hpd_detection_setup(dev_priv);
+       } else if (IS_GEN9_LP(dev_priv)) {
                bxt_hpd_detection_setup(dev_priv);
-       else if (IS_BROADWELL(dev_priv))
+       } else if (IS_BROADWELL(dev_priv)) {
                ilk_hpd_detection_setup(dev_priv);
+       }
 }
 
 static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4000,13 +4272,34 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
 }
 
+static void icp_irq_postinstall(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 mask = SDE_GMBUS_ICP;
+
+       WARN_ON(I915_READ(SDEIER) != 0);
+       I915_WRITE(SDEIER, 0xffffffff);
+       POSTING_READ(SDEIER);
+
+       gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+       I915_WRITE(SDEIMR, ~mask);
+
+       icp_hpd_detection_setup(dev_priv);
+}
+
 static int gen11_irq_postinstall(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+       if (HAS_PCH_ICP(dev_priv))
+               icp_irq_postinstall(dev);
 
        gen11_gt_irq_postinstall(dev_priv);
        gen8_de_irq_postinstall(dev_priv);
 
+       GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
        I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
 
        I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
@@ -4054,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask =
                ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+                 I915_MASTER_ERROR_INTERRUPT);
 
        enable_mask =
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;
 
        GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
@@ -4073,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
+static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv,
+                              u16 *eir, u16 *eir_stuck)
+{
+       u16 emr;
+
+       *eir = I915_READ16(EIR);
+
+       if (*eir)
+               I915_WRITE16(EIR, *eir);
+
+       *eir_stuck = I915_READ16(EIR);
+       if (*eir_stuck == 0)
+               return;
+
+       /*
+        * Toggle all EMR bits to make sure we get an edge
+        * in the ISR master error bit if we don't clear
+        * all the EIR bits. Otherwise the edge triggered
+        * IIR on i965/g4x wouldn't notice that an interrupt
+        * is still pending. Also some EIR bits can't be
+        * cleared except by handling the underlying error
+        * (or by a GPU reset) so we mask any bit that
+        * remains set.
+        */
+       emr = I915_READ16(EMR);
+       I915_WRITE16(EMR, 0xffff);
+       I915_WRITE16(EMR, emr | *eir_stuck);
+}
+
+static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
+                                  u16 eir, u16 eir_stuck)
+{
+       DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
+
+       if (eir_stuck)
+               DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+}
+
+static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
+                              u32 *eir, u32 *eir_stuck)
+{
+       u32 emr;
+
+       *eir = I915_READ(EIR);
+
+       I915_WRITE(EIR, *eir);
+
+       *eir_stuck = I915_READ(EIR);
+       if (*eir_stuck == 0)
+               return;
+
+       /*
+        * Toggle all EMR bits to make sure we get an edge
+        * in the ISR master error bit if we don't clear
+        * all the EIR bits. Otherwise the edge triggered
+        * IIR on i965/g4x wouldn't notice that an interrupt
+        * is still pending. Also some EIR bits can't be
+        * cleared except by handling the underlying error
+        * (or by a GPU reset) so we mask any bit that
+        * remains set.
+        */
+       emr = I915_READ(EMR);
+       I915_WRITE(EMR, 0xffffffff);
+       I915_WRITE(EMR, emr | *eir_stuck);
+}
+
+static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
+                                  u32 eir, u32 eir_stuck)
+{
+       DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
+
+       if (eir_stuck)
+               DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+}
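
The EMR comment in i8xx_error_irq_ack()/i9xx_error_irq_ack() above is subtle enough to warrant a worked example. If an EIR bit is stuck, the master error line stays high and the edge-triggered IIR on i965/g4x would never fire again; masking everything forces the line low, and restoring EMR with the stuck bit kept masked re-arms the edge for the next genuine error. A sketch under the simplifying assumption that the line is just (EIR & ~EMR) != 0:

    #include <stdint.h>
    #include <assert.h>

    typedef uint16_t u16;

    /* Model: the master error line is high iff any unmasked EIR bit is set. */
    static int error_line(u16 eir, u16 emr)
    {
            return (eir & (u16)~emr) != 0;
    }

    int main(void)
    {
            u16 eir = 1u << 4;      /* bit 4 is stuck: writing EIR didn't clear it */
            u16 emr = 0;

            assert(error_line(eir, emr));   /* line high, no further edges */

            emr = 0xffff;                   /* toggle: mask all, the line falls... */
            assert(!error_line(eir, emr));

            emr = eir;                      /* old EMR (0) | stuck bits */
            assert(!error_line(eir, emr));  /* ...so a new error yields a fresh edge */
            return 0;
    }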
+
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
@@ -4087,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 
        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
+               u16 eir = 0, eir_stuck = 0;
                u16 iir;
 
                iir = I915_READ16(IIR);
@@ -4099,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
+               if (iir & I915_MASTER_ERROR_INTERRUPT)
+                       i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
                I915_WRITE16(IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);
 
-               if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+               if (iir & I915_MASTER_ERROR_INTERRUPT)
+                       i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
 
                i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);
@@ -4143,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev)
        dev_priv->irq_mask =
                ~(I915_ASLE_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+                 I915_MASTER_ERROR_INTERRUPT);
 
        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;
 
        if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4186,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
+               u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;
 
@@ -4203,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
+               if (iir & I915_MASTER_ERROR_INTERRUPT)
+                       i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
                I915_WRITE(IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[RCS]);
 
-               if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+               if (iir & I915_MASTER_ERROR_INTERRUPT)
+                       i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
 
                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4263,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev)
                  I915_DISPLAY_PORT_INTERRUPT |
                  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
-                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+                 I915_MASTER_ERROR_INTERRUPT);
 
        enable_mask =
                I915_ASLE_INTERRUPT |
                I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
-               I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
+               I915_MASTER_ERROR_INTERRUPT |
                I915_USER_INTERRUPT;
 
        if (IS_G4X(dev_priv))
@@ -4330,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
        do {
                u32 pipe_stats[I915_MAX_PIPES] = {};
+               u32 eir = 0, eir_stuck = 0;
                u32 hotplug_status = 0;
                u32 iir;
 
@@ -4346,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                 * signalled in iir */
                i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
+               if (iir & I915_MASTER_ERROR_INTERRUPT)
+                       i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
+
                I915_WRITE(IIR, iir);
 
                if (iir & I915_USER_INTERRUPT)
@@ -4354,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev_priv->engine[VCS]);
 
-               if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-                       DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
+               if (iir & I915_MASTER_ERROR_INTERRUPT)
+                       i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
 
                if (hotplug_status)
                        i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -4470,7 +4854,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->irq_uninstall = gen11_irq_reset;
                dev->driver->enable_vblank = gen8_enable_vblank;
                dev->driver->disable_vblank = gen8_disable_vblank;
-               dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+               dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
        } else if (INTEL_GEN(dev_priv) >= 8) {
                dev->driver->irq_handler = gen8_irq_handler;
                dev->driver->irq_preinstall = gen8_irq_reset;
index 66ea3552c63ec7634d5b61b4023a1d8899bc291d..295e981e4a398c242d10bd6ed446aeb0bfecdc70 100644 (file)
@@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400,
        "Use kernel modesetting [KMS] (0=disable, "
        "1=on, -1=force vga console preference [default])");
 
-i915_param_named_unsafe(panel_ignore_lid, int, 0600,
-       "Override lid status (0=autodetect, 1=autodetect disabled [default], "
-       "-1=force lid closed, -2=force lid open)");
-
 i915_param_named_unsafe(enable_dc, int, 0400,
        "Enable power-saving display C-states. "
        "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
@@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,
 
 i915_param_named_unsafe(enable_psr, int, 0600,
        "Enable PSR "
-       "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+       "(0=disabled, 1=enabled) "
        "Default: -1 (use per-chip default)");
 
 i915_param_named_unsafe(alpha_support, bool, 0400,
@@ -130,9 +126,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600,
 i915_param_named(disable_display, bool, 0400,
        "Disable display (default: false)");
 
-i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
-       "Enable command parsing (true=enabled [default], false=disabled)");
-
 i915_param_named(mmio_debug, int, 0600,
        "Enable the MMIO debug code for the first N failures (default: off). "
        "This may negatively affect performance.");
index 6684025b7af8b899604ba67646f4a00191f685c6..6c4d4a21474b5ffaa9954a145068b1d3effce694 100644 (file)
@@ -36,7 +36,6 @@ struct drm_printer;
 #define I915_PARAMS_FOR_EACH(param) \
        param(char *, vbt_firmware, NULL) \
        param(int, modeset, -1) \
-       param(int, panel_ignore_lid, 1) \
        param(int, lvds_channel_mode, 0) \
        param(int, panel_use_ssc, -1) \
        param(int, vbt_sdvo_panel_type, -1) \
@@ -58,7 +57,6 @@ struct drm_printer;
        param(unsigned int, inject_load_failure, 0) \
        /* leave bools at the end to not create holes */ \
        param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
-       param(bool, enable_cmd_parser, true) \
        param(bool, enable_hangcheck, true) \
        param(bool, fastboot, false) \
        param(bool, prefault_disable, false) \
index 4364922e935d3380c07e7c998f3fba8c5368f645..6a4d1388ad2d39b2f972e0d1e1270bab8d3ce8df 100644 (file)
@@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = {
        GEN(7),
        .is_lp = 1,
        .num_pipes = 2,
-       .has_psr = 1,
        .has_runtime_pm = 1,
        .has_rc6 = 1,
        .has_gmch_display = 1,
@@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = {
        .is_lp = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        .has_64bit_reloc = 1,
-       .has_psr = 1,
        .has_runtime_pm = 1,
        .has_resource_streamer = 1,
        .has_rc6 = 1,
@@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
        INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+       INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
        INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
-       INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
        INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
        INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+       INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+       INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+       INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
        INTEL_CNL_IDS(&intel_cannonlake_info),
        INTEL_ICL_11_IDS(&intel_icelake_11_info),
        {0, 0, 0}
@@ -673,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 
 static void i915_pci_remove(struct pci_dev *pdev)
 {
-       struct drm_device *dev = pci_get_drvdata(pdev);
+       struct drm_device *dev;
+
+       dev = pci_get_drvdata(pdev);
+       if (!dev) /* driver load aborted, nothing to cleanup */
+               return;
 
        i915_driver_unload(dev);
        drm_dev_put(dev);
+
+       pci_set_drvdata(pdev, NULL);
 }
 
 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -711,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                return err;
 
+       if (i915_inject_load_failure()) {
+               i915_pci_remove(pdev);
+               return -ENODEV;
+       }
+
        err = i915_live_selftests(pdev);
        if (err) {
                i915_pci_remove(pdev);
index 019bd2d073ad4461b0660f60e0a696e6af203ed7..6bf10952c7240363fdcd2df907979ef9aef0d247 100644 (file)
@@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000;
  * code assumes all reports have a power-of-two size and ~(size - 1) can
  * be used as a mask to align the OA tail pointer.
  */
-static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A13]        = { 0, 64 },
        [I915_OA_FORMAT_A29]        = { 1, 128 },
        [I915_OA_FORMAT_A13_B8_C8]  = { 2, 128 },
@@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_C4_B8]      = { 7, 64 },
 };
 
-static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
        [I915_OA_FORMAT_A12]                = { 0, 64 },
        [I915_OA_FORMAT_A12_B8_C8]          = { 2, 128 },
        [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
@@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                        continue;
                }
 
-               /*
-                * XXX: Just keep the lower 21 bits for now since I'm not
-                * entirely sure if the HW touches any of the higher bits in
-                * this field
-                */
-               ctx_id = report32[2] & 0x1fffff;
+               ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
 
                /*
                 * Squash whatever is in the CTX_ID field if it's marked as
@@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream,
        return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
 }
 
+static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
+                                           struct i915_gem_context *ctx)
+{
+       struct intel_engine_cs *engine = i915->engine[RCS];
+       struct intel_context *ce;
+       int ret;
+
+       ret = i915_mutex_lock_interruptible(&i915->drm);
+       if (ret)
+               return ERR_PTR(ret);
+
+       /*
+        * As the ID is the gtt offset of the context's vma, we
+        * pin the vma to ensure the ID remains fixed.
+        *
+        * NB: implied RCS engine...
+        */
+       ce = intel_context_pin(ctx, engine);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (IS_ERR(ce))
+               return ce;
+
+       i915->perf.oa.pinned_ctx = ce;
+
+       return ce;
+}
+
 /**
  * oa_get_render_ctx_id - determine and hold ctx hw id
  * @stream: An i915-perf stream opened for OA metrics
@@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream,
  */
 static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 {
-       struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct drm_i915_private *i915 = stream->dev_priv;
+       struct intel_context *ce;
 
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
-       } else {
-               struct intel_engine_cs *engine = dev_priv->engine[RCS];
-               struct intel_ring *ring;
-               int ret;
-
-               ret = i915_mutex_lock_interruptible(&dev_priv->drm);
-               if (ret)
-                       return ret;
+       ce = oa_pin_context(i915, stream->ctx);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
 
+       switch (INTEL_GEN(i915)) {
+       case 7: {
                /*
-                * As the ID is the gtt offset of the context's vma we
-                * pin the vma to ensure the ID remains fixed.
-                *
-                * NB: implied RCS engine...
+                * On Haswell we don't do any post processing of the reports
+                * and don't need to use the mask.
                 */
-               ring = intel_context_pin(stream->ctx, engine);
-               mutex_unlock(&dev_priv->drm.struct_mutex);
-               if (IS_ERR(ring))
-                       return PTR_ERR(ring);
+               i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
+               i915->perf.oa.specific_ctx_id_mask = 0;
+               break;
+       }
 
+       case 8:
+       case 9:
+       case 10:
+               if (USES_GUC_SUBMISSION(i915)) {
+                       /*
+                        * When using GuC, the context descriptor we write in
+                        * i915 is read by GuC and rewritten before it's
+                        * actually written into the hardware. The LRCA is
+                        * what is put into the context id field of the
+                        * context descriptor by GuC. Because it's aligned to
+                        * a page, the lower 12bits are always at 0 and
+                        * dropped by GuC. They won't be part of the context
+                        * ID in the OA reports, so squash those lower bits.
+                        */
+                       i915->perf.oa.specific_ctx_id =
+                               lower_32_bits(ce->lrc_desc) >> 12;
 
-               /*
-                * Explicitly track the ID (instead of calling
-                * i915_ggtt_offset() on the fly) considering the difference
-                * with gen8+ and execlists
-                */
-               dev_priv->perf.oa.specific_ctx_id =
-                       i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state);
+                       /*
+                        * GuC uses the top bit to signal proxy submission, so
+                        * ignore that bit.
+                        */
+                       i915->perf.oa.specific_ctx_id_mask =
+                               (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
+               } else {
+                       i915->perf.oa.specific_ctx_id_mask =
+                               (1U << GEN8_CTX_ID_WIDTH) - 1;
+                       i915->perf.oa.specific_ctx_id =
+                               upper_32_bits(ce->lrc_desc);
+                       i915->perf.oa.specific_ctx_id &=
+                               i915->perf.oa.specific_ctx_id_mask;
+               }
+               break;
+
+       case 11: {
+               i915->perf.oa.specific_ctx_id_mask =
+                       ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
+                       ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
+                       ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
+               i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
+               i915->perf.oa.specific_ctx_id &=
+                       i915->perf.oa.specific_ctx_id_mask;
+               break;
+       }
+
+       default:
+               MISSING_CASE(INTEL_GEN(i915));
        }
 
+       DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+                        i915->perf.oa.specific_ctx_id,
+                        i915->perf.oa.specific_ctx_id_mask);
+
        return 0;
 }
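
The GuC branch above is worth checking by hand: the LRCA lives in the low 32 bits of the context descriptor and is page aligned, so its bottom 12 bits carry no information, and GuC's proxy-submission flag occupies the top bit of the remaining ID field. A small worked example, assuming GEN8_CTX_ID_WIDTH is 21 as defined elsewhere in the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define GEN8_CTX_ID_WIDTH 21 /* assumed value from the driver headers */

    int main(void)
    {
            uint64_t lrc_desc = 0x12345000ull; /* page-aligned LRCA in the low 32 bits */
            uint32_t ctx_id, mask;

            ctx_id = (uint32_t)(lrc_desc & 0xffffffff) >> 12;   /* drop zero page bits */
            mask = (1u << (GEN8_CTX_ID_WIDTH - 1)) - 1;         /* top bit = proxy flag */

            /* Prints: ctx_id=0x12345 mask=0xfffff filtered=0x12345 */
            printf("ctx_id=0x%x mask=0x%x filtered=0x%x\n",
                   ctx_id, mask, ctx_id & mask);
            return 0;
    }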
 
@@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 {
        struct drm_i915_private *dev_priv = stream->dev_priv;
+       struct intel_context *ce;
 
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-       } else {
-               struct intel_engine_cs *engine = dev_priv->engine[RCS];
+       dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+       dev_priv->perf.oa.specific_ctx_id_mask = 0;
 
+       ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+       if (ce) {
                mutex_lock(&dev_priv->drm.struct_mutex);
-
-               dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-               intel_context_unpin(stream->ctx, engine);
-
+               intel_context_unpin(ce);
                mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 }
@@ -1780,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
         * So far the best way to work around this issue seems to be draining
         * the GPU from any submitted work.
         */
-       ret = i915_gem_wait_for_idle(dev_priv, wait_flags);
+       ret = i915_gem_wait_for_idle(dev_priv,
+                                    wait_flags,
+                                    MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;
 
index dc87797db500517b8ada97d92acf5d6f399b3bd5..c39541ed2219482c17814b9f3190b75f6be3c75d 100644 (file)
@@ -127,6 +127,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
 {
        if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
                i915->pmu.timer_enabled = true;
+               i915->pmu.timer_last = ktime_get();
                hrtimer_start_range_ns(&i915->pmu.timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
@@ -155,12 +156,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
 }
 
 static void
-update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+add_sample(struct i915_pmu_sample *sample, u32 val)
 {
-       sample->cur += mul_u32_u32(val, unit);
+       sample->cur += val;
 }
 
-static void engines_sample(struct drm_i915_private *dev_priv)
+static void
+engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
@@ -182,8 +184,9 @@ static void engines_sample(struct drm_i915_private *dev_priv)
 
                val = !i915_seqno_passed(current_seqno, last_seqno);
 
-               update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
-                             PERIOD, val);
+               if (val)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+                                  period_ns);
 
                if (val && (engine->pmu.enable &
                    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
@@ -194,11 +197,13 @@ static void engines_sample(struct drm_i915_private *dev_priv)
                        val = 0;
                }
 
-               update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
-                             PERIOD, !!(val & RING_WAIT));
+               if (val & RING_WAIT)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+                                  period_ns);
 
-               update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
-                             PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+               if (val & RING_WAIT_SEMAPHORE)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+                                  period_ns);
        }
 
        if (fw)
@@ -207,7 +212,14 @@ static void engines_sample(struct drm_i915_private *dev_priv)
        intel_runtime_pm_put(dev_priv);
 }
 
-static void frequency_sample(struct drm_i915_private *dev_priv)
+static void
+add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+{
+       sample->cur += mul_u32_u32(val, mul);
+}
+
+static void
+frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
 {
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
@@ -221,15 +233,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv)
                        intel_runtime_pm_put(dev_priv);
                }
 
-               update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-                             1, intel_gpu_freq(dev_priv, val));
+               add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+                               intel_gpu_freq(dev_priv, val),
+                               period_ns / 1000);
        }
 
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-               update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
-                             intel_gpu_freq(dev_priv,
-                                            dev_priv->gt_pm.rps.cur_freq));
+               add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+                               intel_gpu_freq(dev_priv,
+                                              dev_priv->gt_pm.rps.cur_freq),
+                               period_ns / 1000);
        }
 }
 
@@ -237,14 +251,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 {
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);
+       unsigned int period_ns;
+       ktime_t now;
 
        if (!READ_ONCE(i915->pmu.timer_enabled))
                return HRTIMER_NORESTART;
 
-       engines_sample(i915);
-       frequency_sample(i915);
+       now = ktime_get();
+       period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+       i915->pmu.timer_last = now;
+
+       /*
+        * Strictly speaking the passed-in period may not be 100% accurate for
+        * all internal calculations, since some amount of time can be spent on
+        * grabbing the forcewake. However the potential error from timer
+        * callback delay greatly dominates this, so we keep it simple.
+        */
+       engines_sample(i915, period_ns);
+       frequency_sample(i915, period_ns);
+
+       hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
 
-       hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
        return HRTIMER_RESTART;
 }
 
@@ -519,12 +546,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                case I915_PMU_ACTUAL_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
-                                  FREQUENCY);
+                                  USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_REQUESTED_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
-                                  FREQUENCY);
+                                  USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_INTERRUPTS:
                        val = count_interrupts(i915);
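
The unit bookkeeping across these two hunks is easiest to verify by hand: every timer tick accumulates freq_mhz * (period_ns / 1000), i.e. MHz-microseconds, so dividing the running total by USEC_PER_SEC recovers the average frequency in MHz over one second of sampling. A quick sanity check under those assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000ULL

    int main(void)
    {
            uint64_t acc = 0;
            unsigned int period_ns = 5000000; /* 5 ms sampling period */
            unsigned int freq_mhz = 800;      /* sampled GPU frequency */
            int i;

            /* One second of ticks: 200 samples of 5 ms each. */
            for (i = 0; i < 200; i++)
                    acc += (uint64_t)freq_mhz * (period_ns / 1000); /* MHz * us */

            /* Prints: average = 800 MHz */
            printf("average = %llu MHz\n", (unsigned long long)(acc / USEC_PER_SEC));
            return 0;
    }
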
index 2ba735299f7c5815ac3803ec62525802ab0950b2..7f164ca3db129472d3262439f5290d505ea6e14a 100644 (file)
@@ -65,6 +65,14 @@ struct i915_pmu {
         * event types.
         */
        u64 enable;
+
+       /**
+        * @timer_last:
+        *
+        * Timestamp of the previous timer invocation.
+        */
+       ktime_t timer_last;
+
        /**
         * @enable_count: Reference counts for the enabled events.
         *
index 195203f298dfcfdc03f91b529af65f8e6337aad9..eeaa3d506d95dc357ec5f317252c7d4bd11a3ed7 100644 (file)
@@ -54,6 +54,7 @@ enum vgt_g2v_type {
  */
 #define VGT_CAPS_FULL_48BIT_PPGTT      BIT(2)
 #define VGT_CAPS_HWSP_EMULATION                BIT(3)
+#define VGT_CAPS_HUGE_GTT              BIT(4)
 
 struct vgt_if {
        u64 magic;              /* VGT_MAGIC */
@@ -93,7 +94,10 @@ struct vgt_if {
        u32 rsv5[4];
 
        u32 g2v_notify;
-       u32 rsv6[7];
+       u32 rsv6[5];
+
+       u32 cursor_x_hot;
+       u32 cursor_y_hot;
 
        struct {
                u32 lo;
index f11bb213ec0784e4c50db5bd0ea0647a5419e0e1..91e7483228e11d1ff932a19c749f349b3961ea4f 100644 (file)
@@ -139,23 +139,40 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
        return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
 }
 
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
 #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
 
-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+/*
+ * Named helper wrappers around _PICK_EVEN() and _PICK().
+ */
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
 #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PIPE(plane, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
 #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a)))
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
 #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
 #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
 #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
 #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a)))
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
 #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
 #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
 #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
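
_PICK_EVEN() is easiest to grasp with concrete numbers: given the first two offsets of an evenly spaced register series, it extrapolates any index. With hypothetical per-pipe offsets 0x70000 and 0x71000:

    #include <assert.h>

    #define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))

    int main(void)
    {
            /* Hypothetical, evenly spaced per-pipe register offsets. */
            assert(_PICK_EVEN(0, 0x70000, 0x71000) == 0x70000); /* pipe A */
            assert(_PICK_EVEN(1, 0x70000, 0x71000) == 0x71000); /* pipe B */
            assert(_PICK_EVEN(2, 0x70000, 0x71000) == 0x72000); /* extrapolated C */
            return 0;
    }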
 
+#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
 #define _MASKED_FIELD(mask, value) ({                                     \
        if (__builtin_constant_p(mask))                                    \
                BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
@@ -164,7 +181,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
        if (__builtin_constant_p(mask) && __builtin_constant_p(value))     \
                BUILD_BUG_ON_MSG((value) & ~(mask),                        \
                                 "Incorrect value for mask");              \
-       (mask) << 16 | (value); })
+       __MASKED_FIELD(mask, value); })
 #define _MASKED_BIT_ENABLE(a)  ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
 #define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
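
The factored-out __MASKED_FIELD() encodes i915's masked-register convention: the upper 16 bits select which bits the hardware will update and the lower 16 bits carry the new values, so individual fields can be flipped without a read-modify-write cycle. For example (bit position chosen arbitrarily):

    #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))

    /* Set bit 2 without disturbing any other bit in the register:  */
    /*   __MASKED_FIELD(0x4, 0x4) == 0x00040004                     */
    /* Clear bit 2, again leaving every other bit untouched:        */
    /*   __MASKED_FIELD(0x4, 0x0) == 0x00040000                     */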
 
@@ -270,19 +287,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 
 #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
-#define  ILK_GRDOM_FULL                (0<<1)
-#define  ILK_GRDOM_RENDER      (1<<1)
-#define  ILK_GRDOM_MEDIA       (3<<1)
-#define  ILK_GRDOM_MASK                (3<<1)
-#define  ILK_GRDOM_RESET_ENABLE (1<<0)
+#define  ILK_GRDOM_FULL                (0 << 1)
+#define  ILK_GRDOM_RENDER      (1 << 1)
+#define  ILK_GRDOM_MEDIA       (3 << 1)
+#define  ILK_GRDOM_MASK                (3 << 1)
+#define  ILK_GRDOM_RESET_ENABLE (1 << 0)
 
 #define GEN6_MBCUNIT_SNPCR     _MMIO(0x900c) /* for LLC config */
 #define   GEN6_MBC_SNPCR_SHIFT 21
-#define   GEN6_MBC_SNPCR_MASK  (3<<21)
-#define   GEN6_MBC_SNPCR_MAX   (0<<21)
-#define   GEN6_MBC_SNPCR_MED   (1<<21)
-#define   GEN6_MBC_SNPCR_LOW   (2<<21)
-#define   GEN6_MBC_SNPCR_MIN   (3<<21) /* only 1/16th of the cache is shared */
+#define   GEN6_MBC_SNPCR_MASK  (3 << 21)
+#define   GEN6_MBC_SNPCR_MAX   (0 << 21)
+#define   GEN6_MBC_SNPCR_MED   (1 << 21)
+#define   GEN6_MBC_SNPCR_LOW   (2 << 21)
+#define   GEN6_MBC_SNPCR_MIN   (3 << 21) /* only 1/16th of the cache is shared */
 
 #define VLV_G3DCTL             _MMIO(0x9024)
 #define VLV_GSCKGCTL           _MMIO(0x9028)
@@ -314,13 +331,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN11_GRDOM_VECS              (1 << 13)
 #define  GEN11_GRDOM_VECS2             (1 << 14)
 
-#define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base+0x228)
-#define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base+0x518)
-#define RING_PP_DIR_DCLV(engine)       _MMIO((engine)->mmio_base+0x220)
+#define RING_PP_DIR_BASE(engine)       _MMIO((engine)->mmio_base + 0x228)
+#define RING_PP_DIR_BASE_READ(engine)  _MMIO((engine)->mmio_base + 0x518)
+#define RING_PP_DIR_DCLV(engine)       _MMIO((engine)->mmio_base + 0x220)
 #define   PP_DIR_DCLV_2G               0xffffffff
 
-#define GEN8_RING_PDP_UDW(engine, n)   _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n)   _MMIO((engine)->mmio_base+0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(engine, n)   _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(engine, n)   _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
 
 #define GEN8_R_PWR_CLK_STATE           _MMIO(0x20C8)
 #define   GEN8_RPCS_ENABLE             (1 << 31)
@@ -358,25 +375,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN8_SELECTIVE_READ_ADDRESSING_ENABLE         (1 << 13)
 
 #define GAM_ECOCHK                     _MMIO(0x4090)
-#define   BDW_DISABLE_HDC_INVALIDATION (1<<25)
-#define   ECOCHK_SNB_BIT               (1<<10)
-#define   ECOCHK_DIS_TLB               (1<<8)
-#define   HSW_ECOCHK_ARB_PRIO_SOL      (1<<6)
-#define   ECOCHK_PPGTT_CACHE64B                (0x3<<3)
-#define   ECOCHK_PPGTT_CACHE4B         (0x0<<3)
-#define   ECOCHK_PPGTT_GFDT_IVB                (0x1<<4)
-#define   ECOCHK_PPGTT_LLC_IVB         (0x1<<3)
-#define   ECOCHK_PPGTT_UC_HSW          (0x1<<3)
-#define   ECOCHK_PPGTT_WT_HSW          (0x2<<3)
-#define   ECOCHK_PPGTT_WB_HSW          (0x3<<3)
+#define   BDW_DISABLE_HDC_INVALIDATION (1 << 25)
+#define   ECOCHK_SNB_BIT               (1 << 10)
+#define   ECOCHK_DIS_TLB               (1 << 8)
+#define   HSW_ECOCHK_ARB_PRIO_SOL      (1 << 6)
+#define   ECOCHK_PPGTT_CACHE64B                (0x3 << 3)
+#define   ECOCHK_PPGTT_CACHE4B         (0x0 << 3)
+#define   ECOCHK_PPGTT_GFDT_IVB                (0x1 << 4)
+#define   ECOCHK_PPGTT_LLC_IVB         (0x1 << 3)
+#define   ECOCHK_PPGTT_UC_HSW          (0x1 << 3)
+#define   ECOCHK_PPGTT_WT_HSW          (0x2 << 3)
+#define   ECOCHK_PPGTT_WB_HSW          (0x3 << 3)
 
 #define GAC_ECO_BITS                   _MMIO(0x14090)
-#define   ECOBITS_SNB_BIT              (1<<13)
-#define   ECOBITS_PPGTT_CACHE64B       (3<<8)
-#define   ECOBITS_PPGTT_CACHE4B                (0<<8)
+#define   ECOBITS_SNB_BIT              (1 << 13)
+#define   ECOBITS_PPGTT_CACHE64B       (3 << 8)
+#define   ECOBITS_PPGTT_CACHE4B                (0 << 8)
 
 #define GAB_CTL                                _MMIO(0x24000)
-#define   GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
+#define   GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8)
 
 #define GEN6_STOLEN_RESERVED           _MMIO(0x1082C0)
 #define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -395,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN8_STOLEN_RESERVED_4M                (2 << 7)
 #define GEN8_STOLEN_RESERVED_8M                (3 << 7)
 #define GEN6_STOLEN_RESERVED_ENABLE    (1 << 0)
+#define GEN11_STOLEN_RESERVED_ADDR_MASK        (0xFFFFFFFFFFFULL << 20)
 
 /* VGA stuff */
 
@@ -404,15 +422,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define _VGA_MSR_WRITE _MMIO(0x3c2)
 #define VGA_MSR_WRITE 0x3c2
 #define VGA_MSR_READ 0x3cc
-#define   VGA_MSR_MEM_EN (1<<1)
-#define   VGA_MSR_CGA_MODE (1<<0)
+#define   VGA_MSR_MEM_EN (1 << 1)
+#define   VGA_MSR_CGA_MODE (1 << 0)
 
 #define VGA_SR_INDEX 0x3c4
 #define SR01                   1
 #define VGA_SR_DATA 0x3c5
 
 #define VGA_AR_INDEX 0x3c0
-#define   VGA_AR_VID_EN (1<<5)
+#define   VGA_AR_VID_EN (1 << 5)
 #define VGA_AR_DATA_WRITE 0x3c0
 #define VGA_AR_DATA_READ 0x3c1
 
@@ -445,8 +463,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define MI_PREDICATE_SRC1_UDW  _MMIO(0x2408 + 4)
 
 #define MI_PREDICATE_RESULT_2  _MMIO(0x2214)
-#define  LOWER_SLICE_ENABLED   (1<<0)
-#define  LOWER_SLICE_DISABLED  (0<<0)
+#define  LOWER_SLICE_ENABLED   (1 << 0)
+#define  LOWER_SLICE_DISABLED  (0 << 0)
 
 /*
  * Registers used only by the command parser
@@ -504,47 +522,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN7_OACONTROL_CTX_MASK           0xFFFFF000
 #define  GEN7_OACONTROL_TIMER_PERIOD_MASK   0x3F
 #define  GEN7_OACONTROL_TIMER_PERIOD_SHIFT  6
-#define  GEN7_OACONTROL_TIMER_ENABLE       (1<<5)
-#define  GEN7_OACONTROL_FORMAT_A13         (0<<2)
-#define  GEN7_OACONTROL_FORMAT_A29         (1<<2)
-#define  GEN7_OACONTROL_FORMAT_A13_B8_C8    (2<<2)
-#define  GEN7_OACONTROL_FORMAT_A29_B8_C8    (3<<2)
-#define  GEN7_OACONTROL_FORMAT_B4_C8       (4<<2)
-#define  GEN7_OACONTROL_FORMAT_A45_B8_C8    (5<<2)
-#define  GEN7_OACONTROL_FORMAT_B4_C8_A16    (6<<2)
-#define  GEN7_OACONTROL_FORMAT_C4_B8       (7<<2)
+#define  GEN7_OACONTROL_TIMER_ENABLE       (1 << 5)
+#define  GEN7_OACONTROL_FORMAT_A13         (0 << 2)
+#define  GEN7_OACONTROL_FORMAT_A29         (1 << 2)
+#define  GEN7_OACONTROL_FORMAT_A13_B8_C8    (2 << 2)
+#define  GEN7_OACONTROL_FORMAT_A29_B8_C8    (3 << 2)
+#define  GEN7_OACONTROL_FORMAT_B4_C8       (4 << 2)
+#define  GEN7_OACONTROL_FORMAT_A45_B8_C8    (5 << 2)
+#define  GEN7_OACONTROL_FORMAT_B4_C8_A16    (6 << 2)
+#define  GEN7_OACONTROL_FORMAT_C4_B8       (7 << 2)
 #define  GEN7_OACONTROL_FORMAT_SHIFT       2
-#define  GEN7_OACONTROL_PER_CTX_ENABLE     (1<<1)
-#define  GEN7_OACONTROL_ENABLE             (1<<0)
+#define  GEN7_OACONTROL_PER_CTX_ENABLE     (1 << 1)
+#define  GEN7_OACONTROL_ENABLE             (1 << 0)
 
 #define GEN8_OACTXID _MMIO(0x2364)
 
 #define GEN8_OA_DEBUG _MMIO(0x2B04)
-#define  GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS    (1<<5)
-#define  GEN9_OA_DEBUG_INCLUDE_CLK_RATIO           (1<<6)
-#define  GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS      (1<<2)
-#define  GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS   (1<<1)
+#define  GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS    (1 << 5)
+#define  GEN9_OA_DEBUG_INCLUDE_CLK_RATIO           (1 << 6)
+#define  GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS      (1 << 2)
+#define  GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS   (1 << 1)
 
 #define GEN8_OACONTROL _MMIO(0x2B00)
-#define  GEN8_OA_REPORT_FORMAT_A12         (0<<2)
-#define  GEN8_OA_REPORT_FORMAT_A12_B8_C8    (2<<2)
-#define  GEN8_OA_REPORT_FORMAT_A36_B8_C8    (5<<2)
-#define  GEN8_OA_REPORT_FORMAT_C4_B8       (7<<2)
+#define  GEN8_OA_REPORT_FORMAT_A12         (0 << 2)
+#define  GEN8_OA_REPORT_FORMAT_A12_B8_C8    (2 << 2)
+#define  GEN8_OA_REPORT_FORMAT_A36_B8_C8    (5 << 2)
+#define  GEN8_OA_REPORT_FORMAT_C4_B8       (7 << 2)
 #define  GEN8_OA_REPORT_FORMAT_SHIFT       2
-#define  GEN8_OA_SPECIFIC_CONTEXT_ENABLE    (1<<1)
-#define  GEN8_OA_COUNTER_ENABLE             (1<<0)
+#define  GEN8_OA_SPECIFIC_CONTEXT_ENABLE    (1 << 1)
+#define  GEN8_OA_COUNTER_ENABLE             (1 << 0)
 
 #define GEN8_OACTXCONTROL _MMIO(0x2360)
 #define  GEN8_OA_TIMER_PERIOD_MASK         0x3F
 #define  GEN8_OA_TIMER_PERIOD_SHIFT        2
-#define  GEN8_OA_TIMER_ENABLE              (1<<1)
-#define  GEN8_OA_COUNTER_RESUME                    (1<<0)
+#define  GEN8_OA_TIMER_ENABLE              (1 << 1)
+#define  GEN8_OA_COUNTER_RESUME                    (1 << 0)
 
 #define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
-#define  GEN7_OABUFFER_OVERRUN_DISABLE     (1<<3)
-#define  GEN7_OABUFFER_EDGE_TRIGGER        (1<<2)
-#define  GEN7_OABUFFER_STOP_RESUME_ENABLE   (1<<1)
-#define  GEN7_OABUFFER_RESUME              (1<<0)
+#define  GEN7_OABUFFER_OVERRUN_DISABLE     (1 << 3)
+#define  GEN7_OABUFFER_EDGE_TRIGGER        (1 << 2)
+#define  GEN7_OABUFFER_STOP_RESUME_ENABLE   (1 << 1)
+#define  GEN7_OABUFFER_RESUME              (1 << 0)
 
 #define GEN8_OABUFFER_UDW _MMIO(0x23b4)
 #define GEN8_OABUFFER _MMIO(0x2b14)
@@ -552,33 +570,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define GEN7_OASTATUS1 _MMIO(0x2364)
 #define  GEN7_OASTATUS1_TAIL_MASK          0xffffffc0
-#define  GEN7_OASTATUS1_COUNTER_OVERFLOW    (1<<2)
-#define  GEN7_OASTATUS1_OABUFFER_OVERFLOW   (1<<1)
-#define  GEN7_OASTATUS1_REPORT_LOST        (1<<0)
+#define  GEN7_OASTATUS1_COUNTER_OVERFLOW    (1 << 2)
+#define  GEN7_OASTATUS1_OABUFFER_OVERFLOW   (1 << 1)
+#define  GEN7_OASTATUS1_REPORT_LOST        (1 << 0)
 
 #define GEN7_OASTATUS2 _MMIO(0x2368)
 #define  GEN7_OASTATUS2_HEAD_MASK           0xffffffc0
 #define  GEN7_OASTATUS2_MEM_SELECT_GGTT     (1 << 0) /* 0: PPGTT, 1: GGTT */
 
 #define GEN8_OASTATUS _MMIO(0x2b08)
-#define  GEN8_OASTATUS_OVERRUN_STATUS      (1<<3)
-#define  GEN8_OASTATUS_COUNTER_OVERFLOW     (1<<2)
-#define  GEN8_OASTATUS_OABUFFER_OVERFLOW    (1<<1)
-#define  GEN8_OASTATUS_REPORT_LOST         (1<<0)
+#define  GEN8_OASTATUS_OVERRUN_STATUS      (1 << 3)
+#define  GEN8_OASTATUS_COUNTER_OVERFLOW     (1 << 2)
+#define  GEN8_OASTATUS_OABUFFER_OVERFLOW    (1 << 1)
+#define  GEN8_OASTATUS_REPORT_LOST         (1 << 0)
 
 #define GEN8_OAHEADPTR _MMIO(0x2B0C)
 #define GEN8_OAHEADPTR_MASK    0xffffffc0
 #define GEN8_OATAILPTR _MMIO(0x2B10)
 #define GEN8_OATAILPTR_MASK    0xffffffc0
 
-#define OABUFFER_SIZE_128K  (0<<3)
-#define OABUFFER_SIZE_256K  (1<<3)
-#define OABUFFER_SIZE_512K  (2<<3)
-#define OABUFFER_SIZE_1M    (3<<3)
-#define OABUFFER_SIZE_2M    (4<<3)
-#define OABUFFER_SIZE_4M    (5<<3)
-#define OABUFFER_SIZE_8M    (6<<3)
-#define OABUFFER_SIZE_16M   (7<<3)
+#define OABUFFER_SIZE_128K  (0 << 3)
+#define OABUFFER_SIZE_256K  (1 << 3)
+#define OABUFFER_SIZE_512K  (2 << 3)
+#define OABUFFER_SIZE_1M    (3 << 3)
+#define OABUFFER_SIZE_2M    (4 << 3)
+#define OABUFFER_SIZE_4M    (5 << 3)
+#define OABUFFER_SIZE_8M    (6 << 3)
+#define OABUFFER_SIZE_16M   (7 << 3)
 
 /*
  * Flexible, Aggregate EU Counter Registers.
@@ -601,35 +619,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OASTARTTRIG1_THRESHOLD_MASK          0xffff
 
 #define OASTARTTRIG2 _MMIO(0x2714)
-#define OASTARTTRIG2_INVERT_A_0 (1<<0)
-#define OASTARTTRIG2_INVERT_A_1 (1<<1)
-#define OASTARTTRIG2_INVERT_A_2 (1<<2)
-#define OASTARTTRIG2_INVERT_A_3 (1<<3)
-#define OASTARTTRIG2_INVERT_A_4 (1<<4)
-#define OASTARTTRIG2_INVERT_A_5 (1<<5)
-#define OASTARTTRIG2_INVERT_A_6 (1<<6)
-#define OASTARTTRIG2_INVERT_A_7 (1<<7)
-#define OASTARTTRIG2_INVERT_A_8 (1<<8)
-#define OASTARTTRIG2_INVERT_A_9 (1<<9)
-#define OASTARTTRIG2_INVERT_A_10 (1<<10)
-#define OASTARTTRIG2_INVERT_A_11 (1<<11)
-#define OASTARTTRIG2_INVERT_A_12 (1<<12)
-#define OASTARTTRIG2_INVERT_A_13 (1<<13)
-#define OASTARTTRIG2_INVERT_A_14 (1<<14)
-#define OASTARTTRIG2_INVERT_A_15 (1<<15)
-#define OASTARTTRIG2_INVERT_B_0 (1<<16)
-#define OASTARTTRIG2_INVERT_B_1 (1<<17)
-#define OASTARTTRIG2_INVERT_B_2 (1<<18)
-#define OASTARTTRIG2_INVERT_B_3 (1<<19)
-#define OASTARTTRIG2_INVERT_C_0 (1<<20)
-#define OASTARTTRIG2_INVERT_C_1 (1<<21)
-#define OASTARTTRIG2_INVERT_D_0 (1<<22)
-#define OASTARTTRIG2_THRESHOLD_ENABLE      (1<<23)
-#define OASTARTTRIG2_START_TRIG_FLAG_MBZ    (1<<24)
-#define OASTARTTRIG2_EVENT_SELECT_0  (1<<28)
-#define OASTARTTRIG2_EVENT_SELECT_1  (1<<29)
-#define OASTARTTRIG2_EVENT_SELECT_2  (1<<30)
-#define OASTARTTRIG2_EVENT_SELECT_3  (1<<31)
+#define OASTARTTRIG2_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG2_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG2_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG2_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG2_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG2_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG2_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG2_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG2_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG2_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG2_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG2_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG2_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG2_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG2_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG2_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG2_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG2_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG2_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG2_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG2_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG2_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG2_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE      (1 << 23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ    (1 << 24)
+#define OASTARTTRIG2_EVENT_SELECT_0  (1 << 28)
+#define OASTARTTRIG2_EVENT_SELECT_1  (1 << 29)
+#define OASTARTTRIG2_EVENT_SELECT_2  (1 << 30)
+#define OASTARTTRIG2_EVENT_SELECT_3  (1 << 31)
 
 #define OASTARTTRIG3 _MMIO(0x2718)
 #define OASTARTTRIG3_NOA_SELECT_MASK      0xf
@@ -658,35 +676,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OASTARTTRIG5_THRESHOLD_MASK          0xffff
 
 #define OASTARTTRIG6 _MMIO(0x2724)
-#define OASTARTTRIG6_INVERT_A_0 (1<<0)
-#define OASTARTTRIG6_INVERT_A_1 (1<<1)
-#define OASTARTTRIG6_INVERT_A_2 (1<<2)
-#define OASTARTTRIG6_INVERT_A_3 (1<<3)
-#define OASTARTTRIG6_INVERT_A_4 (1<<4)
-#define OASTARTTRIG6_INVERT_A_5 (1<<5)
-#define OASTARTTRIG6_INVERT_A_6 (1<<6)
-#define OASTARTTRIG6_INVERT_A_7 (1<<7)
-#define OASTARTTRIG6_INVERT_A_8 (1<<8)
-#define OASTARTTRIG6_INVERT_A_9 (1<<9)
-#define OASTARTTRIG6_INVERT_A_10 (1<<10)
-#define OASTARTTRIG6_INVERT_A_11 (1<<11)
-#define OASTARTTRIG6_INVERT_A_12 (1<<12)
-#define OASTARTTRIG6_INVERT_A_13 (1<<13)
-#define OASTARTTRIG6_INVERT_A_14 (1<<14)
-#define OASTARTTRIG6_INVERT_A_15 (1<<15)
-#define OASTARTTRIG6_INVERT_B_0 (1<<16)
-#define OASTARTTRIG6_INVERT_B_1 (1<<17)
-#define OASTARTTRIG6_INVERT_B_2 (1<<18)
-#define OASTARTTRIG6_INVERT_B_3 (1<<19)
-#define OASTARTTRIG6_INVERT_C_0 (1<<20)
-#define OASTARTTRIG6_INVERT_C_1 (1<<21)
-#define OASTARTTRIG6_INVERT_D_0 (1<<22)
-#define OASTARTTRIG6_THRESHOLD_ENABLE      (1<<23)
-#define OASTARTTRIG6_START_TRIG_FLAG_MBZ    (1<<24)
-#define OASTARTTRIG6_EVENT_SELECT_4  (1<<28)
-#define OASTARTTRIG6_EVENT_SELECT_5  (1<<29)
-#define OASTARTTRIG6_EVENT_SELECT_6  (1<<30)
-#define OASTARTTRIG6_EVENT_SELECT_7  (1<<31)
+#define OASTARTTRIG6_INVERT_A_0 (1 << 0)
+#define OASTARTTRIG6_INVERT_A_1 (1 << 1)
+#define OASTARTTRIG6_INVERT_A_2 (1 << 2)
+#define OASTARTTRIG6_INVERT_A_3 (1 << 3)
+#define OASTARTTRIG6_INVERT_A_4 (1 << 4)
+#define OASTARTTRIG6_INVERT_A_5 (1 << 5)
+#define OASTARTTRIG6_INVERT_A_6 (1 << 6)
+#define OASTARTTRIG6_INVERT_A_7 (1 << 7)
+#define OASTARTTRIG6_INVERT_A_8 (1 << 8)
+#define OASTARTTRIG6_INVERT_A_9 (1 << 9)
+#define OASTARTTRIG6_INVERT_A_10 (1 << 10)
+#define OASTARTTRIG6_INVERT_A_11 (1 << 11)
+#define OASTARTTRIG6_INVERT_A_12 (1 << 12)
+#define OASTARTTRIG6_INVERT_A_13 (1 << 13)
+#define OASTARTTRIG6_INVERT_A_14 (1 << 14)
+#define OASTARTTRIG6_INVERT_A_15 (1 << 15)
+#define OASTARTTRIG6_INVERT_B_0 (1 << 16)
+#define OASTARTTRIG6_INVERT_B_1 (1 << 17)
+#define OASTARTTRIG6_INVERT_B_2 (1 << 18)
+#define OASTARTTRIG6_INVERT_B_3 (1 << 19)
+#define OASTARTTRIG6_INVERT_C_0 (1 << 20)
+#define OASTARTTRIG6_INVERT_C_1 (1 << 21)
+#define OASTARTTRIG6_INVERT_D_0 (1 << 22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE      (1 << 23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ    (1 << 24)
+#define OASTARTTRIG6_EVENT_SELECT_4  (1 << 28)
+#define OASTARTTRIG6_EVENT_SELECT_5  (1 << 29)
+#define OASTARTTRIG6_EVENT_SELECT_6  (1 << 30)
+#define OASTARTTRIG6_EVENT_SELECT_7  (1 << 31)
 
 #define OASTARTTRIG7 _MMIO(0x2728)
 #define OASTARTTRIG7_NOA_SELECT_MASK      0xf
@@ -715,31 +733,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG2 _MMIO(0x2744)
-#define OAREPORTTRIG2_INVERT_A_0  (1<<0)
-#define OAREPORTTRIG2_INVERT_A_1  (1<<1)
-#define OAREPORTTRIG2_INVERT_A_2  (1<<2)
-#define OAREPORTTRIG2_INVERT_A_3  (1<<3)
-#define OAREPORTTRIG2_INVERT_A_4  (1<<4)
-#define OAREPORTTRIG2_INVERT_A_5  (1<<5)
-#define OAREPORTTRIG2_INVERT_A_6  (1<<6)
-#define OAREPORTTRIG2_INVERT_A_7  (1<<7)
-#define OAREPORTTRIG2_INVERT_A_8  (1<<8)
-#define OAREPORTTRIG2_INVERT_A_9  (1<<9)
-#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG2_INVERT_B_0  (1<<16)
-#define OAREPORTTRIG2_INVERT_B_1  (1<<17)
-#define OAREPORTTRIG2_INVERT_B_2  (1<<18)
-#define OAREPORTTRIG2_INVERT_B_3  (1<<19)
-#define OAREPORTTRIG2_INVERT_C_0  (1<<20)
-#define OAREPORTTRIG2_INVERT_C_1  (1<<21)
-#define OAREPORTTRIG2_INVERT_D_0  (1<<22)
-#define OAREPORTTRIG2_THRESHOLD_ENABLE     (1<<23)
-#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG2_INVERT_A_0  (1 << 0)
+#define OAREPORTTRIG2_INVERT_A_1  (1 << 1)
+#define OAREPORTTRIG2_INVERT_A_2  (1 << 2)
+#define OAREPORTTRIG2_INVERT_A_3  (1 << 3)
+#define OAREPORTTRIG2_INVERT_A_4  (1 << 4)
+#define OAREPORTTRIG2_INVERT_A_5  (1 << 5)
+#define OAREPORTTRIG2_INVERT_A_6  (1 << 6)
+#define OAREPORTTRIG2_INVERT_A_7  (1 << 7)
+#define OAREPORTTRIG2_INVERT_A_8  (1 << 8)
+#define OAREPORTTRIG2_INVERT_A_9  (1 << 9)
+#define OAREPORTTRIG2_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG2_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG2_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG2_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG2_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG2_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG2_INVERT_B_0  (1 << 16)
+#define OAREPORTTRIG2_INVERT_B_1  (1 << 17)
+#define OAREPORTTRIG2_INVERT_B_2  (1 << 18)
+#define OAREPORTTRIG2_INVERT_B_3  (1 << 19)
+#define OAREPORTTRIG2_INVERT_C_0  (1 << 20)
+#define OAREPORTTRIG2_INVERT_C_1  (1 << 21)
+#define OAREPORTTRIG2_INVERT_D_0  (1 << 22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE     (1 << 23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31)
 
 #define OAREPORTTRIG3 _MMIO(0x2748)
 #define OAREPORTTRIG3_NOA_SELECT_MASK      0xf
@@ -768,31 +786,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
 
 #define OAREPORTTRIG6 _MMIO(0x2754)
-#define OAREPORTTRIG6_INVERT_A_0  (1<<0)
-#define OAREPORTTRIG6_INVERT_A_1  (1<<1)
-#define OAREPORTTRIG6_INVERT_A_2  (1<<2)
-#define OAREPORTTRIG6_INVERT_A_3  (1<<3)
-#define OAREPORTTRIG6_INVERT_A_4  (1<<4)
-#define OAREPORTTRIG6_INVERT_A_5  (1<<5)
-#define OAREPORTTRIG6_INVERT_A_6  (1<<6)
-#define OAREPORTTRIG6_INVERT_A_7  (1<<7)
-#define OAREPORTTRIG6_INVERT_A_8  (1<<8)
-#define OAREPORTTRIG6_INVERT_A_9  (1<<9)
-#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
-#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
-#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
-#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
-#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
-#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
-#define OAREPORTTRIG6_INVERT_B_0  (1<<16)
-#define OAREPORTTRIG6_INVERT_B_1  (1<<17)
-#define OAREPORTTRIG6_INVERT_B_2  (1<<18)
-#define OAREPORTTRIG6_INVERT_B_3  (1<<19)
-#define OAREPORTTRIG6_INVERT_C_0  (1<<20)
-#define OAREPORTTRIG6_INVERT_C_1  (1<<21)
-#define OAREPORTTRIG6_INVERT_D_0  (1<<22)
-#define OAREPORTTRIG6_THRESHOLD_ENABLE     (1<<23)
-#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+#define OAREPORTTRIG6_INVERT_A_0  (1 << 0)
+#define OAREPORTTRIG6_INVERT_A_1  (1 << 1)
+#define OAREPORTTRIG6_INVERT_A_2  (1 << 2)
+#define OAREPORTTRIG6_INVERT_A_3  (1 << 3)
+#define OAREPORTTRIG6_INVERT_A_4  (1 << 4)
+#define OAREPORTTRIG6_INVERT_A_5  (1 << 5)
+#define OAREPORTTRIG6_INVERT_A_6  (1 << 6)
+#define OAREPORTTRIG6_INVERT_A_7  (1 << 7)
+#define OAREPORTTRIG6_INVERT_A_8  (1 << 8)
+#define OAREPORTTRIG6_INVERT_A_9  (1 << 9)
+#define OAREPORTTRIG6_INVERT_A_10 (1 << 10)
+#define OAREPORTTRIG6_INVERT_A_11 (1 << 11)
+#define OAREPORTTRIG6_INVERT_A_12 (1 << 12)
+#define OAREPORTTRIG6_INVERT_A_13 (1 << 13)
+#define OAREPORTTRIG6_INVERT_A_14 (1 << 14)
+#define OAREPORTTRIG6_INVERT_A_15 (1 << 15)
+#define OAREPORTTRIG6_INVERT_B_0  (1 << 16)
+#define OAREPORTTRIG6_INVERT_B_1  (1 << 17)
+#define OAREPORTTRIG6_INVERT_B_2  (1 << 18)
+#define OAREPORTTRIG6_INVERT_B_3  (1 << 19)
+#define OAREPORTTRIG6_INVERT_C_0  (1 << 20)
+#define OAREPORTTRIG6_INVERT_C_1  (1 << 21)
+#define OAREPORTTRIG6_INVERT_D_0  (1 << 22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE     (1 << 23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31)
 
 #define OAREPORTTRIG7 _MMIO(0x2758)
 #define OAREPORTTRIG7_NOA_SELECT_MASK      0xf
@@ -828,9 +846,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define OACEC_COMPARE_VALUE_MASK    0xffff
 #define OACEC_COMPARE_VALUE_SHIFT   3
 
-#define OACEC_SELECT_NOA       (0<<19)
-#define OACEC_SELECT_PREV      (1<<19)
-#define OACEC_SELECT_BOOLEAN   (2<<19)
+#define OACEC_SELECT_NOA       (0 << 19)
+#define OACEC_SELECT_PREV      (1 << 19)
+#define OACEC_SELECT_BOOLEAN   (2 << 19)
 
 /* CECX_1 */
 #define OACEC_MASK_MASK                    0xffff
@@ -948,9 +966,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  * Reset registers
  */
 #define DEBUG_RESET_I830               _MMIO(0x6070)
-#define  DEBUG_RESET_FULL              (1<<7)
-#define  DEBUG_RESET_RENDER            (1<<8)
-#define  DEBUG_RESET_DISPLAY           (1<<9)
+#define  DEBUG_RESET_FULL              (1 << 7)
+#define  DEBUG_RESET_RENDER            (1 << 8)
+#define  DEBUG_RESET_DISPLAY           (1 << 9)
 
 /*
  * IOSF sideband
@@ -961,7 +979,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   IOSF_PORT_SHIFT                      8
 #define   IOSF_BYTE_ENABLES_SHIFT              4
 #define   IOSF_BAR_SHIFT                       1
-#define   IOSF_SB_BUSY                         (1<<0)
+#define   IOSF_SB_BUSY                         (1 << 0)
 #define   IOSF_PORT_BUNIT                      0x03
 #define   IOSF_PORT_PUNIT                      0x04
 #define   IOSF_PORT_NC                         0x11
@@ -1044,13 +1062,13 @@ enum i915_power_well_id {
 
        /*
         * HSW/BDW
-        *  - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+        *  - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
         */
        HSW_DISP_PW_GLOBAL = 15,
 
        /*
         * GEN9+
-        *  - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1)
+        *  - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1)
         */
        SKL_DISP_PW_MISC_IO = 0,
        SKL_DISP_PW_DDI_A_E,
@@ -1074,17 +1092,54 @@ enum i915_power_well_id {
        SKL_DISP_PW_2,
 
        /* - custom power wells */
-       SKL_DISP_PW_DC_OFF,
        BXT_DPIO_CMN_A,
        BXT_DPIO_CMN_BC,
-       GLK_DPIO_CMN_C,                 /* 19 */
+       GLK_DPIO_CMN_C,                 /* 18 */
+
+       /*
+        * GEN11+
+        *  - _HSW_PWR_WELL_CTL1-4
+        *    (status bit: (id&15)*2, req bit:(id&15)*2+1)
+        */
+       ICL_DISP_PW_1 = 0,
+       ICL_DISP_PW_2,
+       ICL_DISP_PW_3,
+       ICL_DISP_PW_4,
+
+       /*
+        *  - _HSW_PWR_WELL_CTL_AUX1/2/4
+        *    (status bit: (id&15)*2, req bit:(id&15)*2+1)
+        */
+       ICL_DISP_PW_AUX_A = 16,
+       ICL_DISP_PW_AUX_B,
+       ICL_DISP_PW_AUX_C,
+       ICL_DISP_PW_AUX_D,
+       ICL_DISP_PW_AUX_E,
+       ICL_DISP_PW_AUX_F,
+
+       ICL_DISP_PW_AUX_TBT1 = 24,
+       ICL_DISP_PW_AUX_TBT2,
+       ICL_DISP_PW_AUX_TBT3,
+       ICL_DISP_PW_AUX_TBT4,
+
+       /*
+        *  - _HSW_PWR_WELL_CTL_DDI1/2/4
+        *    (status bit: (id&15)*2, req bit:(id&15)*2+1)
+        */
+       ICL_DISP_PW_DDI_A = 32,
+       ICL_DISP_PW_DDI_B,
+       ICL_DISP_PW_DDI_C,
+       ICL_DISP_PW_DDI_D,
+       ICL_DISP_PW_DDI_E,
+       ICL_DISP_PW_DDI_F,                      /* 37 */
 
        /*
         * Multiple platforms.
         * Must start following the highest ID of any platform.
         * - custom power wells
         */
-       I915_DISP_PW_ALWAYS_ON = 20,
+       SKL_DISP_PW_DC_OFF = 38,
+       I915_DISP_PW_ALWAYS_ON,
 };
 
 #define PUNIT_REG_PWRGT_CTRL                   0x60
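
For reference, the (id & 15) * 2 encoding noted in the enum comments works out as below; this is a sketch with hypothetical helper names, not part of this patch (the driver's actual lookup macros live outside this hunk):

/*
 * Status/request bit for a power well ID, per the comments above:
 *   status bit = (id & 15) * 2, request bit = (id & 15) * 2 + 1
 * e.g. ICL_DISP_PW_AUX_B (id 17): 17 & 15 = 1, so status is bit 2
 * and request is bit 3 of _HSW_PWR_WELL_CTL_AUX1/2/4.
 */
#define PWR_WELL_CTL_STATE_BIT(id)     (1 << (((id) & 15) * 2))
#define PWR_WELL_CTL_REQ_BIT(id)       (1 << (((id) & 15) * 2 + 1))
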
@@ -1098,8 +1153,8 @@ enum i915_power_well_id {
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define PUNIT_REG_GPU_FREQ_STS                 0xd8
-#define   GPLLENABLE                           (1<<4)
-#define   GENFREQSTATUS                                (1<<0)
+#define   GPLLENABLE                           (1 << 4)
+#define   GENFREQSTATUS                                (1 << 0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ         0xdc
 #define PUNIT_REG_CZ_TIMESTAMP                 0xce
 
@@ -1141,11 +1196,11 @@ enum i915_power_well_id {
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT           27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK            0xf8000000
 
-#define VLV_TURBO_SOC_OVERRIDE 0x04
-#define        VLV_OVERRIDE_EN 1
-#define        VLV_SOC_TDP_EN  (1 << 1)
-#define        VLV_BIAS_CPU_125_SOC_875 (6 << 2)
-#define        CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+#define VLV_TURBO_SOC_OVERRIDE         0x04
+#define   VLV_OVERRIDE_EN              1
+#define   VLV_SOC_TDP_EN               (1 << 1)
+#define   VLV_BIAS_CPU_125_SOC_875     (6 << 2)
+#define   CHV_BIAS_CPU_50_SOC_50       (3 << 2)
 
 /* vlv2 north clock registers */
 #define CCK_FUSE_REG                           0x8
@@ -1194,10 +1249,10 @@ enum i915_power_well_id {
 #define DPIO_DEVFN                     0
 
 #define DPIO_CTL                       _MMIO(VLV_DISPLAY_BASE + 0x2110)
-#define  DPIO_MODSEL1                  (1<<3) /* if ref clk b == 27 */
-#define  DPIO_MODSEL0                  (1<<2) /* if ref clk a == 27 */
-#define  DPIO_SFR_BYPASS               (1<<1)
-#define  DPIO_CMNRST                   (1<<0)
+#define  DPIO_MODSEL1                  (1 << 3) /* if ref clk b == 27 */
+#define  DPIO_MODSEL0                  (1 << 2) /* if ref clk a == 27 */
+#define  DPIO_SFR_BYPASS               (1 << 1)
+#define  DPIO_CMNRST                   (1 << 0)
 
 #define DPIO_PHY(pipe)                 ((pipe) >> 1)
 #define DPIO_PHY_IOSF_PORT(phy)                (dev_priv->dpio_phy_iosf_port[phy])
@@ -1215,7 +1270,7 @@ enum i915_power_well_id {
 #define   DPIO_P1_SHIFT                        (21) /* 3 bits */
 #define   DPIO_P2_SHIFT                        (16) /* 5 bits */
 #define   DPIO_N_SHIFT                 (12) /* 4 bits */
-#define   DPIO_ENABLE_CALIBRATION      (1<<11)
+#define   DPIO_ENABLE_CALIBRATION      (1 << 11)
 #define   DPIO_M1DIV_SHIFT             (8) /* 3 bits */
 #define   DPIO_M2DIV_MASK              0xff
 #define _VLV_PLL_DW3_CH1               0x802c
@@ -1264,10 +1319,10 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW0_CH0               0x8200
 #define _VLV_PCS_DW0_CH1               0x8400
-#define   DPIO_PCS_TX_LANE2_RESET      (1<<16)
-#define   DPIO_PCS_TX_LANE1_RESET      (1<<7)
-#define   DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
-#define   DPIO_RIGHT_TXFIFO_RST_MASTER2        (1<<3)
+#define   DPIO_PCS_TX_LANE2_RESET      (1 << 16)
+#define   DPIO_PCS_TX_LANE1_RESET      (1 << 7)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER2        (1 << 3)
 #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
 #define _VLV_PCS01_DW0_CH0             0x200
@@ -1279,11 +1334,11 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW1_CH0               0x8204
 #define _VLV_PCS_DW1_CH1               0x8404
-#define   CHV_PCS_REQ_SOFTRESET_EN     (1<<23)
-#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN        (1<<22)
-#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21)
+#define   CHV_PCS_REQ_SOFTRESET_EN     (1 << 23)
+#define   DPIO_PCS_CLK_CRI_RXEB_EIOS_EN        (1 << 22)
+#define   DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21)
 #define   DPIO_PCS_CLK_DATAWIDTH_SHIFT (6)
-#define   DPIO_PCS_CLK_SOFT_RESET      (1<<5)
+#define   DPIO_PCS_CLK_SOFT_RESET      (1 << 5)
 #define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1)
 
 #define _VLV_PCS01_DW1_CH0             0x204
@@ -1308,12 +1363,12 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW9_CH0               0x8224
 #define _VLV_PCS_DW9_CH1               0x8424
-#define   DPIO_PCS_TX2MARGIN_MASK      (0x7<<13)
-#define   DPIO_PCS_TX2MARGIN_000       (0<<13)
-#define   DPIO_PCS_TX2MARGIN_101       (1<<13)
-#define   DPIO_PCS_TX1MARGIN_MASK      (0x7<<10)
-#define   DPIO_PCS_TX1MARGIN_000       (0<<10)
-#define   DPIO_PCS_TX1MARGIN_101       (1<<10)
+#define   DPIO_PCS_TX2MARGIN_MASK      (0x7 << 13)
+#define   DPIO_PCS_TX2MARGIN_000       (0 << 13)
+#define   DPIO_PCS_TX2MARGIN_101       (1 << 13)
+#define   DPIO_PCS_TX1MARGIN_MASK      (0x7 << 10)
+#define   DPIO_PCS_TX1MARGIN_000       (0 << 10)
+#define   DPIO_PCS_TX1MARGIN_101       (1 << 10)
 #define        VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
 #define _VLV_PCS01_DW9_CH0             0x224
@@ -1325,14 +1380,14 @@ enum i915_power_well_id {
 
 #define _CHV_PCS_DW10_CH0              0x8228
 #define _CHV_PCS_DW10_CH1              0x8428
-#define   DPIO_PCS_SWING_CALC_TX0_TX2  (1<<30)
-#define   DPIO_PCS_SWING_CALC_TX1_TX3  (1<<31)
-#define   DPIO_PCS_TX2DEEMP_MASK       (0xf<<24)
-#define   DPIO_PCS_TX2DEEMP_9P5                (0<<24)
-#define   DPIO_PCS_TX2DEEMP_6P0                (2<<24)
-#define   DPIO_PCS_TX1DEEMP_MASK       (0xf<<16)
-#define   DPIO_PCS_TX1DEEMP_9P5                (0<<16)
-#define   DPIO_PCS_TX1DEEMP_6P0                (2<<16)
+#define   DPIO_PCS_SWING_CALC_TX0_TX2  (1 << 30)
+#define   DPIO_PCS_SWING_CALC_TX1_TX3  (1 << 31)
+#define   DPIO_PCS_TX2DEEMP_MASK       (0xf << 24)
+#define   DPIO_PCS_TX2DEEMP_9P5                (0 << 24)
+#define   DPIO_PCS_TX2DEEMP_6P0                (2 << 24)
+#define   DPIO_PCS_TX1DEEMP_MASK       (0xf << 16)
+#define   DPIO_PCS_TX1DEEMP_9P5                (0 << 16)
+#define   DPIO_PCS_TX1DEEMP_6P0                (2 << 16)
 #define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
 
 #define _VLV_PCS01_DW10_CH0            0x0228
@@ -1344,10 +1399,10 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW11_CH0              0x822c
 #define _VLV_PCS_DW11_CH1              0x842c
-#define   DPIO_TX2_STAGGER_MASK(x)     ((x)<<24)
-#define   DPIO_LANEDESKEW_STRAP_OVRD   (1<<3)
-#define   DPIO_LEFT_TXFIFO_RST_MASTER  (1<<1)
-#define   DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
+#define   DPIO_TX2_STAGGER_MASK(x)     ((x) << 24)
+#define   DPIO_LANEDESKEW_STRAP_OVRD   (1 << 3)
+#define   DPIO_LEFT_TXFIFO_RST_MASTER  (1 << 1)
+#define   DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0)
 #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
 #define _VLV_PCS01_DW11_CH0            0x022c
@@ -1366,11 +1421,11 @@ enum i915_power_well_id {
 
 #define _VLV_PCS_DW12_CH0              0x8230
 #define _VLV_PCS_DW12_CH1              0x8430
-#define   DPIO_TX2_STAGGER_MULT(x)     ((x)<<20)
-#define   DPIO_TX1_STAGGER_MULT(x)     ((x)<<16)
-#define   DPIO_TX1_STAGGER_MASK(x)     ((x)<<8)
-#define   DPIO_LANESTAGGER_STRAP_OVRD  (1<<6)
-#define   DPIO_LANESTAGGER_STRAP(x)    ((x)<<0)
+#define   DPIO_TX2_STAGGER_MULT(x)     ((x) << 20)
+#define   DPIO_TX1_STAGGER_MULT(x)     ((x) << 16)
+#define   DPIO_TX1_STAGGER_MASK(x)     ((x) << 8)
+#define   DPIO_LANESTAGGER_STRAP_OVRD  (1 << 6)
+#define   DPIO_LANESTAGGER_STRAP(x)    ((x) << 0)
 #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
 
 #define _VLV_PCS_DW14_CH0              0x8238
@@ -1391,7 +1446,7 @@ enum i915_power_well_id {
 #define _VLV_TX_DW3_CH0                        0x828c
 #define _VLV_TX_DW3_CH1                        0x848c
 /* The following bit for CHV phy */
-#define   DPIO_TX_UNIQ_TRANS_SCALE_EN  (1<<27)
+#define   DPIO_TX_UNIQ_TRANS_SCALE_EN  (1 << 27)
 #define   DPIO_SWING_MARGIN101_SHIFT   16
 #define   DPIO_SWING_MARGIN101_MASK    (0xff << DPIO_SWING_MARGIN101_SHIFT)
 #define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
@@ -1410,7 +1465,7 @@ enum i915_power_well_id {
 
 #define _VLV_TX_DW5_CH0                        0x8294
 #define _VLV_TX_DW5_CH1                        0x8494
-#define   DPIO_TX_OCALINIT_EN          (1<<31)
+#define   DPIO_TX_OCALINIT_EN          (1 << 31)
 #define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1)
 
 #define _VLV_TX_DW11_CH0               0x82ac
@@ -1640,10 +1695,10 @@ enum i915_power_well_id {
 #define  PORT_PLL_LOCK_THRESHOLD_SHIFT 1
 #define  PORT_PLL_LOCK_THRESHOLD_MASK  (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT)
 /* PORT_PLL_10_A */
-#define  PORT_PLL_DCO_AMP_OVR_EN_H     (1<<27)
+#define  PORT_PLL_DCO_AMP_OVR_EN_H     (1 << 27)
 #define  PORT_PLL_DCO_AMP_DEFAULT      15
 #define  PORT_PLL_DCO_AMP_MASK         0x3c00
-#define  PORT_PLL_DCO_AMP(x)           ((x)<<10)
+#define  PORT_PLL_DCO_AMP(x)           ((x) << 10)
 #define _PORT_PLL_BASE(phy, ch)                _BXT_PHY_CH(phy, ch, \
                                                    _PORT_PLL_0_B, \
                                                    _PORT_PLL_0_C)
@@ -1666,6 +1721,26 @@ enum i915_power_well_id {
 #define ICL_PORT_CL_DW5(port)  _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
                                                 _ICL_PORT_CL_DW5_B)
 
+#define _CNL_PORT_CL_DW10_A            0x162028
+#define _ICL_PORT_CL_DW10_B            0x6c028
+#define ICL_PORT_CL_DW10(port)         _MMIO_PORT(port,        \
+                                                  _CNL_PORT_CL_DW10_A, \
+                                                  _ICL_PORT_CL_DW10_B)
+#define  PG_SEQ_DELAY_OVERRIDE_MASK    (3 << 25)
+#define  PG_SEQ_DELAY_OVERRIDE_SHIFT   25
+#define  PG_SEQ_DELAY_OVERRIDE_ENABLE  (1 << 24)
+#define  PWR_UP_ALL_LANES              (0x0 << 4)
+#define  PWR_DOWN_LN_3_2_1             (0xe << 4)
+#define  PWR_DOWN_LN_3_2               (0xc << 4)
+#define  PWR_DOWN_LN_3                 (0x8 << 4)
+#define  PWR_DOWN_LN_2_1_0             (0x7 << 4)
+#define  PWR_DOWN_LN_1_0               (0x3 << 4)
+#define  PWR_DOWN_LN_1                 (0x2 << 4)
+#define  PWR_DOWN_LN_3_1               (0xa << 4)
+#define  PWR_DOWN_LN_3_1_0             (0xb << 4)
+#define  PWR_DOWN_LN_MASK              (0xf << 4)
+#define  PWR_DOWN_LN_SHIFT             4
+
 #define _PORT_CL1CM_DW9_A              0x162024
 #define _PORT_CL1CM_DW9_BC             0x6C024
 #define   IREF0RC_OFFSET_SHIFT         8
@@ -1678,6 +1753,13 @@ enum i915_power_well_id {
 #define   IREF1RC_OFFSET_MASK          (0xFF << IREF1RC_OFFSET_SHIFT)
 #define BXT_PORT_CL1CM_DW10(phy)       _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
 
+#define _ICL_PORT_CL_DW12_A            0x162030
+#define _ICL_PORT_CL_DW12_B            0x6C030
+#define   ICL_LANE_ENABLE_AUX          (1 << 0)
+#define ICL_PORT_CL_DW12(port)         _MMIO_PORT((port),              \
+                                                  _ICL_PORT_CL_DW12_A, \
+                                                  _ICL_PORT_CL_DW12_B)
+
 #define _PORT_CL1CM_DW28_A             0x162070
 #define _PORT_CL1CM_DW28_BC            0x6C070
 #define   OCL1_POWER_DOWN_EN           (1 << 23)
@@ -1715,16 +1797,22 @@ enum i915_power_well_id {
                                                    _CNL_PORT_PCS_DW1_LN0_D, \
                                                    _CNL_PORT_PCS_DW1_LN0_AE, \
                                                    _CNL_PORT_PCS_DW1_LN0_F))
+
 #define _ICL_PORT_PCS_DW1_GRP_A                0x162604
 #define _ICL_PORT_PCS_DW1_GRP_B                0x6C604
 #define _ICL_PORT_PCS_DW1_LN0_A                0x162804
 #define _ICL_PORT_PCS_DW1_LN0_B                0x6C804
+#define _ICL_PORT_PCS_DW1_AUX_A                0x162304
+#define _ICL_PORT_PCS_DW1_AUX_B                0x6c304
 #define ICL_PORT_PCS_DW1_GRP(port)     _MMIO_PORT(port,\
                                                   _ICL_PORT_PCS_DW1_GRP_A, \
                                                   _ICL_PORT_PCS_DW1_GRP_B)
 #define ICL_PORT_PCS_DW1_LN0(port)     _MMIO_PORT(port, \
                                                   _ICL_PORT_PCS_DW1_LN0_A, \
                                                   _ICL_PORT_PCS_DW1_LN0_B)
+#define ICL_PORT_PCS_DW1_AUX(port)     _MMIO_PORT(port, \
+                                                  _ICL_PORT_PCS_DW1_AUX_A, \
+                                                  _ICL_PORT_PCS_DW1_AUX_B)
 #define   COMMON_KEEPER_EN             (1 << 26)
 
 /* CNL Port TX registers */
@@ -1745,7 +1833,7 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_D_GRP_OFFSET, \
                                               _CNL_PORT_TX_AE_GRP_OFFSET, \
                                               _CNL_PORT_TX_F_GRP_OFFSET) + \
-                                              4*(dw))
+                                              4 * (dw))
 #define _CNL_PORT_TX_DW_LN0(port, dw)  (_PICK((port), \
                                               _CNL_PORT_TX_AE_LN0_OFFSET, \
                                               _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1753,7 +1841,7 @@ enum i915_power_well_id {
                                               _CNL_PORT_TX_D_LN0_OFFSET, \
                                               _CNL_PORT_TX_AE_LN0_OFFSET, \
                                               _CNL_PORT_TX_F_LN0_OFFSET) + \
-                                              4*(dw))
+                                              4 * (dw))
 
 #define CNL_PORT_TX_DW2_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
 #define CNL_PORT_TX_DW2_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
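
The + 4 * (dw) term in _CNL_PORT_TX_DW_GRP()/_CNL_PORT_TX_DW_LN0() is a plain dword stride, so the DW2 accessors above land 8 bytes past DW0 (worked directly from the macros; the per-port base offsets themselves are defined outside this hunk):

/*
 * TX register DWn sits 4 * n bytes past the per-port block base:
 *   _CNL_PORT_TX_DW_GRP(port, 2) = <grp base> + 0x8
 *   _CNL_PORT_TX_DW_GRP(port, 4) = <grp base> + 0x10
 */
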
@@ -1761,16 +1849,23 @@ enum i915_power_well_id {
 #define _ICL_PORT_TX_DW2_GRP_B         0x6C688
 #define _ICL_PORT_TX_DW2_LN0_A         0x162888
 #define _ICL_PORT_TX_DW2_LN0_B         0x6C888
+#define _ICL_PORT_TX_DW2_AUX_A         0x162388
+#define _ICL_PORT_TX_DW2_AUX_B         0x6c388
 #define ICL_PORT_TX_DW2_GRP(port)      _MMIO_PORT(port, \
                                                   _ICL_PORT_TX_DW2_GRP_A, \
                                                   _ICL_PORT_TX_DW2_GRP_B)
 #define ICL_PORT_TX_DW2_LN0(port)      _MMIO_PORT(port, \
                                                   _ICL_PORT_TX_DW2_LN0_A, \
                                                   _ICL_PORT_TX_DW2_LN0_B)
+#define ICL_PORT_TX_DW2_AUX(port)      _MMIO_PORT(port, \
+                                                  _ICL_PORT_TX_DW2_AUX_A, \
+                                                  _ICL_PORT_TX_DW2_AUX_B)
 #define   SWING_SEL_UPPER(x)           (((x) >> 3) << 15)
 #define   SWING_SEL_UPPER_MASK         (1 << 15)
 #define   SWING_SEL_LOWER(x)           (((x) & 0x7) << 11)
 #define   SWING_SEL_LOWER_MASK         (0x7 << 11)
+#define   FRC_LATENCY_OPTIM_MASK       (0x7 << 8)
+#define   FRC_LATENCY_OPTIM_VAL(x)     ((x) << 8)
 #define   RCOMP_SCALAR(x)              ((x) << 0)
 #define   RCOMP_SCALAR_MASK            (0xFF << 0)
 
@@ -1779,21 +1874,26 @@ enum i915_power_well_id {
 #define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
 #define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
 #define CNL_PORT_TX_DW4_LN(port, ln)   _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
-                                            (ln * (_CNL_PORT_TX_DW4_LN1_AE - \
+                                          ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
                                                    _CNL_PORT_TX_DW4_LN0_AE)))
 #define _ICL_PORT_TX_DW4_GRP_A         0x162690
 #define _ICL_PORT_TX_DW4_GRP_B         0x6C690
 #define _ICL_PORT_TX_DW4_LN0_A         0x162890
 #define _ICL_PORT_TX_DW4_LN1_A         0x162990
 #define _ICL_PORT_TX_DW4_LN0_B         0x6C890
+#define _ICL_PORT_TX_DW4_AUX_A         0x162390
+#define _ICL_PORT_TX_DW4_AUX_B         0x6c390
 #define ICL_PORT_TX_DW4_GRP(port)      _MMIO_PORT(port, \
                                                   _ICL_PORT_TX_DW4_GRP_A, \
                                                   _ICL_PORT_TX_DW4_GRP_B)
 #define ICL_PORT_TX_DW4_LN(port, ln)   _MMIO(_PORT(port, \
                                                   _ICL_PORT_TX_DW4_LN0_A, \
                                                   _ICL_PORT_TX_DW4_LN0_B) + \
-                                             (ln * (_ICL_PORT_TX_DW4_LN1_A - \
-                                                    _ICL_PORT_TX_DW4_LN0_A)))
+                                            ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
+                                                     _ICL_PORT_TX_DW4_LN0_A)))
+#define ICL_PORT_TX_DW4_AUX(port)      _MMIO_PORT(port, \
+                                                  _ICL_PORT_TX_DW4_AUX_A, \
+                                                  _ICL_PORT_TX_DW4_AUX_B)
 #define   LOADGEN_SELECT               (1 << 31)
 #define   POST_CURSOR_1(x)             ((x) << 12)
 #define   POST_CURSOR_1_MASK           (0x3F << 12)
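
The lane stride in ICL_PORT_TX_DW4_LN() can be checked from the constants above (0x162990 - 0x162890 = 0x100 per lane):

/*
 * Worked example, with PORT_A standing for the port enum value that
 * selects the _A variants:
 *   ICL_PORT_TX_DW4_LN(PORT_A, 2) = 0x162890 + 2 * 0x100 = 0x162a90
 */
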
@@ -1808,12 +1908,17 @@ enum i915_power_well_id {
 #define _ICL_PORT_TX_DW5_GRP_B         0x6C694
 #define _ICL_PORT_TX_DW5_LN0_A         0x162894
 #define _ICL_PORT_TX_DW5_LN0_B         0x6C894
+#define _ICL_PORT_TX_DW5_AUX_A         0x162394
+#define _ICL_PORT_TX_DW5_AUX_B         0x6c394
 #define ICL_PORT_TX_DW5_GRP(port)      _MMIO_PORT(port, \
                                                   _ICL_PORT_TX_DW5_GRP_A, \
                                                   _ICL_PORT_TX_DW5_GRP_B)
 #define ICL_PORT_TX_DW5_LN0(port)      _MMIO_PORT(port, \
                                                   _ICL_PORT_TX_DW5_LN0_A, \
                                                   _ICL_PORT_TX_DW5_LN0_B)
+#define ICL_PORT_TX_DW5_AUX(port)      _MMIO_PORT(port, \
+                                                  _ICL_PORT_TX_DW5_AUX_A, \
+                                                  _ICL_PORT_TX_DW5_AUX_B)
 #define   TX_TRAINING_EN               (1 << 31)
 #define   TAP2_DISABLE                 (1 << 30)
 #define   TAP3_DISABLE                 (1 << 29)
@@ -1990,6 +2095,11 @@ enum i915_power_well_id {
                                                   _ICL_PORT_COMP_DW10_A, \
                                                   _ICL_PORT_COMP_DW10_B)
 
+/* ICL PHY DFLEX registers */
+#define PORT_TX_DFLEXDPMLE1            _MMIO(0x1638C0)
+#define   DFLEXDPMLE1_DPMLETC_MASK(n)  (0xf << (4 * (n)))
+#define   DFLEXDPMLE1_DPMLETC(n, x)    ((x) << (4 * (n)))
+
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A                        0x16218C
 #define _PORT_REF_DW3_BC               0x6C18C
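
In the new PORT_TX_DFLEXDPMLE1 register above, each instance n (presumably a Type-C connector index; the field semantics are not spelled out in this hunk) owns a 4-bit field at bits [4n+3:4n]:

/*
 * DFLEXDPMLE1_DPMLETC_MASK(2) = 0xf << 8 = 0x00000f00
 * DFLEXDPMLE1_DPMLETC(2, 0x3) = 0x3 << 8 = 0x00000300
 */
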
@@ -2134,8 +2244,8 @@ enum i915_power_well_id {
 /* SKL balance leg register */
 #define DISPIO_CR_TX_BMU_CR0           _MMIO(0x6C00C)
 /* I_boost values */
-#define BALANCE_LEG_SHIFT(port)                (8+3*(port))
-#define BALANCE_LEG_MASK(port)         (7<<(8+3*(port)))
+#define BALANCE_LEG_SHIFT(port)                (8 + 3 * (port))
+#define BALANCE_LEG_MASK(port)         (7 << (8 + 3 * (port)))
 /* Balance leg disable bits */
 #define BALANCE_LEG_DISABLE_SHIFT      23
 #define BALANCE_LEG_DISABLE(port)      (1 << (23 + (port)))
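
The balance-leg fields pack three bits per port starting at bit 8, with a per-port disable bit from bit 23 up; worked for port 2:

/*
 * BALANCE_LEG_SHIFT(2)   = 8 + 3 * 2 = 14
 * BALANCE_LEG_MASK(2)    = 7 << 14   = 0x0001c000
 * BALANCE_LEG_DISABLE(2) = 1 << 25
 */
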
@@ -2155,10 +2265,10 @@ enum i915_power_well_id {
 #define   I830_FENCE_TILING_Y_SHIFT    12
 #define   I830_FENCE_SIZE_BITS(size)   ((ffs((size) >> 19) - 1) << 8)
 #define   I830_FENCE_PITCH_SHIFT       4
-#define   I830_FENCE_REG_VALID         (1<<0)
+#define   I830_FENCE_REG_VALID         (1 << 0)
 #define   I915_FENCE_MAX_PITCH_VAL     4
 #define   I830_FENCE_MAX_PITCH_VAL     6
-#define   I830_FENCE_MAX_SIZE_VAL      (1<<8)
+#define   I830_FENCE_MAX_SIZE_VAL      (1 << 8)
 
 #define   I915_FENCE_START_MASK                0x0ff00000
 #define   I915_FENCE_SIZE_BITS(size)   ((ffs((size) >> 20) - 1) << 8)
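
Since ffs() is 1-based find-first-set, the FENCE_SIZE_BITS macros encode log2 of the fence size at bit 8 — in 1 MiB granules for the I915 variant and 512 KiB granules for the I830 variant (worked for power-of-two sizes):

/*
 * I915_FENCE_SIZE_BITS(1 MiB) = (ffs(1) - 1) << 8 = 0x000
 * I915_FENCE_SIZE_BITS(2 MiB) = (ffs(2) - 1) << 8 = 0x100
 */
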
@@ -2167,7 +2277,7 @@ enum i915_power_well_id {
 #define FENCE_REG_965_HI(i)            _MMIO(0x03000 + (i) * 8 + 4)
 #define   I965_FENCE_PITCH_SHIFT       2
 #define   I965_FENCE_TILING_Y_SHIFT    1
-#define   I965_FENCE_REG_VALID         (1<<0)
+#define   I965_FENCE_REG_VALID         (1 << 0)
 #define   I965_FENCE_MAX_PITCH_VAL     0x0400
 
 #define FENCE_REG_GEN6_LO(i)           _MMIO(0x100000 + (i) * 8)
@@ -2190,13 +2300,13 @@ enum i915_power_well_id {
 #define   PGTBL_ADDRESS_LO_MASK        0xfffff000 /* bits [31:12] */
 #define   PGTBL_ADDRESS_HI_MASK        0x000000f0 /* bits [35:32] (gen4) */
 #define PGTBL_ER       _MMIO(0x02024)
-#define PRB0_BASE      (0x2030-0x30)
-#define PRB1_BASE      (0x2040-0x30) /* 830,gen3 */
-#define PRB2_BASE      (0x2050-0x30) /* gen3 */
-#define SRB0_BASE      (0x2100-0x30) /* gen2 */
-#define SRB1_BASE      (0x2110-0x30) /* gen2 */
-#define SRB2_BASE      (0x2120-0x30) /* 830 */
-#define SRB3_BASE      (0x2130-0x30) /* 830 */
+#define PRB0_BASE      (0x2030 - 0x30)
+#define PRB1_BASE      (0x2040 - 0x30) /* 830,gen3 */
+#define PRB2_BASE      (0x2050 - 0x30) /* gen3 */
+#define SRB0_BASE      (0x2100 - 0x30) /* gen2 */
+#define SRB1_BASE      (0x2110 - 0x30) /* gen2 */
+#define SRB2_BASE      (0x2120 - 0x30) /* 830 */
+#define SRB3_BASE      (0x2130 - 0x30) /* 830 */
 #define RENDER_RING_BASE       0x02000
 #define BSD_RING_BASE          0x04000
 #define GEN6_BSD_RING_BASE     0x12000
@@ -2209,14 +2319,14 @@ enum i915_power_well_id {
 #define GEN11_VEBOX_RING_BASE          0x1c8000
 #define GEN11_VEBOX2_RING_BASE         0x1d8000
 #define BLT_RING_BASE          0x22000
-#define RING_TAIL(base)                _MMIO((base)+0x30)
-#define RING_HEAD(base)                _MMIO((base)+0x34)
-#define RING_START(base)       _MMIO((base)+0x38)
-#define RING_CTL(base)         _MMIO((base)+0x3c)
+#define RING_TAIL(base)                _MMIO((base) + 0x30)
+#define RING_HEAD(base)                _MMIO((base) + 0x34)
+#define RING_START(base)       _MMIO((base) + 0x38)
+#define RING_CTL(base)         _MMIO((base) + 0x3c)
 #define   RING_CTL_SIZE(size)  ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_SYNC_0(base)      _MMIO((base)+0x40)
-#define RING_SYNC_1(base)      _MMIO((base)+0x44)
-#define RING_SYNC_2(base)      _MMIO((base)+0x48)
+#define RING_SYNC_0(base)      _MMIO((base) + 0x40)
+#define RING_SYNC_1(base)      _MMIO((base) + 0x44)
+#define RING_SYNC_2(base)      _MMIO((base) + 0x48)
 #define GEN6_RVSYNC    (RING_SYNC_0(RENDER_RING_BASE))
 #define GEN6_RBSYNC    (RING_SYNC_1(RENDER_RING_BASE))
 #define GEN6_RVESYNC   (RING_SYNC_2(RENDER_RING_BASE))
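
All per-engine registers above are base-relative; e.g. with RENDER_RING_BASE = 0x2000, and assuming 4 KiB pages and page-aligned sizes, RING_CTL_SIZE() yields the "pages - 1" encoding its comment hints at:

/*
 * RING_HEAD(RENDER_RING_BASE) = _MMIO(0x2000 + 0x34) = _MMIO(0x2034)
 * RING_CTL(RENDER_RING_BASE)  = _MMIO(0x2000 + 0x3c) = _MMIO(0x203c)
 * RING_CTL_SIZE(16384)        = 16384 - 4096 = 0x3000,
 *                               i.e. 4 pages encoded as (pages - 1) << 12
 */
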
@@ -2230,21 +2340,22 @@ enum i915_power_well_id {
 #define GEN6_VERSYNC   (RING_SYNC_1(VEBOX_RING_BASE))
 #define GEN6_VEVSYNC   (RING_SYNC_2(VEBOX_RING_BASE))
 #define GEN6_NOSYNC    INVALID_MMIO_REG
-#define RING_PSMI_CTL(base)    _MMIO((base)+0x50)
-#define RING_MAX_IDLE(base)    _MMIO((base)+0x54)
-#define RING_HWS_PGA(base)     _MMIO((base)+0x80)
-#define RING_HWS_PGA_GEN6(base)        _MMIO((base)+0x2080)
-#define RING_RESET_CTL(base)   _MMIO((base)+0xd0)
+#define RING_PSMI_CTL(base)    _MMIO((base) + 0x50)
+#define RING_MAX_IDLE(base)    _MMIO((base) + 0x54)
+#define RING_HWS_PGA(base)     _MMIO((base) + 0x80)
+#define RING_HWS_PGA_GEN6(base)        _MMIO((base) + 0x2080)
+#define RING_RESET_CTL(base)   _MMIO((base) + 0xd0)
 #define   RESET_CTL_REQUEST_RESET  (1 << 0)
 #define   RESET_CTL_READY_TO_RESET (1 << 1)
+#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
 
 #define HSW_GTT_CACHE_EN       _MMIO(0x4024)
 #define   GTT_CACHE_EN_ALL     0xF0007FFF
 #define GEN7_WR_WATERMARK      _MMIO(0x4028)
 #define GEN7_GFX_PRIO_CTRL     _MMIO(0x402C)
 #define ARB_MODE               _MMIO(0x4030)
-#define   ARB_MODE_SWIZZLE_SNB (1<<4)
-#define   ARB_MODE_SWIZZLE_IVB (1<<5)
+#define   ARB_MODE_SWIZZLE_SNB (1 << 4)
+#define   ARB_MODE_SWIZZLE_IVB (1 << 5)
 #define GEN7_GFX_PEND_TLB0     _MMIO(0x4034)
 #define GEN7_GFX_PEND_TLB1     _MMIO(0x4038)
 /* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
@@ -2254,30 +2365,30 @@ enum i915_power_well_id {
 #define GEN7_GFX_MAX_REQ_COUNT         _MMIO(0x4074)
 
 #define GAMTARBMODE            _MMIO(0x04a08)
-#define   ARB_MODE_BWGTLB_DISABLE (1<<9)
-#define   ARB_MODE_SWIZZLE_BDW (1<<1)
+#define   ARB_MODE_BWGTLB_DISABLE (1 << 9)
+#define   ARB_MODE_SWIZZLE_BDW (1 << 1)
 #define RENDER_HWS_PGA_GEN7    _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id)
+#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
 #define GEN8_RING_FAULT_REG    _MMIO(0x4094)
 #define   GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
-#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_GTTSEL_MASK (1 << 11)
 #define   RING_FAULT_SRCID(x)  (((x) >> 3) & 0xff)
 #define   RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3)
-#define   RING_FAULT_VALID     (1<<0)
+#define   RING_FAULT_VALID     (1 << 0)
 #define DONE_REG               _MMIO(0x40b0)
 #define GEN8_PRIVATE_PAT_LO    _MMIO(0x40e0)
 #define GEN8_PRIVATE_PAT_HI    _MMIO(0x40e0 + 4)
-#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4)
+#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4)
 #define BSD_HWS_PGA_GEN7       _MMIO(0x04180)
 #define BLT_HWS_PGA_GEN7       _MMIO(0x04280)
 #define VEBOX_HWS_PGA_GEN7     _MMIO(0x04380)
-#define RING_ACTHD(base)       _MMIO((base)+0x74)
-#define RING_ACTHD_UDW(base)   _MMIO((base)+0x5c)
-#define RING_NOPID(base)       _MMIO((base)+0x94)
-#define RING_IMR(base)         _MMIO((base)+0xa8)
-#define RING_HWSTAM(base)      _MMIO((base)+0x98)
-#define RING_TIMESTAMP(base)           _MMIO((base)+0x358)
-#define RING_TIMESTAMP_UDW(base)       _MMIO((base)+0x358 + 4)
+#define RING_ACTHD(base)       _MMIO((base) + 0x74)
+#define RING_ACTHD_UDW(base)   _MMIO((base) + 0x5c)
+#define RING_NOPID(base)       _MMIO((base) + 0x94)
+#define RING_IMR(base)         _MMIO((base) + 0xa8)
+#define RING_HWSTAM(base)      _MMIO((base) + 0x98)
+#define RING_TIMESTAMP(base)           _MMIO((base) + 0x358)
+#define RING_TIMESTAMP_UDW(base)       _MMIO((base) + 0x358 + 4)
 #define   TAIL_ADDR            0x001FFFF8
 #define   HEAD_WRAP_COUNT      0xFFE00000
 #define   HEAD_WRAP_ONE                0x00200000
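
A fault register read decodes with the field macros above; e.g. for an engine with hw_id 1 (the raw value is chosen purely for illustration):

/*
 * RING_FAULT_REG -> _MMIO(0x4094 + 0x100 * 1) = _MMIO(0x4194)
 * raw value 0x00000869:
 *   RING_FAULT_VALID        : bit 0 set
 *   RING_FAULT_FAULT_TYPE() : (0x869 >> 1) & 0x3  = 0
 *   RING_FAULT_SRCID()      : (0x869 >> 3) & 0xff = 0x0d
 *   RING_FAULT_GTTSEL_MASK  : bit 11 set
 */
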
@@ -2290,24 +2401,25 @@ enum i915_power_well_id {
 #define   RING_VALID_MASK      0x00000001
 #define   RING_VALID           0x00000001
 #define   RING_INVALID         0x00000000
-#define   RING_WAIT_I8XX       (1<<0) /* gen2, PRBx_HEAD */
-#define   RING_WAIT            (1<<11) /* gen3+, PRBx_CTL */
-#define   RING_WAIT_SEMAPHORE  (1<<10) /* gen6+ */
+#define   RING_WAIT_I8XX       (1 << 0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT            (1 << 11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE  (1 << 10) /* gen6+ */
 
-#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
 #define   RING_MAX_NONPRIV_SLOTS  12
 
 #define GEN7_TLB_RD_ADDR       _MMIO(0x4700)
 
 #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
-#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1<<18)
+#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1 << 18)
 
 #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
 #define   GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
 
 #define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
-#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1<<28)
-#define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT       (1<<24)
+#define   GAMT_CHKN_DISABLE_L3_COH_PIPE                        (1 << 31)
+#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1 << 28)
+#define   GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT       (1 << 24)
 
 #if 0
 #define PRB0_TAIL      _MMIO(0x2030)
@@ -2333,19 +2445,19 @@ enum i915_power_well_id {
 #define   GEN11_MCR_SLICE_MASK         GEN11_MCR_SLICE(0xf)
 #define   GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
 #define   GEN11_MCR_SUBSLICE_MASK      GEN11_MCR_SUBSLICE(0x7)
-#define RING_IPEIR(base)       _MMIO((base)+0x64)
-#define RING_IPEHR(base)       _MMIO((base)+0x68)
+#define RING_IPEIR(base)       _MMIO((base) + 0x64)
+#define RING_IPEHR(base)       _MMIO((base) + 0x68)
 /*
  * On GEN4, only the render ring INSTDONE exists and has a different
  * layout than the GEN7+ version.
  * The GEN2 counterpart of this register is GEN2_INSTDONE.
  */
-#define RING_INSTDONE(base)    _MMIO((base)+0x6c)
-#define RING_INSTPS(base)      _MMIO((base)+0x70)
-#define RING_DMA_FADD(base)    _MMIO((base)+0x78)
-#define RING_DMA_FADD_UDW(base)        _MMIO((base)+0x60) /* gen8+ */
-#define RING_INSTPM(base)      _MMIO((base)+0xc0)
-#define RING_MI_MODE(base)     _MMIO((base)+0x9c)
+#define RING_INSTDONE(base)    _MMIO((base) + 0x6c)
+#define RING_INSTPS(base)      _MMIO((base) + 0x70)
+#define RING_DMA_FADD(base)    _MMIO((base) + 0x78)
+#define RING_DMA_FADD_UDW(base)        _MMIO((base) + 0x60) /* gen8+ */
+#define RING_INSTPM(base)      _MMIO((base) + 0xc0)
+#define RING_MI_MODE(base)     _MMIO((base) + 0x9c)
 #define INSTPS         _MMIO(0x2070) /* 965+ only */
 #define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */
 #define ACTHD_I965     _MMIO(0x2074)
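
The GEN11 MCR subslice field from the hunk above encodes as follows (the matching slice field is set via GEN11_MCR_SLICE(), defined outside this hunk):

/*
 * GEN11_MCR_SUBSLICE(2)   = (2 & 0x7) << 24 = 0x02000000
 * GEN11_MCR_SUBSLICE_MASK = 0x7 << 24       = 0x07000000
 */
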
@@ -2353,37 +2465,37 @@ enum i915_power_well_id {
 #define HWS_ADDRESS_MASK       0xfffff000
 #define HWS_START_ADDRESS_SHIFT        4
 #define PWRCTXA                _MMIO(0x2088) /* 965GM+ only */
-#define   PWRCTX_EN    (1<<0)
+#define   PWRCTX_EN    (1 << 0)
 #define IPEIR          _MMIO(0x2088)
 #define IPEHR          _MMIO(0x208c)
 #define GEN2_INSTDONE  _MMIO(0x2090)
 #define NOPID          _MMIO(0x2094)
 #define HWSTAM         _MMIO(0x2098)
 #define DMA_FADD_I8XX  _MMIO(0x20d0)
-#define RING_BBSTATE(base)     _MMIO((base)+0x110)
+#define RING_BBSTATE(base)     _MMIO((base) + 0x110)
 #define   RING_BB_PPGTT                (1 << 5)
-#define RING_SBBADDR(base)     _MMIO((base)+0x114) /* hsw+ */
-#define RING_SBBSTATE(base)    _MMIO((base)+0x118) /* hsw+ */
-#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */
-#define RING_BBADDR(base)      _MMIO((base)+0x140)
-#define RING_BBADDR_UDW(base)  _MMIO((base)+0x168) /* gen8+ */
-#define RING_BB_PER_CTX_PTR(base)      _MMIO((base)+0x1c0) /* gen8+ */
-#define RING_INDIRECT_CTX(base)                _MMIO((base)+0x1c4) /* gen8+ */
-#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */
-#define RING_CTX_TIMESTAMP(base)       _MMIO((base)+0x3a8) /* gen8+ */
+#define RING_SBBADDR(base)     _MMIO((base) + 0x114) /* hsw+ */
+#define RING_SBBSTATE(base)    _MMIO((base) + 0x118) /* hsw+ */
+#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
+#define RING_BBADDR(base)      _MMIO((base) + 0x140)
+#define RING_BBADDR_UDW(base)  _MMIO((base) + 0x168) /* gen8+ */
+#define RING_BB_PER_CTX_PTR(base)      _MMIO((base) + 0x1c0) /* gen8+ */
+#define RING_INDIRECT_CTX(base)                _MMIO((base) + 0x1c4) /* gen8+ */
+#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
+#define RING_CTX_TIMESTAMP(base)       _MMIO((base) + 0x3a8) /* gen8+ */
 
 #define ERROR_GEN6     _MMIO(0x40a0)
 #define GEN7_ERR_INT   _MMIO(0x44040)
-#define   ERR_INT_POISON               (1<<31)
-#define   ERR_INT_MMIO_UNCLAIMED       (1<<13)
-#define   ERR_INT_PIPE_CRC_DONE_C      (1<<8)
-#define   ERR_INT_FIFO_UNDERRUN_C      (1<<6)
-#define   ERR_INT_PIPE_CRC_DONE_B      (1<<5)
-#define   ERR_INT_FIFO_UNDERRUN_B      (1<<3)
-#define   ERR_INT_PIPE_CRC_DONE_A      (1<<2)
-#define   ERR_INT_PIPE_CRC_DONE(pipe)  (1<<(2 + (pipe)*3))
-#define   ERR_INT_FIFO_UNDERRUN_A      (1<<0)
-#define   ERR_INT_FIFO_UNDERRUN(pipe)  (1<<((pipe)*3))
+#define   ERR_INT_POISON               (1 << 31)
+#define   ERR_INT_MMIO_UNCLAIMED       (1 << 13)
+#define   ERR_INT_PIPE_CRC_DONE_C      (1 << 8)
+#define   ERR_INT_FIFO_UNDERRUN_C      (1 << 6)
+#define   ERR_INT_PIPE_CRC_DONE_B      (1 << 5)
+#define   ERR_INT_FIFO_UNDERRUN_B      (1 << 3)
+#define   ERR_INT_PIPE_CRC_DONE_A      (1 << 2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)  (1 << (2 + (pipe) * 3))
+#define   ERR_INT_FIFO_UNDERRUN_A      (1 << 0)
+#define   ERR_INT_FIFO_UNDERRUN(pipe)  (1 << ((pipe) * 3))
 
 #define GEN8_FAULT_TLB_DATA0           _MMIO(0x4b10)
 #define GEN8_FAULT_TLB_DATA1           _MMIO(0x4b14)
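
The parameterized ERR_INT macros above reproduce the fixed per-pipe bits, three bits per pipe (worked for pipes B and C):

/*
 * ERR_INT_FIFO_UNDERRUN(1) = 1 << (1 * 3)     = bit 3 (= _B)
 * ERR_INT_PIPE_CRC_DONE(2) = 1 << (2 + 2 * 3) = bit 8 (= _C)
 */
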
@@ -2391,7 +2503,7 @@ enum i915_power_well_id {
 #define   FAULT_GTT_SEL                        (1 << 4)
 
 #define FPGA_DBG               _MMIO(0x42300)
-#define   FPGA_DBG_RM_NOCLAIM  (1<<31)
+#define   FPGA_DBG_RM_NOCLAIM  (1 << 31)
 
 #define CLAIM_ER               _MMIO(VLV_DISPLAY_BASE + 0x2028)
 #define   CLAIM_ER_CLR         (1 << 31)
@@ -2400,22 +2512,22 @@ enum i915_power_well_id {
 
 #define DERRMR         _MMIO(0x44050)
 /* Note that HBLANK events are reserved on bdw+ */
-#define   DERRMR_PIPEA_SCANLINE                (1<<0)
-#define   DERRMR_PIPEA_PRI_FLIP_DONE   (1<<1)
-#define   DERRMR_PIPEA_SPR_FLIP_DONE   (1<<2)
-#define   DERRMR_PIPEA_VBLANK          (1<<3)
-#define   DERRMR_PIPEA_HBLANK          (1<<5)
-#define   DERRMR_PIPEB_SCANLINE        (1<<8)
-#define   DERRMR_PIPEB_PRI_FLIP_DONE   (1<<9)
-#define   DERRMR_PIPEB_SPR_FLIP_DONE   (1<<10)
-#define   DERRMR_PIPEB_VBLANK          (1<<11)
-#define   DERRMR_PIPEB_HBLANK          (1<<13)
+#define   DERRMR_PIPEA_SCANLINE                (1 << 0)
+#define   DERRMR_PIPEA_PRI_FLIP_DONE   (1 << 1)
+#define   DERRMR_PIPEA_SPR_FLIP_DONE   (1 << 2)
+#define   DERRMR_PIPEA_VBLANK          (1 << 3)
+#define   DERRMR_PIPEA_HBLANK          (1 << 5)
+#define   DERRMR_PIPEB_SCANLINE                (1 << 8)
+#define   DERRMR_PIPEB_PRI_FLIP_DONE   (1 << 9)
+#define   DERRMR_PIPEB_SPR_FLIP_DONE   (1 << 10)
+#define   DERRMR_PIPEB_VBLANK          (1 << 11)
+#define   DERRMR_PIPEB_HBLANK          (1 << 13)
 /* Note that PIPEC is not a simple translation of PIPEA/PIPEB */
-#define   DERRMR_PIPEC_SCANLINE                (1<<14)
-#define   DERRMR_PIPEC_PRI_FLIP_DONE   (1<<15)
-#define   DERRMR_PIPEC_SPR_FLIP_DONE   (1<<20)
-#define   DERRMR_PIPEC_VBLANK          (1<<21)
-#define   DERRMR_PIPEC_HBLANK          (1<<22)
+#define   DERRMR_PIPEC_SCANLINE                (1 << 14)
+#define   DERRMR_PIPEC_PRI_FLIP_DONE   (1 << 15)
+#define   DERRMR_PIPEC_SPR_FLIP_DONE   (1 << 20)
+#define   DERRMR_PIPEC_VBLANK          (1 << 21)
+#define   DERRMR_PIPEC_HBLANK          (1 << 22)
 
 
 /* GM45+ chicken bits -- debug workaround bits that may be required
@@ -2425,16 +2537,21 @@ enum i915_power_well_id {
 #define _3D_CHICKEN    _MMIO(0x2084)
 #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB     (1 << 10)
 #define _3D_CHICKEN2   _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN       _MMIO(0x2088)
+#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX      (1 << 1)
+
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED                        (1 << 14)
 #define _3D_CHICKEN3   _MMIO(0x2090)
+#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX           (1 << 12)
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE       (1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
-#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)      ((x)<<1) /* gen8+ */
+#define  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x)      ((x) << 1) /* gen8+ */
 #define  _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH  (1 << 1) /* gen6 */
 
 #define MI_MODE                _MMIO(0x209c)
@@ -2473,22 +2590,22 @@ enum i915_power_well_id {
 
 #define GFX_MODE       _MMIO(0x2520)
 #define GFX_MODE_GEN7  _MMIO(0x229c)
-#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c)
-#define   GFX_RUN_LIST_ENABLE          (1<<15)
-#define   GFX_INTERRUPT_STEERING       (1<<14)
-#define   GFX_TLB_INVALIDATE_EXPLICIT  (1<<13)
-#define   GFX_SURFACE_FAULT_ENABLE     (1<<12)
-#define   GFX_REPLAY_MODE              (1<<11)
-#define   GFX_PSMI_GRANULARITY         (1<<10)
-#define   GFX_PPGTT_ENABLE             (1<<9)
-#define   GEN8_GFX_PPGTT_48B           (1<<7)
-
-#define   GFX_FORWARD_VBLANK_MASK      (3<<5)
-#define   GFX_FORWARD_VBLANK_NEVER     (0<<5)
-#define   GFX_FORWARD_VBLANK_ALWAYS    (1<<5)
-#define   GFX_FORWARD_VBLANK_COND      (2<<5)
-
-#define   GEN11_GFX_DISABLE_LEGACY_MODE        (1<<3)
+#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c)
+#define   GFX_RUN_LIST_ENABLE          (1 << 15)
+#define   GFX_INTERRUPT_STEERING       (1 << 14)
+#define   GFX_TLB_INVALIDATE_EXPLICIT  (1 << 13)
+#define   GFX_SURFACE_FAULT_ENABLE     (1 << 12)
+#define   GFX_REPLAY_MODE              (1 << 11)
+#define   GFX_PSMI_GRANULARITY         (1 << 10)
+#define   GFX_PPGTT_ENABLE             (1 << 9)
+#define   GEN8_GFX_PPGTT_48B           (1 << 7)
+
+#define   GFX_FORWARD_VBLANK_MASK      (3 << 5)
+#define   GFX_FORWARD_VBLANK_NEVER     (0 << 5)
+#define   GFX_FORWARD_VBLANK_ALWAYS    (1 << 5)
+#define   GFX_FORWARD_VBLANK_COND      (2 << 5)
+
+#define   GEN11_GFX_DISABLE_LEGACY_MODE        (1 << 3)
 
 #define VLV_DISPLAY_BASE 0x180000
 #define VLV_MIPI_BASE VLV_DISPLAY_BASE
@@ -2502,8 +2619,8 @@ enum i915_power_well_id {
 #define IMR            _MMIO(0x20a8)
 #define ISR            _MMIO(0x20ac)
 #define VLV_GUNIT_CLOCK_GATE   _MMIO(VLV_DISPLAY_BASE + 0x2060)
-#define   GINT_DIS             (1<<22)
-#define   GCFG_DIS             (1<<8)
+#define   GINT_DIS             (1 << 22)
+#define   GCFG_DIS             (1 << 8)
 #define VLV_GUNIT_CLOCK_GATE2  _MMIO(VLV_DISPLAY_BASE + 0x2064)
 #define VLV_IIR_RW     _MMIO(VLV_DISPLAY_BASE + 0x2084)
 #define VLV_IER                _MMIO(VLV_DISPLAY_BASE + 0x20a0)
@@ -2513,35 +2630,35 @@ enum i915_power_well_id {
 #define VLV_PCBR       _MMIO(VLV_DISPLAY_BASE + 0x2120)
 #define VLV_PCBR_ADDR_SHIFT    12
 
-#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
+#define   DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */
 #define EIR            _MMIO(0x20b0)
 #define EMR            _MMIO(0x20b4)
 #define ESR            _MMIO(0x20b8)
-#define   GM45_ERROR_PAGE_TABLE                                (1<<5)
-#define   GM45_ERROR_MEM_PRIV                          (1<<4)
-#define   I915_ERROR_PAGE_TABLE                                (1<<4)
-#define   GM45_ERROR_CP_PRIV                           (1<<3)
-#define   I915_ERROR_MEMORY_REFRESH                    (1<<1)
-#define   I915_ERROR_INSTRUCTION                       (1<<0)
+#define   GM45_ERROR_PAGE_TABLE                                (1 << 5)
+#define   GM45_ERROR_MEM_PRIV                          (1 << 4)
+#define   I915_ERROR_PAGE_TABLE                                (1 << 4)
+#define   GM45_ERROR_CP_PRIV                           (1 << 3)
+#define   I915_ERROR_MEMORY_REFRESH                    (1 << 1)
+#define   I915_ERROR_INSTRUCTION                       (1 << 0)
 #define INSTPM         _MMIO(0x20c0)
-#define   INSTPM_SELF_EN (1<<12) /* 915GM only */
-#define   INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts
+#define   INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define   INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
                                        will not assert AGPBUSY# and will only
                                        be delivered when out of C3. */
-#define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
-#define   INSTPM_TLB_INVALIDATE        (1<<9)
-#define   INSTPM_SYNC_FLUSH    (1<<5)
+#define   INSTPM_FORCE_ORDERING                                (1 << 7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE        (1 << 9)
+#define   INSTPM_SYNC_FLUSH    (1 << 5)
 #define ACTHD          _MMIO(0x20c8)
 #define MEM_MODE       _MMIO(0x20cc)
-#define   MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
-#define   MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
-#define   MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
+#define   MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
+#define   MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
+#define   MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
 #define FW_BLC         _MMIO(0x20d8)
 #define FW_BLC2                _MMIO(0x20dc)
 #define FW_BLC_SELF    _MMIO(0x20e0) /* 915+ only */
-#define   FW_BLC_SELF_EN_MASK      (1<<31)
-#define   FW_BLC_SELF_FIFO_MASK    (1<<16) /* 945 only */
-#define   FW_BLC_SELF_EN           (1<<15) /* 945 only */
+#define   FW_BLC_SELF_EN_MASK      (1 << 31)
+#define   FW_BLC_SELF_FIFO_MASK    (1 << 16) /* 945 only */
+#define   FW_BLC_SELF_EN           (1 << 15) /* 945 only */
 #define MM_BURST_LENGTH     0x00700000
 #define MM_FIFO_WATERMARK   0x0001F000
 #define LM_BURST_LENGTH     0x00000700
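
DISPLAY_PLANE_FLIP_PENDING() earlier in this hunk counts down from bit 11, matching its "A and B only" note:

/*
 * DISPLAY_PLANE_FLIP_PENDING(0) = 1 << 11 (plane A)
 * DISPLAY_PLANE_FLIP_PENDING(1) = 1 << 10 (plane B)
 */
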
@@ -2640,37 +2757,37 @@ enum i915_power_well_id {
 #define   MI_AGPBUSY_830_MODE                  (1 << 0) /* 85x only */
 
 #define CACHE_MODE_0   _MMIO(0x2120) /* 915+ only */
-#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
-#define   CM0_IZ_OPT_DISABLE      (1<<6)
-#define   CM0_ZR_OPT_DISABLE      (1<<5)
-#define          CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
-#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define   CM0_COLOR_EVICT_DISABLE (1<<3)
-#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8)
+#define   CM0_IZ_OPT_DISABLE      (1 << 6)
+#define   CM0_ZR_OPT_DISABLE      (1 << 5)
+#define          CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5)
+#define   CM0_DEPTH_EVICT_DISABLE (1 << 4)
+#define   CM0_COLOR_EVICT_DISABLE (1 << 3)
+#define   CM0_DEPTH_WRITE_DISABLE (1 << 1)
+#define   CM0_RC_OP_FLUSH_DISABLE (1 << 0)
 #define GFX_FLSH_CNTL  _MMIO(0x2170) /* 915+ only */
 #define GFX_FLSH_CNTL_GEN6     _MMIO(0x101008)
-#define   GFX_FLSH_CNTL_EN     (1<<0)
+#define   GFX_FLSH_CNTL_EN     (1 << 0)
 #define ECOSKPD                _MMIO(0x21d0)
-#define   ECO_GATING_CX_ONLY   (1<<3)
-#define   ECO_FLIP_DONE                (1<<0)
+#define   ECO_GATING_CX_ONLY   (1 << 3)
+#define   ECO_FLIP_DONE                (1 << 0)
 
 #define CACHE_MODE_0_GEN7      _MMIO(0x7000) /* IVB+ */
-#define RC_OP_FLUSH_ENABLE (1<<0)
-#define   HIZ_RAW_STALL_OPT_DISABLE (1<<2)
+#define RC_OP_FLUSH_ENABLE (1 << 0)
+#define   HIZ_RAW_STALL_OPT_DISABLE (1 << 2)
 #define CACHE_MODE_1           _MMIO(0x7004) /* IVB+ */
-#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    (1<<6)
-#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    (1<<6)
-#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   (1<<1)
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    (1 << 6)
+#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    (1 << 6)
+#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   (1 << 1)
 
 #define GEN6_BLITTER_ECOSKPD   _MMIO(0x221d0)
 #define   GEN6_BLITTER_LOCK_SHIFT                      16
-#define   GEN6_BLITTER_FBC_NOTIFY                      (1<<3)
+#define   GEN6_BLITTER_FBC_NOTIFY                      (1 << 3)
 
 #define GEN6_RC_SLEEP_PSMI_CONTROL     _MMIO(0x2050)
 #define   GEN6_PSMI_SLEEP_MSG_DISABLE  (1 << 0)
 #define   GEN8_RC_SEMA_IDLE_MSG_DISABLE        (1 << 12)
-#define   GEN8_FF_DOP_CLOCK_GATE_DISABLE       (1<<10)
+#define   GEN8_FF_DOP_CLOCK_GATE_DISABLE       (1 << 10)
 
 #define GEN6_RCS_PWR_FSM _MMIO(0x22ac)
 #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4)
@@ -2709,6 +2826,10 @@ enum i915_power_well_id {
 #define   GEN10_F2_SS_DIS_SHIFT                18
 #define   GEN10_F2_SS_DIS_MASK         (0xf << GEN10_F2_SS_DIS_SHIFT)
 
+#define        GEN10_MIRROR_FUSE3              _MMIO(0x9118)
+#define GEN10_L3BANK_PAIR_COUNT     4
+#define GEN10_L3BANK_MASK   0x0F
+
 #define GEN8_EU_DISABLE0               _MMIO(0x9134)
 #define   GEN8_EU_DIS0_S0_MASK         0xffffff
 #define   GEN8_EU_DIS0_S1_SHIFT                24
@@ -2722,7 +2843,7 @@ enum i915_power_well_id {
 #define GEN8_EU_DISABLE2               _MMIO(0x913c)
 #define   GEN8_EU_DIS2_S2_MASK         0xff
 
-#define GEN9_EU_DISABLE(slice)         _MMIO(0x9134 + (slice)*0x4)
+#define GEN9_EU_DISABLE(slice)         _MMIO(0x9134 + (slice) * 0x4)
 
 #define GEN10_EU_DISABLE3              _MMIO(0x9140)
 #define   GEN10_EU_DIS_SS_MASK         0xff
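
GEN9_EU_DISABLE() strides one dword per slice, so it aliases the fixed GEN8 offsets above:

/*
 * GEN9_EU_DISABLE(0) = _MMIO(0x9134)             = GEN8_EU_DISABLE0
 * GEN9_EU_DISABLE(2) = _MMIO(0x9134 + 2 * 0x4)   = _MMIO(0x913c)
 *                                                = GEN8_EU_DISABLE2
 */
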
@@ -2779,44 +2900,43 @@ enum i915_power_well_id {
         (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
 
 /* These are all the "old" interrupts */
-#define ILK_BSD_USER_INTERRUPT                         (1<<5)
-
-#define I915_PM_INTERRUPT                              (1<<31)
-#define I915_ISP_INTERRUPT                             (1<<22)
-#define I915_LPE_PIPE_B_INTERRUPT                      (1<<21)
-#define I915_LPE_PIPE_A_INTERRUPT                      (1<<20)
-#define I915_MIPIC_INTERRUPT                           (1<<19)
-#define I915_MIPIA_INTERRUPT                           (1<<18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT             (1<<18)
-#define I915_DISPLAY_PORT_INTERRUPT                    (1<<17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT           (1<<16)
-#define I915_MASTER_ERROR_INTERRUPT                    (1<<15)
-#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT     (1<<15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT           (1<<14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT       (1<<14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT           (1<<13)
-#define I915_HWB_OOM_INTERRUPT                         (1<<13)
-#define I915_LPE_PIPE_C_INTERRUPT                      (1<<12)
-#define I915_SYNC_STATUS_INTERRUPT                     (1<<12)
-#define I915_MISC_INTERRUPT                            (1<<11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT    (1<<11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT           (1<<10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT    (1<<10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT            (1<<9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT      (1<<9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT             (1<<8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT    (1<<8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT           (1<<7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT            (1<<6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT           (1<<5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT            (1<<4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT             (1<<3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT             (1<<2)
-#define I915_DEBUG_INTERRUPT                           (1<<2)
-#define I915_WINVALID_INTERRUPT                                (1<<1)
-#define I915_USER_INTERRUPT                            (1<<1)
-#define I915_ASLE_INTERRUPT                            (1<<0)
-#define I915_BSD_USER_INTERRUPT                                (1<<25)
+#define ILK_BSD_USER_INTERRUPT                         (1 << 5)
+
+#define I915_PM_INTERRUPT                              (1 << 31)
+#define I915_ISP_INTERRUPT                             (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT                      (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT                      (1 << 20)
+#define I915_MIPIC_INTERRUPT                           (1 << 19)
+#define I915_MIPIA_INTERRUPT                           (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT             (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT                    (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT           (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT                    (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT           (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT       (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT           (1 << 13)
+#define I915_HWB_OOM_INTERRUPT                         (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT                      (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT                     (1 << 12)
+#define I915_MISC_INTERRUPT                            (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT    (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT           (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT    (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT            (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT      (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT             (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT    (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT           (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT            (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT           (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT            (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT             (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT             (1 << 2)
+#define I915_DEBUG_INTERRUPT                           (1 << 2)
+#define I915_WINVALID_INTERRUPT                                (1 << 1)
+#define I915_USER_INTERRUPT                            (1 << 1)
+#define I915_ASLE_INTERRUPT                            (1 << 0)
+#define I915_BSD_USER_INTERRUPT                                (1 << 25)
 
 #define I915_HDMI_LPE_AUDIO_BASE       (VLV_DISPLAY_BASE + 0x65000)
 #define I915_HDMI_LPE_AUDIO_SIZE       0x1000
@@ -2839,19 +2959,19 @@ enum i915_power_well_id {
 #define GEN7_FF_THREAD_MODE            _MMIO(0x20a0)
 #define   GEN7_FF_SCHED_MASK           0x0077070
 #define   GEN8_FF_DS_REF_CNT_FFME      (1 << 19)
-#define   GEN7_FF_TS_SCHED_HS1         (0x5<<16)
-#define   GEN7_FF_TS_SCHED_HS0         (0x3<<16)
-#define   GEN7_FF_TS_SCHED_LOAD_BALANCE        (0x1<<16)
-#define   GEN7_FF_TS_SCHED_HW          (0x0<<16) /* Default */
+#define   GEN7_FF_TS_SCHED_HS1         (0x5 << 16)
+#define   GEN7_FF_TS_SCHED_HS0         (0x3 << 16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE        (0x1 << 16)
+#define   GEN7_FF_TS_SCHED_HW          (0x0 << 16) /* Default */
 #define   GEN7_FF_VS_REF_CNT_FFME      (1 << 15)
-#define   GEN7_FF_VS_SCHED_HS1         (0x5<<12)
-#define   GEN7_FF_VS_SCHED_HS0         (0x3<<12)
-#define   GEN7_FF_VS_SCHED_LOAD_BALANCE        (0x1<<12) /* Default */
-#define   GEN7_FF_VS_SCHED_HW          (0x0<<12)
-#define   GEN7_FF_DS_SCHED_HS1         (0x5<<4)
-#define   GEN7_FF_DS_SCHED_HS0         (0x3<<4)
-#define   GEN7_FF_DS_SCHED_LOAD_BALANCE        (0x1<<4)  /* Default */
-#define   GEN7_FF_DS_SCHED_HW          (0x0<<4)
+#define   GEN7_FF_VS_SCHED_HS1         (0x5 << 12)
+#define   GEN7_FF_VS_SCHED_HS0         (0x3 << 12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE        (0x1 << 12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW          (0x0 << 12)
+#define   GEN7_FF_DS_SCHED_HS1         (0x5 << 4)
+#define   GEN7_FF_DS_SCHED_HS0         (0x3 << 4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE        (0x1 << 4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW          (0x0 << 4)
 
 /*
  * Framebuffer compression (915+ only)
@@ -2860,51 +2980,51 @@ enum i915_power_well_id {
 #define FBC_CFB_BASE           _MMIO(0x3200) /* 4k page aligned */
 #define FBC_LL_BASE            _MMIO(0x3204) /* 4k page aligned */
 #define FBC_CONTROL            _MMIO(0x3208)
-#define   FBC_CTL_EN           (1<<31)
-#define   FBC_CTL_PERIODIC     (1<<30)
+#define   FBC_CTL_EN           (1 << 31)
+#define   FBC_CTL_PERIODIC     (1 << 30)
 #define   FBC_CTL_INTERVAL_SHIFT (16)
-#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define   FBC_CTL_C3_IDLE      (1<<13)
+#define   FBC_CTL_UNCOMPRESSIBLE (1 << 14)
+#define   FBC_CTL_C3_IDLE      (1 << 13)
 #define   FBC_CTL_STRIDE_SHIFT (5)
 #define   FBC_CTL_FENCENO_SHIFT        (0)
 #define FBC_COMMAND            _MMIO(0x320c)
-#define   FBC_CMD_COMPRESS     (1<<0)
+#define   FBC_CMD_COMPRESS     (1 << 0)
 #define FBC_STATUS             _MMIO(0x3210)
-#define   FBC_STAT_COMPRESSING (1<<31)
-#define   FBC_STAT_COMPRESSED  (1<<30)
-#define   FBC_STAT_MODIFIED    (1<<29)
+#define   FBC_STAT_COMPRESSING (1 << 31)
+#define   FBC_STAT_COMPRESSED  (1 << 30)
+#define   FBC_STAT_MODIFIED    (1 << 29)
 #define   FBC_STAT_CURRENT_LINE_SHIFT  (0)
 #define FBC_CONTROL2           _MMIO(0x3214)
-#define   FBC_CTL_FENCE_DBL    (0<<4)
-#define   FBC_CTL_IDLE_IMM     (0<<2)
-#define   FBC_CTL_IDLE_FULL    (1<<2)
-#define   FBC_CTL_IDLE_LINE    (2<<2)
-#define   FBC_CTL_IDLE_DEBUG   (3<<2)
-#define   FBC_CTL_CPU_FENCE    (1<<1)
-#define   FBC_CTL_PLANE(plane) ((plane)<<0)
+#define   FBC_CTL_FENCE_DBL    (0 << 4)
+#define   FBC_CTL_IDLE_IMM     (0 << 2)
+#define   FBC_CTL_IDLE_FULL    (1 << 2)
+#define   FBC_CTL_IDLE_LINE    (2 << 2)
+#define   FBC_CTL_IDLE_DEBUG   (3 << 2)
+#define   FBC_CTL_CPU_FENCE    (1 << 1)
+#define   FBC_CTL_PLANE(plane) ((plane) << 0)
 #define FBC_FENCE_OFF          _MMIO(0x3218) /* BSpec typo has 321Bh */
 #define FBC_TAG(i)             _MMIO(0x3300 + (i) * 4)
 
 #define FBC_LL_SIZE            (1536)
 
 #define FBC_LLC_READ_CTRL      _MMIO(0x9044)
-#define   FBC_LLC_FULLY_OPEN   (1<<30)
+#define   FBC_LLC_FULLY_OPEN   (1 << 30)
 
 /* Framebuffer compression for GM45+ */
 #define DPFC_CB_BASE           _MMIO(0x3200)
 #define DPFC_CONTROL           _MMIO(0x3208)
-#define   DPFC_CTL_EN          (1<<31)
-#define   DPFC_CTL_PLANE(plane)        ((plane)<<30)
-#define   IVB_DPFC_CTL_PLANE(plane)    ((plane)<<29)
-#define   DPFC_CTL_FENCE_EN    (1<<29)
-#define   IVB_DPFC_CTL_FENCE_EN        (1<<28)
-#define   DPFC_CTL_PERSISTENT_MODE     (1<<25)
-#define   DPFC_SR_EN           (1<<10)
-#define   DPFC_CTL_LIMIT_1X    (0<<6)
-#define   DPFC_CTL_LIMIT_2X    (1<<6)
-#define   DPFC_CTL_LIMIT_4X    (2<<6)
+#define   DPFC_CTL_EN          (1 << 31)
+#define   DPFC_CTL_PLANE(plane)        ((plane) << 30)
+#define   IVB_DPFC_CTL_PLANE(plane)    ((plane) << 29)
+#define   DPFC_CTL_FENCE_EN    (1 << 29)
+#define   IVB_DPFC_CTL_FENCE_EN        (1 << 28)
+#define   DPFC_CTL_PERSISTENT_MODE     (1 << 25)
+#define   DPFC_SR_EN           (1 << 10)
+#define   DPFC_CTL_LIMIT_1X    (0 << 6)
+#define   DPFC_CTL_LIMIT_2X    (1 << 6)
+#define   DPFC_CTL_LIMIT_4X    (2 << 6)
 #define DPFC_RECOMP_CTL                _MMIO(0x320c)
-#define   DPFC_RECOMP_STALL_EN (1<<27)
+#define   DPFC_RECOMP_STALL_EN (1 << 27)
 #define   DPFC_RECOMP_STALL_WM_SHIFT (16)
 #define   DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
 #define   DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
@@ -2917,12 +3037,12 @@ enum i915_power_well_id {
 #define DPFC_STATUS2           _MMIO(0x3214)
 #define DPFC_FENCE_YOFF                _MMIO(0x3218)
 #define DPFC_CHICKEN           _MMIO(0x3224)
-#define   DPFC_HT_MODIFY       (1<<31)
+#define   DPFC_HT_MODIFY       (1 << 31)
 
 /* Framebuffer compression for Ironlake */
 #define ILK_DPFC_CB_BASE       _MMIO(0x43200)
 #define ILK_DPFC_CONTROL       _MMIO(0x43208)
-#define   FBC_CTL_FALSE_COLOR  (1<<10)
+#define   FBC_CTL_FALSE_COLOR  (1 << 10)
 /* Bits 28:8 are reserved */
 #define   DPFC_RESERVED                (0x1FFFFF00)
 #define ILK_DPFC_RECOMP_CTL    _MMIO(0x4320c)
@@ -2933,15 +3053,15 @@ enum i915_power_well_id {
 #define  BDW_FBC_COMP_SEG_MASK 0xfff
 #define ILK_DPFC_FENCE_YOFF    _MMIO(0x43218)
 #define ILK_DPFC_CHICKEN       _MMIO(0x43224)
-#define   ILK_DPFC_DISABLE_DUMMY0 (1<<8)
-#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1<<23)
+#define   ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
+#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1 << 23)
 #define ILK_FBC_RT_BASE                _MMIO(0x2128)
-#define   ILK_FBC_RT_VALID     (1<<0)
-#define   SNB_FBC_FRONT_BUFFER (1<<1)
+#define   ILK_FBC_RT_VALID     (1 << 0)
+#define   SNB_FBC_FRONT_BUFFER (1 << 1)
 
 #define ILK_DISPLAY_CHICKEN1   _MMIO(0x42000)
-#define   ILK_FBCQ_DIS         (1<<22)
-#define          ILK_PABSTRETCH_DIS    (1<<21)
+#define   ILK_FBCQ_DIS         (1 << 22)
+#define          ILK_PABSTRETCH_DIS    (1 << 21)
 
 
 /*
@@ -2950,7 +3070,7 @@ enum i915_power_well_id {
  * The following two registers are of type GTTMMADR
  */
 #define SNB_DPFC_CTL_SA                _MMIO(0x100100)
-#define   SNB_CPU_FENCE_ENABLE (1<<29)
+#define   SNB_CPU_FENCE_ENABLE (1 << 29)
 #define DPFC_CPU_FENCE_OFFSET  _MMIO(0x100104)
 
 /* Framebuffer compression for Ivybridge */
@@ -2960,8 +3080,8 @@ enum i915_power_well_id {
 #define   IPS_ENABLE   (1 << 31)
 
 #define MSG_FBC_REND_STATE     _MMIO(0x50380)
-#define   FBC_REND_NUKE                (1<<2)
-#define   FBC_REND_CACHE_CLEAN (1<<1)
+#define   FBC_REND_NUKE                (1 << 2)
+#define   FBC_REND_CACHE_CLEAN (1 << 1)
 
 /*
  * GPIO regs
@@ -2974,6 +3094,10 @@ enum i915_power_well_id {
 #define GPIOF                  _MMIO(0x5024)
 #define GPIOG                  _MMIO(0x5028)
 #define GPIOH                  _MMIO(0x502c)
+#define GPIOJ                  _MMIO(0x5034)
+#define GPIOK                  _MMIO(0x5038)
+#define GPIOL                  _MMIO(0x503C)
+#define GPIOM                  _MMIO(0x5040)
 # define GPIO_CLOCK_DIR_MASK           (1 << 0)
 # define GPIO_CLOCK_DIR_IN             (0 << 1)
 # define GPIO_CLOCK_DIR_OUT            (1 << 1)
@@ -2990,12 +3114,13 @@ enum i915_power_well_id {
 # define GPIO_DATA_PULLUP_DISABLE      (1 << 13)
 
 #define GMBUS0                 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */
-#define   GMBUS_AKSV_SELECT    (1<<11)
-#define   GMBUS_RATE_100KHZ    (0<<8)
-#define   GMBUS_RATE_50KHZ     (1<<8)
-#define   GMBUS_RATE_400KHZ    (2<<8) /* reserved on Pineview */
-#define   GMBUS_RATE_1MHZ      (3<<8) /* reserved on Pineview */
-#define   GMBUS_HOLD_EXT       (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_AKSV_SELECT    (1 << 11)
+#define   GMBUS_RATE_100KHZ    (0 << 8)
+#define   GMBUS_RATE_50KHZ     (1 << 8)
+#define   GMBUS_RATE_400KHZ    (2 << 8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ      (3 << 8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT       (1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
 #define   GMBUS_PIN_DISABLED   0
 #define   GMBUS_PIN_SSC                1
 #define   GMBUS_PIN_VGADDC     2
@@ -3016,36 +3141,37 @@ enum i915_power_well_id {
 
 #define   GMBUS_NUM_PINS       13 /* including 0 */
 #define GMBUS1                 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
-#define   GMBUS_SW_CLR_INT     (1<<31)
-#define   GMBUS_SW_RDY         (1<<30)
-#define   GMBUS_ENT            (1<<29) /* enable timeout */
-#define   GMBUS_CYCLE_NONE     (0<<25)
-#define   GMBUS_CYCLE_WAIT     (1<<25)
-#define   GMBUS_CYCLE_INDEX    (2<<25)
-#define   GMBUS_CYCLE_STOP     (4<<25)
+#define   GMBUS_SW_CLR_INT     (1 << 31)
+#define   GMBUS_SW_RDY         (1 << 30)
+#define   GMBUS_ENT            (1 << 29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE     (0 << 25)
+#define   GMBUS_CYCLE_WAIT     (1 << 25)
+#define   GMBUS_CYCLE_INDEX    (2 << 25)
+#define   GMBUS_CYCLE_STOP     (4 << 25)
 #define   GMBUS_BYTE_COUNT_SHIFT 16
 #define   GMBUS_BYTE_COUNT_MAX   256U
+#define   GEN9_GMBUS_BYTE_COUNT_MAX 511U
 #define   GMBUS_SLAVE_INDEX_SHIFT 8
 #define   GMBUS_SLAVE_ADDR_SHIFT 1
-#define   GMBUS_SLAVE_READ     (1<<0)
-#define   GMBUS_SLAVE_WRITE    (0<<0)
+#define   GMBUS_SLAVE_READ     (1 << 0)
+#define   GMBUS_SLAVE_WRITE    (0 << 0)
 #define GMBUS2                 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */
-#define   GMBUS_INUSE          (1<<15)
-#define   GMBUS_HW_WAIT_PHASE  (1<<14)
-#define   GMBUS_STALL_TIMEOUT  (1<<13)
-#define   GMBUS_INT            (1<<12)
-#define   GMBUS_HW_RDY         (1<<11)
-#define   GMBUS_SATOER         (1<<10)
-#define   GMBUS_ACTIVE         (1<<9)
+#define   GMBUS_INUSE          (1 << 15)
+#define   GMBUS_HW_WAIT_PHASE  (1 << 14)
+#define   GMBUS_STALL_TIMEOUT  (1 << 13)
+#define   GMBUS_INT            (1 << 12)
+#define   GMBUS_HW_RDY         (1 << 11)
+#define   GMBUS_SATOER         (1 << 10)
+#define   GMBUS_ACTIVE         (1 << 9)
 #define GMBUS3                 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */
 #define GMBUS4                 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */
-#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
-#define   GMBUS_NAK_EN         (1<<3)
-#define   GMBUS_IDLE_EN                (1<<2)
-#define   GMBUS_HW_WAIT_EN     (1<<1)
-#define   GMBUS_HW_RDY_EN      (1<<0)
+#define   GMBUS_SLAVE_TIMEOUT_EN (1 << 4)
+#define   GMBUS_NAK_EN         (1 << 3)
+#define   GMBUS_IDLE_EN                (1 << 2)
+#define   GMBUS_HW_WAIT_EN     (1 << 1)
+#define   GMBUS_HW_RDY_EN      (1 << 0)
 #define GMBUS5                 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */
-#define   GMBUS_2BYTE_INDEX_EN (1<<31)
+#define   GMBUS_2BYTE_INDEX_EN (1 << 31)
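
Taken together, GMBUS0-GMBUS5 describe a complete hardware i2c transaction: pin and clock-rate select in GMBUS0, the command in GMBUS1, status in GMBUS2 and the data FIFO in GMBUS3. A minimal read sketch under those assumptions, with hypothetical mmio accessors and the GMBUS2 ready/error polling elided:

static int gmbus_read_sketch(u32 pin, u8 addr, u8 *buf, u32 len)
{
	mmio_write32(GMBUS0, GMBUS_RATE_100KHZ | pin);
	mmio_write32(GMBUS1, GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
		     (len << GMBUS_BYTE_COUNT_SHIFT) |
		     (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ);

	while (len) {
		u32 i, val;

		/* wait for GMBUS_HW_RDY (or bail on GMBUS_SATOER) here */
		val = mmio_read32(GMBUS3);	/* four data bytes per read */
		for (i = 0; i < 4 && len; i++, len--) {
			*buf++ = val & 0xff;
			val >>= 8;
		}
	}
	return 0;
}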
 
 /*
  * Clock control & power management
@@ -3083,10 +3209,10 @@ enum i915_power_well_id {
 #define   DPLL_P2_CLOCK_DIV_MASK       0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK  0x00ff0000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define   DPLL_LOCK_VLV                        (1<<15)
-#define   DPLL_INTEGRATED_CRI_CLK_VLV  (1<<14)
-#define   DPLL_INTEGRATED_REF_CLK_VLV  (1<<13)
-#define   DPLL_SSC_REF_CLK_CHV         (1<<13)
+#define   DPLL_LOCK_VLV                        (1 << 15)
+#define   DPLL_INTEGRATED_CRI_CLK_VLV  (1 << 14)
+#define   DPLL_INTEGRATED_REF_CLK_VLV  (1 << 13)
+#define   DPLL_SSC_REF_CLK_CHV         (1 << 13)
 #define   DPLL_PORTC_READY_MASK                (0xf << 4)
 #define   DPLL_PORTB_READY_MASK                (0xf)
 
@@ -3096,20 +3222,20 @@ enum i915_power_well_id {
 #define DPIO_PHY_STATUS                        _MMIO(VLV_DISPLAY_BASE + 0x6240)
 #define   DPLL_PORTD_READY_MASK                (0xf)
 #define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100)
-#define   PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)   (1 << (2*(phy)+(ch)+27))
+#define   PHY_CH_POWER_DOWN_OVRD_EN(phy, ch)   (1 << (2 * (phy) + (ch) + 27))
 #define   PHY_LDO_DELAY_0NS                    0x0
 #define   PHY_LDO_DELAY_200NS                  0x1
 #define   PHY_LDO_DELAY_600NS                  0x2
-#define   PHY_LDO_SEQ_DELAY(delay, phy)                ((delay) << (2*(phy)+23))
-#define   PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)        ((mask) << (8*(phy)+4*(ch)+11))
+#define   PHY_LDO_SEQ_DELAY(delay, phy)                ((delay) << (2 * (phy) + 23))
+#define   PHY_CH_POWER_DOWN_OVRD(mask, phy, ch)        ((mask) << (8 * (phy) + 4 * (ch) + 11))
 #define   PHY_CH_SU_PSR                                0x1
 #define   PHY_CH_DEEP_PSR                      0x7
-#define   PHY_CH_POWER_MODE(mode, phy, ch)     ((mode) << (6*(phy)+3*(ch)+2))
+#define   PHY_CH_POWER_MODE(mode, phy, ch)     ((mode) << (6 * (phy) + 3 * (ch) + 2))
 #define   PHY_COM_LANE_RESET_DEASSERT(phy)     (1 << (phy))
 #define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104)
-#define   PHY_POWERGOOD(phy)   (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
-#define   PHY_STATUS_CMN_LDO(phy, ch)                   (1 << (6-(6*(phy)+3*(ch))))
-#define   PHY_STATUS_SPLINE_LDO(phy, ch, spline)        (1 << (8-(6*(phy)+3*(ch)+(spline))))
+#define   PHY_POWERGOOD(phy)   (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30))
+#define   PHY_STATUS_CMN_LDO(phy, ch)                   (1 << (6 - (6 * (phy) + 3 * (ch))))
+#define   PHY_STATUS_SPLINE_LDO(phy, ch, spline)        (1 << (8 - (6 * (phy) + 3 * (ch) + (spline))))
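
Since the channel ready/power bits above are placed arithmetically, a few worked instances pin down the layout (DPIO_PHY*/DPIO_CH* are the driver's phy/channel enumerations):

/*
 * PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0) = 1 << (2*1 + 0 + 27) = bit 29
 * PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) = 0x7 << (0 + 3 + 2)
 * PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) = 1 << (6 - 6) = bit 0
 */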
 
 /*
  * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -3130,7 +3256,7 @@ enum i915_power_well_id {
 /* Ironlake */
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
-# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)       (((x)-1) << 9)
+# define PLL_REF_SDVO_HDMI_MULTIPLIER(x)       (((x) - 1) << 9)
 # define DPLL_FPA1_P1_POST_DIV_SHIFT            0
 # define DPLL_FPA1_P1_POST_DIV_MASK             0xff
 
@@ -3219,10 +3345,10 @@ enum i915_power_well_id {
 #define   DPLLA_TEST_M_BYPASS          (1 << 2)
 #define   DPLLA_INPUT_BUFFER_ENABLE    (1 << 0)
 #define D_STATE                _MMIO(0x6104)
-#define  DSTATE_GFX_RESET_I830                 (1<<6)
-#define  DSTATE_PLL_D3_OFF                     (1<<3)
-#define  DSTATE_GFX_CLOCK_GATING               (1<<1)
-#define  DSTATE_DOT_CLOCK_GATING               (1<<0)
+#define  DSTATE_GFX_RESET_I830                 (1 << 6)
+#define  DSTATE_PLL_D3_OFF                     (1 << 3)
+#define  DSTATE_GFX_CLOCK_GATING               (1 << 1)
+#define  DSTATE_DOT_CLOCK_GATING               (1 << 0)
 #define DSPCLK_GATE_D  _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
 # define DPUNIT_B_CLOCK_GATE_DISABLE           (1 << 30) /* 965 */
 # define VSUNIT_CLOCK_GATE_DISABLE             (1 << 29) /* 965 */
@@ -3339,7 +3465,7 @@ enum i915_power_well_id {
 #define DEUC                   _MMIO(0x6214)          /* CRL only */
 
 #define FW_BLC_SELF_VLV                _MMIO(VLV_DISPLAY_BASE + 0x6500)
-#define  FW_CSPWRDWNEN         (1<<15)
+#define  FW_CSPWRDWNEN         (1 << 15)
 
 #define MI_ARB_VLV             _MMIO(VLV_DISPLAY_BASE + 0x6504)
 
@@ -3464,7 +3590,7 @@ enum i915_power_well_id {
 #define HPLLVCO_MOBILE          _MMIO(MCHBAR_MIRROR_BASE + 0xc0f)
 
 #define TSC1                   _MMIO(0x11001)
-#define   TSE                  (1<<0)
+#define   TSE                  (1 << 0)
 #define TR1                    _MMIO(0x11006)
 #define TSFS                   _MMIO(0x11020)
 #define   TSFS_SLOPE_MASK      0x0000ff00
@@ -3510,23 +3636,23 @@ enum i915_power_well_id {
 #define   MEMCTL_CMD_CHVID     3
 #define   MEMCTL_CMD_VMMOFF    4
 #define   MEMCTL_CMD_VMMON     5
-#define   MEMCTL_CMD_STS       (1<<12) /* write 1 triggers command, clears
-#define   MEMCTL_CMD_STS       (1<<12) /* write 1 triggers command, clears
                                           when command complete */
 #define   MEMCTL_FREQ_MASK     0x0f00 /* jitter, from 0-15 */
 #define   MEMCTL_FREQ_SHIFT    8
-#define   MEMCTL_SFCAVM                (1<<7)
+#define   MEMCTL_SFCAVM                (1 << 7)
 #define   MEMCTL_TGT_VID_MASK  0x007f
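
MEMCTL_CMD_STS above documents a write-1-to-trigger handshake: software writes the command with the status bit set and hardware clears the bit on completion. A minimal sketch, assuming the parent 16-bit register (MEMSWCTL in the full header, just above this hunk) and hypothetical mmio accessors:

static int memctl_send_cmd_sketch(u16 cmd, u16 freq)
{
	u16 swctl = (cmd << MEMCTL_CMD_SHIFT) |	/* shift defined above the hunk */
		    (freq << MEMCTL_FREQ_SHIFT) | MEMCTL_CMD_STS;
	int retries = 100;

	mmio_write16(MEMSWCTL, swctl);		/* write 1 triggers the command */
	while ((mmio_read16(MEMSWCTL) & MEMCTL_CMD_STS) && --retries)
		udelay(1);			/* hw clears STS when complete */

	return retries ? 0 : -ETIMEDOUT;
}
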
 #define MEMIHYST               _MMIO(0x1117c)
 #define MEMINTREN              _MMIO(0x11180) /* 16 bits */
-#define   MEMINT_RSEXIT_EN     (1<<8)
-#define   MEMINT_CX_SUPR_EN    (1<<7)
-#define   MEMINT_CONT_BUSY_EN  (1<<6)
-#define   MEMINT_AVG_BUSY_EN   (1<<5)
-#define   MEMINT_EVAL_CHG_EN   (1<<4)
-#define   MEMINT_MON_IDLE_EN   (1<<3)
-#define   MEMINT_UP_EVAL_EN    (1<<2)
-#define   MEMINT_DOWN_EVAL_EN  (1<<1)
-#define   MEMINT_SW_CMD_EN     (1<<0)
+#define   MEMINT_RSEXIT_EN     (1 << 8)
+#define   MEMINT_CX_SUPR_EN    (1 << 7)
+#define   MEMINT_CONT_BUSY_EN  (1 << 6)
+#define   MEMINT_AVG_BUSY_EN   (1 << 5)
+#define   MEMINT_EVAL_CHG_EN   (1 << 4)
+#define   MEMINT_MON_IDLE_EN   (1 << 3)
+#define   MEMINT_UP_EVAL_EN    (1 << 2)
+#define   MEMINT_DOWN_EVAL_EN  (1 << 1)
+#define   MEMINT_SW_CMD_EN     (1 << 0)
 #define MEMINTRSTR             _MMIO(0x11182) /* 16 bits */
 #define   MEM_RSEXIT_MASK      0xc000
 #define   MEM_RSEXIT_SHIFT     14
@@ -3548,26 +3674,26 @@ enum i915_power_well_id {
 #define   MEM_INT_STEER_SMI    2
 #define   MEM_INT_STEER_SCI    3
 #define MEMINTRSTS             _MMIO(0x11184)
-#define   MEMINT_RSEXIT                (1<<7)
-#define   MEMINT_CONT_BUSY     (1<<6)
-#define   MEMINT_AVG_BUSY      (1<<5)
-#define   MEMINT_EVAL_CHG      (1<<4)
-#define   MEMINT_MON_IDLE      (1<<3)
-#define   MEMINT_UP_EVAL       (1<<2)
-#define   MEMINT_DOWN_EVAL     (1<<1)
-#define   MEMINT_SW_CMD                (1<<0)
+#define   MEMINT_RSEXIT                (1 << 7)
+#define   MEMINT_CONT_BUSY     (1 << 6)
+#define   MEMINT_AVG_BUSY      (1 << 5)
+#define   MEMINT_EVAL_CHG      (1 << 4)
+#define   MEMINT_MON_IDLE      (1 << 3)
+#define   MEMINT_UP_EVAL       (1 << 2)
+#define   MEMINT_DOWN_EVAL     (1 << 1)
+#define   MEMINT_SW_CMD                (1 << 0)
 #define MEMMODECTL             _MMIO(0x11190)
-#define   MEMMODE_BOOST_EN     (1<<31)
+#define   MEMMODE_BOOST_EN     (1 << 31)
 #define   MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
 #define   MEMMODE_BOOST_FREQ_SHIFT 24
 #define   MEMMODE_IDLE_MODE_MASK 0x00030000
 #define   MEMMODE_IDLE_MODE_SHIFT 16
 #define   MEMMODE_IDLE_MODE_EVAL 0
 #define   MEMMODE_IDLE_MODE_CONT 1
-#define   MEMMODE_HWIDLE_EN    (1<<15)
-#define   MEMMODE_SWMODE_EN    (1<<14)
-#define   MEMMODE_RCLK_GATE    (1<<13)
-#define   MEMMODE_HW_UPDATE    (1<<12)
+#define   MEMMODE_HWIDLE_EN    (1 << 15)
+#define   MEMMODE_SWMODE_EN    (1 << 14)
+#define   MEMMODE_RCLK_GATE    (1 << 13)
+#define   MEMMODE_HW_UPDATE    (1 << 12)
 #define   MEMMODE_FSTART_MASK  0x00000f00 /* starting jitter, 0-15 */
 #define   MEMMODE_FSTART_SHIFT 8
 #define   MEMMODE_FMAX_MASK    0x000000f0 /* max jitter, 0-15 */
@@ -3581,8 +3707,8 @@ enum i915_power_well_id {
 #define   SWMEMCMD_TARVID      (3 << 13)
 #define   SWMEMCMD_VRM_OFF     (4 << 13)
 #define   SWMEMCMD_VRM_ON      (5 << 13)
-#define   CMDSTS               (1<<12)
-#define   SFCAVM               (1<<11)
+#define   CMDSTS               (1 << 12)
+#define   SFCAVM               (1 << 11)
 #define   SWFREQ_MASK          0x0380 /* P0-7 */
 #define   SWFREQ_SHIFT         7
 #define   TARVID_MASK          0x001f
@@ -3591,49 +3717,49 @@ enum i915_power_well_id {
 #define RCUPEI                 _MMIO(0x111b0)
 #define RCDNEI                 _MMIO(0x111b4)
 #define RSTDBYCTL              _MMIO(0x111b8)
-#define   RS1EN                        (1<<31)
-#define   RS2EN                        (1<<30)
-#define   RS3EN                        (1<<29)
-#define   D3RS3EN              (1<<28) /* Display D3 implies RS3 */
-#define   SWPROMORSX           (1<<27) /* RSx promotion timers ignored */
-#define   RCWAKERW             (1<<26) /* Resetwarn from PCH causes wakeup */
-#define   DPRSLPVREN           (1<<25) /* Fast voltage ramp enable */
-#define   GFXTGHYST            (1<<24) /* Hysteresis to allow trunk gating */
-#define   RCX_SW_EXIT          (1<<23) /* Leave RSx and prevent re-entry */
-#define   RSX_STATUS_MASK      (7<<20)
-#define   RSX_STATUS_ON                (0<<20)
-#define   RSX_STATUS_RC1       (1<<20)
-#define   RSX_STATUS_RC1E      (2<<20)
-#define   RSX_STATUS_RS1       (3<<20)
-#define   RSX_STATUS_RS2       (4<<20) /* aka rc6 */
-#define   RSX_STATUS_RSVD      (5<<20) /* deep rc6 unsupported on ilk */
-#define   RSX_STATUS_RS3       (6<<20) /* rs3 unsupported on ilk */
-#define   RSX_STATUS_RSVD2     (7<<20)
-#define   UWRCRSXE             (1<<19) /* wake counter limit prevents rsx */
-#define   RSCRP                        (1<<18) /* rs requests control on rs1/2 reqs */
-#define   JRSC                 (1<<17) /* rsx coupled to cpu c-state */
-#define   RS2INC0              (1<<16) /* allow rs2 in cpu c0 */
-#define   RS1CONTSAV_MASK      (3<<14)
-#define   RS1CONTSAV_NO_RS1    (0<<14) /* rs1 doesn't save/restore context */
-#define   RS1CONTSAV_RSVD      (1<<14)
-#define   RS1CONTSAV_SAVE_RS1  (2<<14) /* rs1 saves context */
-#define   RS1CONTSAV_FULL_RS1  (3<<14) /* rs1 saves and restores context */
-#define   NORMSLEXLAT_MASK     (3<<12)
-#define   SLOW_RS123           (0<<12)
-#define   SLOW_RS23            (1<<12)
-#define   SLOW_RS3             (2<<12)
-#define   NORMAL_RS123         (3<<12)
-#define   RCMODE_TIMEOUT       (1<<11) /* 0 is eval interval method */
-#define   IMPROMOEN            (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
-#define   RCENTSYNC            (1<<9) /* rs coupled to cpu c-state (3/6/7) */
-#define   STATELOCK            (1<<7) /* locked to rs_cstate if 0 */
-#define   RS_CSTATE_MASK       (3<<4)
-#define   RS_CSTATE_C367_RS1   (0<<4)
-#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
-#define   RS_CSTATE_RSVD       (2<<4)
-#define   RS_CSTATE_C367_RS2   (3<<4)
-#define   REDSAVES             (1<<3) /* no context save if was idle during rs0 */
-#define   REDRESTORES          (1<<2) /* no restore if was idle during rs0 */
+#define   RS1EN                        (1 << 31)
+#define   RS2EN                        (1 << 30)
+#define   RS3EN                        (1 << 29)
+#define   D3RS3EN              (1 << 28) /* Display D3 implies RS3 */
+#define   SWPROMORSX           (1 << 27) /* RSx promotion timers ignored */
+#define   RCWAKERW             (1 << 26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN           (1 << 25) /* Fast voltage ramp enable */
+#define   GFXTGHYST            (1 << 24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT          (1 << 23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK      (7 << 20)
+#define   RSX_STATUS_ON                (0 << 20)
+#define   RSX_STATUS_RC1       (1 << 20)
+#define   RSX_STATUS_RC1E      (2 << 20)
+#define   RSX_STATUS_RS1       (3 << 20)
+#define   RSX_STATUS_RS2       (4 << 20) /* aka rc6 */
+#define   RSX_STATUS_RSVD      (5 << 20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3       (6 << 20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2     (7 << 20)
+#define   UWRCRSXE             (1 << 19) /* wake counter limit prevents rsx */
+#define   RSCRP                        (1 << 18) /* rs requests control on rs1/2 reqs */
+#define   JRSC                 (1 << 17) /* rsx coupled to cpu c-state */
+#define   RS2INC0              (1 << 16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK      (3 << 14)
+#define   RS1CONTSAV_NO_RS1    (0 << 14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD      (1 << 14)
+#define   RS1CONTSAV_SAVE_RS1  (2 << 14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1  (3 << 14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK     (3 << 12)
+#define   SLOW_RS123           (0 << 12)
+#define   SLOW_RS23            (1 << 12)
+#define   SLOW_RS3             (2 << 12)
+#define   NORMAL_RS123         (3 << 12)
+#define   RCMODE_TIMEOUT       (1 << 11) /* 0 is eval interval method */
+#define   IMPROMOEN            (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC            (1 << 9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK            (1 << 7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK       (3 << 4)
+#define   RS_CSTATE_C367_RS1   (0 << 4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1 << 4)
+#define   RS_CSTATE_RSVD       (2 << 4)
+#define   RS_CSTATE_C367_RS2   (3 << 4)
+#define   REDSAVES             (1 << 3) /* no context save if was idle during rs0 */
+#define   REDRESTORES          (1 << 2) /* no restore if was idle during rs0 */
 #define VIDCTL                 _MMIO(0x111c0)
 #define VIDSTS                 _MMIO(0x111c8)
 #define VIDSTART               _MMIO(0x111cc) /* 8 bits */
@@ -3642,7 +3768,7 @@ enum i915_power_well_id {
 #define   MEMSTAT_VID_SHIFT    8
 #define   MEMSTAT_PSTATE_MASK  0x00f8
 #define   MEMSTAT_PSTATE_SHIFT  3
-#define   MEMSTAT_MON_ACTV     (1<<2)
+#define   MEMSTAT_MON_ACTV     (1 << 2)
 #define   MEMSTAT_SRC_CTL_MASK 0x0003
 #define   MEMSTAT_SRC_CTL_CORE 0
 #define   MEMSTAT_SRC_CTL_TRB  1
@@ -3651,7 +3777,7 @@ enum i915_power_well_id {
 #define RCPREVBSYTUPAVG                _MMIO(0x113b8)
 #define RCPREVBSYTDNAVG                _MMIO(0x113bc)
 #define PMMISC                 _MMIO(0x11214)
-#define   MCPPCE_EN            (1<<0) /* enable PM_MSG from PCH->MPC */
+#define   MCPPCE_EN            (1 << 0) /* enable PM_MSG from PCH->MPC */
 #define SDEW                   _MMIO(0x1124c)
 #define CSIEW0                 _MMIO(0x11250)
 #define CSIEW1                 _MMIO(0x11254)
@@ -3668,8 +3794,8 @@ enum i915_power_well_id {
 #define RPPREVBSYTUPAVG                _MMIO(0x113b8)
 #define RPPREVBSYTDNAVG                _MMIO(0x113bc)
 #define ECR                    _MMIO(0x11600)
-#define   ECR_GPFE             (1<<31)
-#define   ECR_IMONE            (1<<30)
+#define   ECR_GPFE             (1 << 31)
+#define   ECR_IMONE            (1 << 30)
 #define   ECR_CAP_MASK         0x0000001f /* Event range, 0-31 */
 #define OGW0                   _MMIO(0x11608)
 #define OGW1                   _MMIO(0x1160c)
@@ -3776,11 +3902,11 @@ enum {
        FAULT_AND_CONTINUE /* Unsupported */
 };
 
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
+#define GEN8_CTX_VALID (1 << 0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
+#define GEN8_CTX_FORCE_RESTORE (1 << 2)
+#define GEN8_CTX_L3LLC_COHERENT (1 << 5)
+#define GEN8_CTX_PRIVILEGE (1 << 8)
 #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
 
 #define GEN8_CTX_ID_SHIFT 32
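
The GEN8_CTX_* bits above are the pieces of the 64-bit logical ring context descriptor. A hedged sketch of the composition; the page-aligned context address (lrca) and the addressing-mode value come from definitions outside this hunk:

static u64 gen8_ctx_desc_sketch(u64 lrca, u32 ctx_id, u64 addressing_mode)
{
	u64 desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	desc |= addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
	desc |= lrca;					/* page-aligned, low dword */
	desc |= (u64)ctx_id << GEN8_CTX_ID_SHIFT;	/* upper dword */
	return desc;
}
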
@@ -3802,7 +3928,7 @@ enum {
 
 #define OVADD                  _MMIO(0x30000)
 #define DOVSTA                 _MMIO(0x30008)
-#define OC_BUF                 (0x3<<20)
+#define OC_BUF                 (0x3 << 20)
 #define OGAMC5                 _MMIO(0x30010)
 #define OGAMC4                 _MMIO(0x30014)
 #define OGAMC3                 _MMIO(0x30018)
@@ -3970,64 +4096,65 @@ enum {
 /* VLV eDP PSR registers */
 #define _PSRCTLA                               (VLV_DISPLAY_BASE + 0x60090)
 #define _PSRCTLB                               (VLV_DISPLAY_BASE + 0x61090)
-#define  VLV_EDP_PSR_ENABLE                    (1<<0)
-#define  VLV_EDP_PSR_RESET                     (1<<1)
-#define  VLV_EDP_PSR_MODE_MASK                 (7<<2)
-#define  VLV_EDP_PSR_MODE_HW_TIMER             (1<<3)
-#define  VLV_EDP_PSR_MODE_SW_TIMER             (1<<2)
-#define  VLV_EDP_PSR_SINGLE_FRAME_UPDATE       (1<<7)
-#define  VLV_EDP_PSR_ACTIVE_ENTRY              (1<<8)
-#define  VLV_EDP_PSR_SRC_TRANSMITTER_STATE     (1<<9)
-#define  VLV_EDP_PSR_DBL_FRAME                 (1<<10)
-#define  VLV_EDP_PSR_FRAME_COUNT_MASK          (0xff<<16)
+#define  VLV_EDP_PSR_ENABLE                    (1 << 0)
+#define  VLV_EDP_PSR_RESET                     (1 << 1)
+#define  VLV_EDP_PSR_MODE_MASK                 (7 << 2)
+#define  VLV_EDP_PSR_MODE_HW_TIMER             (1 << 3)
+#define  VLV_EDP_PSR_MODE_SW_TIMER             (1 << 2)
+#define  VLV_EDP_PSR_SINGLE_FRAME_UPDATE       (1 << 7)
+#define  VLV_EDP_PSR_ACTIVE_ENTRY              (1 << 8)
+#define  VLV_EDP_PSR_SRC_TRANSMITTER_STATE     (1 << 9)
+#define  VLV_EDP_PSR_DBL_FRAME                 (1 << 10)
+#define  VLV_EDP_PSR_FRAME_COUNT_MASK          (0xff << 16)
 #define  VLV_EDP_PSR_IDLE_FRAME_SHIFT          16
 #define VLV_PSRCTL(pipe)       _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
 
 #define _VSCSDPA                       (VLV_DISPLAY_BASE + 0x600a0)
 #define _VSCSDPB                       (VLV_DISPLAY_BASE + 0x610a0)
-#define  VLV_EDP_PSR_SDP_FREQ_MASK     (3<<30)
-#define  VLV_EDP_PSR_SDP_FREQ_ONCE     (1<<31)
-#define  VLV_EDP_PSR_SDP_FREQ_EVFRAME  (1<<30)
+#define  VLV_EDP_PSR_SDP_FREQ_MASK     (3 << 30)
+#define  VLV_EDP_PSR_SDP_FREQ_ONCE     (1 << 31)
+#define  VLV_EDP_PSR_SDP_FREQ_EVFRAME  (1 << 30)
 #define VLV_VSCSDP(pipe)       _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
 
 #define _PSRSTATA                      (VLV_DISPLAY_BASE + 0x60094)
 #define _PSRSTATB                      (VLV_DISPLAY_BASE + 0x61094)
-#define  VLV_EDP_PSR_LAST_STATE_MASK   (7<<3)
+#define  VLV_EDP_PSR_LAST_STATE_MASK   (7 << 3)
 #define  VLV_EDP_PSR_CURR_STATE_MASK   7
-#define  VLV_EDP_PSR_DISABLED          (0<<0)
-#define  VLV_EDP_PSR_INACTIVE          (1<<0)
-#define  VLV_EDP_PSR_IN_TRANS_TO_ACTIVE        (2<<0)
-#define  VLV_EDP_PSR_ACTIVE_NORFB_UP   (3<<0)
-#define  VLV_EDP_PSR_ACTIVE_SF_UPDATE  (4<<0)
-#define  VLV_EDP_PSR_EXIT              (5<<0)
-#define  VLV_EDP_PSR_IN_TRANS          (1<<7)
+#define  VLV_EDP_PSR_DISABLED          (0 << 0)
+#define  VLV_EDP_PSR_INACTIVE          (1 << 0)
+#define  VLV_EDP_PSR_IN_TRANS_TO_ACTIVE        (2 << 0)
+#define  VLV_EDP_PSR_ACTIVE_NORFB_UP   (3 << 0)
+#define  VLV_EDP_PSR_ACTIVE_SF_UPDATE  (4 << 0)
+#define  VLV_EDP_PSR_EXIT              (5 << 0)
+#define  VLV_EDP_PSR_IN_TRANS          (1 << 7)
 #define VLV_PSRSTAT(pipe)      _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
 
 /* HSW+ eDP PSR registers */
 #define HSW_EDP_PSR_BASE       0x64800
 #define BDW_EDP_PSR_BASE       0x6f800
 #define EDP_PSR_CTL                            _MMIO(dev_priv->psr_mmio_base + 0)
-#define   EDP_PSR_ENABLE                       (1<<31)
-#define   BDW_PSR_SINGLE_FRAME                 (1<<30)
-#define   EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK  (1<<29) /* SW can't modify */
-#define   EDP_PSR_LINK_STANDBY                 (1<<27)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES  (0<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES  (1<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES  (2<<25)
-#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES  (3<<25)
+#define   EDP_PSR_ENABLE                       (1 << 31)
+#define   BDW_PSR_SINGLE_FRAME                 (1 << 30)
+#define   EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK  (1 << 29) /* SW can't modify */
+#define   EDP_PSR_LINK_STANDBY                 (1 << 27)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES  (0 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES  (1 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES  (2 << 25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES  (3 << 25)
 #define   EDP_PSR_MAX_SLEEP_TIME_SHIFT         20
-#define   EDP_PSR_SKIP_AUX_EXIT                        (1<<12)
-#define   EDP_PSR_TP1_TP2_SEL                  (0<<11)
-#define   EDP_PSR_TP1_TP3_SEL                  (1<<11)
-#define   EDP_PSR_TP2_TP3_TIME_500us           (0<<8)
-#define   EDP_PSR_TP2_TP3_TIME_100us           (1<<8)
-#define   EDP_PSR_TP2_TP3_TIME_2500us          (2<<8)
-#define   EDP_PSR_TP2_TP3_TIME_0us             (3<<8)
-#define   EDP_PSR_TP1_TIME_500us               (0<<4)
-#define   EDP_PSR_TP1_TIME_100us               (1<<4)
-#define   EDP_PSR_TP1_TIME_2500us              (2<<4)
-#define   EDP_PSR_TP1_TIME_0us                 (3<<4)
+#define   EDP_PSR_SKIP_AUX_EXIT                        (1 << 12)
+#define   EDP_PSR_TP1_TP2_SEL                  (0 << 11)
+#define   EDP_PSR_TP1_TP3_SEL                  (1 << 11)
+#define   EDP_PSR_CRC_ENABLE                   (1 << 10) /* BDW+ */
+#define   EDP_PSR_TP2_TP3_TIME_500us           (0 << 8)
+#define   EDP_PSR_TP2_TP3_TIME_100us           (1 << 8)
+#define   EDP_PSR_TP2_TP3_TIME_2500us          (2 << 8)
+#define   EDP_PSR_TP2_TP3_TIME_0us             (3 << 8)
+#define   EDP_PSR_TP1_TIME_500us               (0 << 4)
+#define   EDP_PSR_TP1_TIME_100us               (1 << 4)
+#define   EDP_PSR_TP1_TIME_2500us              (2 << 4)
+#define   EDP_PSR_TP1_TIME_0us                 (3 << 4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT             0
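
A sketch of how the EDP_PSR_CTL fields above combine for PSR1 activation; the idle-frame count, training times and sleep timer below are illustrative policy choices, not mandated values:

static u32 psr1_ctl_sketch(void)
{
	u32 val = EDP_PSR_ENABLE;

	val |= 6 << EDP_PSR_IDLE_FRAME_SHIFT;		/* frames before entry */
	val |= EDP_PSR_TP1_TIME_100us;
	val |= EDP_PSR_TP2_TP3_TIME_100us;
	val |= 0x1f << EDP_PSR_MAX_SLEEP_TIME_SHIFT;	/* 5-bit field, maxed */
	return val;
}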
 
 /* Bspec claims those aren't shifted but stay at 0x64800 */
@@ -4047,55 +4174,56 @@ enum {
 #define EDP_PSR_AUX_DATA(i)                    _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
 
 #define EDP_PSR_STATUS                         _MMIO(dev_priv->psr_mmio_base + 0x40)
-#define   EDP_PSR_STATUS_STATE_MASK            (7<<29)
-#define   EDP_PSR_STATUS_STATE_IDLE            (0<<29)
-#define   EDP_PSR_STATUS_STATE_SRDONACK                (1<<29)
-#define   EDP_PSR_STATUS_STATE_SRDENT          (2<<29)
-#define   EDP_PSR_STATUS_STATE_BUFOFF          (3<<29)
-#define   EDP_PSR_STATUS_STATE_BUFON           (4<<29)
-#define   EDP_PSR_STATUS_STATE_AUXACK          (5<<29)
-#define   EDP_PSR_STATUS_STATE_SRDOFFACK       (6<<29)
-#define   EDP_PSR_STATUS_LINK_MASK             (3<<26)
-#define   EDP_PSR_STATUS_LINK_FULL_OFF         (0<<26)
-#define   EDP_PSR_STATUS_LINK_FULL_ON          (1<<26)
-#define   EDP_PSR_STATUS_LINK_STANDBY          (2<<26)
+#define   EDP_PSR_STATUS_STATE_MASK            (7 << 29)
+#define   EDP_PSR_STATUS_STATE_SHIFT           29
+#define   EDP_PSR_STATUS_STATE_IDLE            (0 << 29)
+#define   EDP_PSR_STATUS_STATE_SRDONACK                (1 << 29)
+#define   EDP_PSR_STATUS_STATE_SRDENT          (2 << 29)
+#define   EDP_PSR_STATUS_STATE_BUFOFF          (3 << 29)
+#define   EDP_PSR_STATUS_STATE_BUFON           (4 << 29)
+#define   EDP_PSR_STATUS_STATE_AUXACK          (5 << 29)
+#define   EDP_PSR_STATUS_STATE_SRDOFFACK       (6 << 29)
+#define   EDP_PSR_STATUS_LINK_MASK             (3 << 26)
+#define   EDP_PSR_STATUS_LINK_FULL_OFF         (0 << 26)
+#define   EDP_PSR_STATUS_LINK_FULL_ON          (1 << 26)
+#define   EDP_PSR_STATUS_LINK_STANDBY          (2 << 26)
 #define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
 #define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK  0x1f
 #define   EDP_PSR_STATUS_COUNT_SHIFT           16
 #define   EDP_PSR_STATUS_COUNT_MASK            0xf
-#define   EDP_PSR_STATUS_AUX_ERROR             (1<<15)
-#define   EDP_PSR_STATUS_AUX_SENDING           (1<<12)
-#define   EDP_PSR_STATUS_SENDING_IDLE          (1<<9)
-#define   EDP_PSR_STATUS_SENDING_TP2_TP3       (1<<8)
-#define   EDP_PSR_STATUS_SENDING_TP1           (1<<4)
+#define   EDP_PSR_STATUS_AUX_ERROR             (1 << 15)
+#define   EDP_PSR_STATUS_AUX_SENDING           (1 << 12)
+#define   EDP_PSR_STATUS_SENDING_IDLE          (1 << 9)
+#define   EDP_PSR_STATUS_SENDING_TP2_TP3       (1 << 8)
+#define   EDP_PSR_STATUS_SENDING_TP1           (1 << 4)
 #define   EDP_PSR_STATUS_IDLE_MASK             0xf
 
 #define EDP_PSR_PERF_CNT               _MMIO(dev_priv->psr_mmio_base + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK                0xffffff
 
 #define EDP_PSR_DEBUG                          _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
-#define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1<<28)
-#define   EDP_PSR_DEBUG_MASK_LPSP              (1<<27)
-#define   EDP_PSR_DEBUG_MASK_MEMUP             (1<<26)
-#define   EDP_PSR_DEBUG_MASK_HPD               (1<<25)
-#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1<<16)
-#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
+#define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1 << 28)
+#define   EDP_PSR_DEBUG_MASK_LPSP              (1 << 27)
+#define   EDP_PSR_DEBUG_MASK_MEMUP             (1 << 26)
+#define   EDP_PSR_DEBUG_MASK_HPD               (1 << 25)
+#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1 << 16)
+#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
 
 #define EDP_PSR2_CTL                   _MMIO(0x6f900)
-#define   EDP_PSR2_ENABLE              (1<<31)
-#define   EDP_SU_TRACK_ENABLE          (1<<30)
-#define   EDP_Y_COORDINATE_VALID       (1<<26) /* GLK and CNL+ */
-#define   EDP_Y_COORDINATE_ENABLE      (1<<25) /* GLK and CNL+ */
-#define   EDP_MAX_SU_DISABLE_TIME(t)   ((t)<<20)
-#define   EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
-#define   EDP_PSR2_TP2_TIME_500                (0<<8)
-#define   EDP_PSR2_TP2_TIME_100                (1<<8)
-#define   EDP_PSR2_TP2_TIME_2500       (2<<8)
-#define   EDP_PSR2_TP2_TIME_50         (3<<8)
-#define   EDP_PSR2_TP2_TIME_MASK       (3<<8)
+#define   EDP_PSR2_ENABLE              (1 << 31)
+#define   EDP_SU_TRACK_ENABLE          (1 << 30)
+#define   EDP_Y_COORDINATE_VALID       (1 << 26) /* GLK and CNL+ */
+#define   EDP_Y_COORDINATE_ENABLE      (1 << 25) /* GLK and CNL+ */
+#define   EDP_MAX_SU_DISABLE_TIME(t)   ((t) << 20)
+#define   EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define   EDP_PSR2_TP2_TIME_500us      (0 << 8)
+#define   EDP_PSR2_TP2_TIME_100us      (1 << 8)
+#define   EDP_PSR2_TP2_TIME_2500us     (2 << 8)
+#define   EDP_PSR2_TP2_TIME_50us       (3 << 8)
+#define   EDP_PSR2_TP2_TIME_MASK       (3 << 8)
 #define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define   EDP_PSR2_FRAME_BEFORE_SU_MASK        (0xf<<4)
-#define   EDP_PSR2_FRAME_BEFORE_SU(a)  ((a)<<4)
+#define   EDP_PSR2_FRAME_BEFORE_SU_MASK        (0xf << 4)
+#define   EDP_PSR2_FRAME_BEFORE_SU(a)  ((a) << 4)
 #define   EDP_PSR2_IDLE_FRAME_MASK     0xf
 #define   EDP_PSR2_IDLE_FRAME_SHIFT    0
 
@@ -4123,7 +4251,7 @@ enum {
 #define  PSR_EVENT_PSR_DISABLE                 (1 << 0)
 
 #define EDP_PSR2_STATUS                        _MMIO(0x6f940)
-#define EDP_PSR2_STATUS_STATE_MASK     (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_MASK     (0xf << 28)
 #define EDP_PSR2_STATUS_STATE_SHIFT    28
 
 /* VGA port control */
@@ -4131,47 +4259,48 @@ enum {
 #define PCH_ADPA                _MMIO(0xe1100)
 #define VLV_ADPA               _MMIO(VLV_DISPLAY_BASE + 0x61100)
 
-#define   ADPA_DAC_ENABLE      (1<<31)
+#define   ADPA_DAC_ENABLE      (1 << 31)
 #define   ADPA_DAC_DISABLE     0
-#define   ADPA_PIPE_SELECT_MASK        (1<<30)
-#define   ADPA_PIPE_A_SELECT   0
-#define   ADPA_PIPE_B_SELECT   (1<<30)
-#define   ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
-/* CPT uses bits 29:30 for pch transcoder select */
+#define   ADPA_PIPE_SEL_SHIFT          30
+#define   ADPA_PIPE_SEL_MASK           (1 << 30)
+#define   ADPA_PIPE_SEL(pipe)          ((pipe) << 30)
+#define   ADPA_PIPE_SEL_SHIFT_CPT      29
+#define   ADPA_PIPE_SEL_MASK_CPT       (3 << 29)
+#define   ADPA_PIPE_SEL_CPT(pipe)      ((pipe) << 29)
 #define   ADPA_CRT_HOTPLUG_MASK  0x03ff0000 /* bit 25-16 */
-#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0<<24)
-#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3<<24)
-#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
-#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2<<24)
-#define   ADPA_CRT_HOTPLUG_ENABLE        (1<<23)
-#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0<<22)
-#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1<<22)
-#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0<<21)
-#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1<<21)
-#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0<<20)
-#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1<<20)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0<<18)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1<<18)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2<<18)
-#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3<<18)
-#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0<<17)
-#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1<<17)
-#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
-#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define   ADPA_CRT_HOTPLUG_MONITOR_NONE  (0 << 24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MASK  (3 << 24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24)
+#define   ADPA_CRT_HOTPLUG_MONITOR_MONO  (2 << 24)
+#define   ADPA_CRT_HOTPLUG_ENABLE        (1 << 23)
+#define   ADPA_CRT_HOTPLUG_PERIOD_64     (0 << 22)
+#define   ADPA_CRT_HOTPLUG_PERIOD_128    (1 << 22)
+#define   ADPA_CRT_HOTPLUG_WARMUP_5MS    (0 << 21)
+#define   ADPA_CRT_HOTPLUG_WARMUP_10MS   (1 << 21)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_2S     (0 << 20)
+#define   ADPA_CRT_HOTPLUG_SAMPLE_4S     (1 << 20)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_40    (0 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_50    (1 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_60    (2 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLTAGE_70    (3 << 18)
+#define   ADPA_CRT_HOTPLUG_VOLREF_325MV  (0 << 17)
+#define   ADPA_CRT_HOTPLUG_VOLREF_475MV  (1 << 17)
+#define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16)
+#define   ADPA_USE_VGA_HVPOLARITY (1 << 15)
 #define   ADPA_SETS_HVPOLARITY 0
-#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_VSYNC_CNTL_DISABLE (1 << 10)
 #define   ADPA_VSYNC_CNTL_ENABLE 0
-#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_HSYNC_CNTL_DISABLE (1 << 11)
 #define   ADPA_HSYNC_CNTL_ENABLE 0
-#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define   ADPA_VSYNC_ACTIVE_HIGH (1 << 4)
 #define   ADPA_VSYNC_ACTIVE_LOW        0
-#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define   ADPA_HSYNC_ACTIVE_HIGH (1 << 3)
 #define   ADPA_HSYNC_ACTIVE_LOW        0
-#define   ADPA_DPMS_MASK       (~(3<<10))
-#define   ADPA_DPMS_ON         (0<<10)
-#define   ADPA_DPMS_SUSPEND    (1<<10)
-#define   ADPA_DPMS_STANDBY    (2<<10)
-#define   ADPA_DPMS_OFF                (3<<10)
+#define   ADPA_DPMS_MASK       (~(3 << 10))
+#define   ADPA_DPMS_ON         (0 << 10)
+#define   ADPA_DPMS_SUSPEND    (1 << 10)
+#define   ADPA_DPMS_STANDBY    (2 << 10)
+#define   ADPA_DPMS_OFF                (3 << 10)
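
This hunk replaces the single-purpose ADPA_PIPE_B_SELECT flag with a shift/mask/value triple, the same pattern the later hunks apply to the SDVO, DVO, LVDS, TV and DP ports. The idiomatic use is a read-modify-write of the field; a minimal sketch with hypothetical mmio accessors (enum pipe is the driver's pipe enumeration):

static void adpa_set_pipe_sketch(enum pipe pipe, bool is_cpt)
{
	u32 adpa = mmio_read32(PCH_ADPA);

	if (is_cpt) {			/* CPT: 2-bit transcoder field at 30:29 */
		adpa &= ~ADPA_PIPE_SEL_MASK_CPT;
		adpa |= ADPA_PIPE_SEL_CPT(pipe);
	} else {
		adpa &= ~ADPA_PIPE_SEL_MASK;
		adpa |= ADPA_PIPE_SEL(pipe);
	}
	mmio_write32(PCH_ADPA, adpa);
}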
 
 
 /* Hotplug control (945+ only) */
@@ -4296,9 +4425,9 @@ enum {
 
 /* Gen 3 SDVO bits: */
 #define   SDVO_ENABLE                          (1 << 31)
-#define   SDVO_PIPE_SEL(pipe)                  ((pipe) << 30)
+#define   SDVO_PIPE_SEL_SHIFT                  30
 #define   SDVO_PIPE_SEL_MASK                   (1 << 30)
-#define   SDVO_PIPE_B_SELECT                   (1 << 30)
+#define   SDVO_PIPE_SEL(pipe)                  ((pipe) << 30)
 #define   SDVO_STALL_SELECT                    (1 << 29)
 #define   SDVO_INTERRUPT_ENABLE                        (1 << 26)
 /*
@@ -4338,12 +4467,14 @@ enum {
 #define   SDVOB_HOTPLUG_ENABLE                 (1 << 23) /* SDVO only */
 
 /* Gen 6 (CPT) SDVO/HDMI bits: */
-#define   SDVO_PIPE_SEL_CPT(pipe)              ((pipe) << 29)
+#define   SDVO_PIPE_SEL_SHIFT_CPT              29
 #define   SDVO_PIPE_SEL_MASK_CPT               (3 << 29)
+#define   SDVO_PIPE_SEL_CPT(pipe)              ((pipe) << 29)
 
 /* CHV SDVO/HDMI bits: */
-#define   SDVO_PIPE_SEL_CHV(pipe)              ((pipe) << 24)
+#define   SDVO_PIPE_SEL_SHIFT_CHV              24
 #define   SDVO_PIPE_SEL_MASK_CHV               (3 << 24)
+#define   SDVO_PIPE_SEL_CHV(pipe)              ((pipe) << 24)
 
 
 /* DVO port control */
@@ -4354,7 +4485,9 @@ enum {
 #define _DVOC                  0x61160
 #define DVOC                   _MMIO(_DVOC)
 #define   DVO_ENABLE                   (1 << 31)
-#define   DVO_PIPE_B_SELECT            (1 << 30)
+#define   DVO_PIPE_SEL_SHIFT           30
+#define   DVO_PIPE_SEL_MASK            (1 << 30)
+#define   DVO_PIPE_SEL(pipe)           ((pipe) << 30)
 #define   DVO_PIPE_STALL_UNUSED                (0 << 28)
 #define   DVO_PIPE_STALL               (1 << 28)
 #define   DVO_PIPE_STALL_TV            (2 << 28)
@@ -4376,7 +4509,7 @@ enum {
 #define   DVO_BLANK_ACTIVE_HIGH                (1 << 2)
 #define   DVO_OUTPUT_CSTATE_PIXELS     (1 << 1)        /* SDG only */
 #define   DVO_OUTPUT_SOURCE_SIZE_PIXELS        (1 << 0)        /* SDG only */
-#define   DVO_PRESERVE_MASK            (0x7<<24)
+#define   DVO_PRESERVE_MASK            (0x7 << 24)
 #define DVOA_SRCDIM            _MMIO(0x61124)
 #define DVOB_SRCDIM            _MMIO(0x61144)
 #define DVOC_SRCDIM            _MMIO(0x61164)
@@ -4391,9 +4524,12 @@ enum {
  */
 #define   LVDS_PORT_EN                 (1 << 31)
 /* Selects pipe B for LVDS data.  Must be set on pre-965. */
-#define   LVDS_PIPEB_SELECT            (1 << 30)
-#define   LVDS_PIPE_MASK               (1 << 30)
-#define   LVDS_PIPE(pipe)              ((pipe) << 30)
+#define   LVDS_PIPE_SEL_SHIFT          30
+#define   LVDS_PIPE_SEL_MASK           (1 << 30)
+#define   LVDS_PIPE_SEL(pipe)          ((pipe) << 30)
+#define   LVDS_PIPE_SEL_SHIFT_CPT      29
+#define   LVDS_PIPE_SEL_MASK_CPT       (3 << 29)
+#define   LVDS_PIPE_SEL_CPT(pipe)      ((pipe) << 29)
 /* LVDS dithering flag on 965/g4x platform */
 #define   LVDS_ENABLE_DITHER           (1 << 25)
 /* LVDS sync polarity flags. Set to invert (i.e. negative) */
@@ -4466,6 +4602,16 @@ enum {
 #define   VIDEO_DIP_ENABLE_GMP_HSW     (1 << 4)
 #define   VIDEO_DIP_ENABLE_SPD_HSW     (1 << 0)
 
+#define  DRM_DIP_ENABLE                        (1 << 28)
+#define  PSR_VSC_BIT_7_SET             (1 << 27)
+#define  VSC_SELECT_MASK               (0x3 << 26)
+#define  VSC_SELECT_SHIFT              26
+#define  VSC_DIP_HW_HEA_DATA           (0 << 26)
+#define  VSC_DIP_HW_HEA_SW_DATA                (1 << 26)
+#define  VSC_DIP_HW_DATA_SW_HEA                (2 << 26)
+#define  VSC_DIP_SW_HEA_DATA           (3 << 26)
+#define  VDIP_ENABLE_PPS               (1 << 24)
+
 /* Panel power sequencing */
 #define PPS_BASE                       0x61200
 #define VLV_PPS_BASE                   (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4690,7 +4836,9 @@ enum {
 /* Enables the TV encoder */
 # define TV_ENC_ENABLE                 (1 << 31)
 /* Sources the TV encoder input from pipe B instead of A. */
-# define TV_ENC_PIPEB_SELECT           (1 << 30)
+# define TV_ENC_PIPE_SEL_SHIFT         30
+# define TV_ENC_PIPE_SEL_MASK          (1 << 30)
+# define TV_ENC_PIPE_SEL(pipe)         ((pipe) << 30)
 /* Outputs composite video (DAC A only) */
 # define TV_ENC_OUTPUT_COMPOSITE       (0 << 28)
 /* Outputs SVideo video (DAC B/C) */
@@ -5172,10 +5320,15 @@ enum {
 #define CHV_DP_D               _MMIO(VLV_DISPLAY_BASE + 0x64300)
 
 #define   DP_PORT_EN                   (1 << 31)
-#define   DP_PIPEB_SELECT              (1 << 30)
-#define   DP_PIPE_MASK                 (1 << 30)
-#define   DP_PIPE_SELECT_CHV(pipe)     ((pipe) << 16)
-#define   DP_PIPE_MASK_CHV             (3 << 16)
+#define   DP_PIPE_SEL_SHIFT            30
+#define   DP_PIPE_SEL_MASK             (1 << 30)
+#define   DP_PIPE_SEL(pipe)            ((pipe) << 30)
+#define   DP_PIPE_SEL_SHIFT_IVB                29
+#define   DP_PIPE_SEL_MASK_IVB         (3 << 29)
+#define   DP_PIPE_SEL_IVB(pipe)                ((pipe) << 29)
+#define   DP_PIPE_SEL_SHIFT_CHV                16
+#define   DP_PIPE_SEL_MASK_CHV         (3 << 16)
+#define   DP_PIPE_SEL_CHV(pipe)                ((pipe) << 16)
 
 /* Link training mode - select a suitable mode for each stage */
 #define   DP_LINK_TRAIN_PAT_1          (0 << 28)
@@ -5282,6 +5435,13 @@ enum {
 #define _DPD_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64320)
 #define _DPD_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64324)
 
+#define _DPE_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64410)
+#define _DPE_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64414)
+#define _DPE_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64418)
+#define _DPE_AUX_CH_DATA3      (dev_priv->info.display_mmio_offset + 0x6441c)
+#define _DPE_AUX_CH_DATA4      (dev_priv->info.display_mmio_offset + 0x64420)
+#define _DPE_AUX_CH_DATA5      (dev_priv->info.display_mmio_offset + 0x64424)
+
 #define _DPF_AUX_CH_CTL                (dev_priv->info.display_mmio_offset + 0x64510)
 #define _DPF_AUX_CH_DATA1      (dev_priv->info.display_mmio_offset + 0x64514)
 #define _DPF_AUX_CH_DATA2      (dev_priv->info.display_mmio_offset + 0x64518)
@@ -5337,7 +5497,7 @@ enum {
 #define _PIPEB_DATA_M_G4X      0x71050
 
 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
-#define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
+#define  TU_SIZE(x)             (((x) - 1) << 25) /* default size 64 */
 #define  TU_SIZE_SHIFT         25
 #define  TU_SIZE_MASK           (0x3f << 25)
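
A worked instance of the encoding pins down the off-by-one:

/*
 * TU_SIZE(64) = (64 - 1) << 25 = 0x3f << 25, matching the documented
 * power-on default for a 64-symbol transfer unit.
 */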
 
@@ -5379,18 +5539,18 @@ enum {
 #define   DSL_LINEMASK_GEN2    0x00000fff
 #define   DSL_LINEMASK_GEN3    0x00001fff
 #define _PIPEACONF             0x70008
-#define   PIPECONF_ENABLE      (1<<31)
+#define   PIPECONF_ENABLE      (1 << 31)
 #define   PIPECONF_DISABLE     0
-#define   PIPECONF_DOUBLE_WIDE (1<<30)
-#define   I965_PIPECONF_ACTIVE (1<<30)
-#define   PIPECONF_DSI_PLL_LOCKED      (1<<29) /* vlv & pipe A only */
-#define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
+#define   PIPECONF_DOUBLE_WIDE (1 << 30)
+#define   I965_PIPECONF_ACTIVE (1 << 30)
+#define   PIPECONF_DSI_PLL_LOCKED      (1 << 29) /* vlv & pipe A only */
+#define   PIPECONF_FRAME_START_DELAY_MASK (3 << 27)
 #define   PIPECONF_SINGLE_WIDE 0
 #define   PIPECONF_PIPE_UNLOCKED 0
-#define   PIPECONF_PIPE_LOCKED (1<<25)
+#define   PIPECONF_PIPE_LOCKED (1 << 25)
 #define   PIPECONF_PALETTE     0
-#define   PIPECONF_GAMMA               (1<<24)
-#define   PIPECONF_FORCE_BORDER        (1<<25)
+#define   PIPECONF_GAMMA               (1 << 24)
+#define   PIPECONF_FORCE_BORDER        (1 << 25)
 #define   PIPECONF_INTERLACE_MASK      (7 << 21)
 #define   PIPECONF_INTERLACE_MASK_HSW  (3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5409,67 +5569,67 @@ enum {
 #define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK  (5 << 21) /* ilk/snb only */
 #define   PIPECONF_INTERLACE_MODE_MASK         (7 << 21)
 #define   PIPECONF_EDP_RR_MODE_SWITCH          (1 << 20)
-#define   PIPECONF_CXSR_DOWNCLOCK      (1<<16)
+#define   PIPECONF_CXSR_DOWNCLOCK      (1 << 16)
 #define   PIPECONF_EDP_RR_MODE_SWITCH_VLV      (1 << 14)
 #define   PIPECONF_COLOR_RANGE_SELECT  (1 << 13)
 #define   PIPECONF_BPC_MASK    (0x7 << 5)
-#define   PIPECONF_8BPC                (0<<5)
-#define   PIPECONF_10BPC       (1<<5)
-#define   PIPECONF_6BPC                (2<<5)
-#define   PIPECONF_12BPC       (3<<5)
-#define   PIPECONF_DITHER_EN   (1<<4)
+#define   PIPECONF_8BPC                (0 << 5)
+#define   PIPECONF_10BPC       (1 << 5)
+#define   PIPECONF_6BPC                (2 << 5)
+#define   PIPECONF_12BPC       (3 << 5)
+#define   PIPECONF_DITHER_EN   (1 << 4)
 #define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
-#define   PIPECONF_DITHER_TYPE_SP (0<<2)
-#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
-#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
-#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
+#define   PIPECONF_DITHER_TYPE_SP (0 << 2)
+#define   PIPECONF_DITHER_TYPE_ST1 (1 << 2)
+#define   PIPECONF_DITHER_TYPE_ST2 (2 << 2)
+#define   PIPECONF_DITHER_TYPE_TEMP (3 << 2)
 #define _PIPEASTAT             0x70024
-#define   PIPE_FIFO_UNDERRUN_STATUS            (1UL<<31)
-#define   SPRITE1_FLIP_DONE_INT_EN_VLV         (1UL<<30)
-#define   PIPE_CRC_ERROR_ENABLE                        (1UL<<29)
-#define   PIPE_CRC_DONE_ENABLE                 (1UL<<28)
-#define   PERF_COUNTER2_INTERRUPT_EN           (1UL<<27)
-#define   PIPE_GMBUS_EVENT_ENABLE              (1UL<<27)
-#define   PLANE_FLIP_DONE_INT_EN_VLV           (1UL<<26)
-#define   PIPE_HOTPLUG_INTERRUPT_ENABLE                (1UL<<26)
-#define   PIPE_VSYNC_INTERRUPT_ENABLE          (1UL<<25)
-#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE     (1UL<<24)
-#define   PIPE_DPST_EVENT_ENABLE               (1UL<<23)
-#define   SPRITE0_FLIP_DONE_INT_EN_VLV         (1UL<<22)
-#define   PIPE_LEGACY_BLC_EVENT_ENABLE         (1UL<<22)
-#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE      (1UL<<21)
-#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE     (1UL<<20)
-#define   PIPE_B_PSR_INTERRUPT_ENABLE_VLV      (1UL<<19)
-#define   PERF_COUNTER_INTERRUPT_EN            (1UL<<19)
-#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE     (1UL<<18) /* pre-965 */
-#define   PIPE_START_VBLANK_INTERRUPT_ENABLE   (1UL<<18) /* 965 or later */
-#define   PIPE_FRAMESTART_INTERRUPT_ENABLE     (1UL<<17)
-#define   PIPE_VBLANK_INTERRUPT_ENABLE         (1UL<<17)
-#define   PIPEA_HBLANK_INT_EN_VLV              (1UL<<16)
-#define   PIPE_OVERLAY_UPDATED_ENABLE          (1UL<<16)
-#define   SPRITE1_FLIP_DONE_INT_STATUS_VLV     (1UL<<15)
-#define   SPRITE0_FLIP_DONE_INT_STATUS_VLV     (1UL<<14)
-#define   PIPE_CRC_ERROR_INTERRUPT_STATUS      (1UL<<13)
-#define   PIPE_CRC_DONE_INTERRUPT_STATUS       (1UL<<12)
-#define   PERF_COUNTER2_INTERRUPT_STATUS       (1UL<<11)
-#define   PIPE_GMBUS_INTERRUPT_STATUS          (1UL<<11)
-#define   PLANE_FLIP_DONE_INT_STATUS_VLV       (1UL<<10)
-#define   PIPE_HOTPLUG_INTERRUPT_STATUS                (1UL<<10)
-#define   PIPE_VSYNC_INTERRUPT_STATUS          (1UL<<9)
-#define   PIPE_DISPLAY_LINE_COMPARE_STATUS     (1UL<<8)
-#define   PIPE_DPST_EVENT_STATUS               (1UL<<7)
-#define   PIPE_A_PSR_STATUS_VLV                        (1UL<<6)
-#define   PIPE_LEGACY_BLC_EVENT_STATUS         (1UL<<6)
-#define   PIPE_ODD_FIELD_INTERRUPT_STATUS      (1UL<<5)
-#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS     (1UL<<4)
-#define   PIPE_B_PSR_STATUS_VLV                        (1UL<<3)
-#define   PERF_COUNTER_INTERRUPT_STATUS                (1UL<<3)
-#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS     (1UL<<2) /* pre-965 */
-#define   PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL<<2) /* 965 or later */
-#define   PIPE_FRAMESTART_INTERRUPT_STATUS     (1UL<<1)
-#define   PIPE_VBLANK_INTERRUPT_STATUS         (1UL<<1)
-#define   PIPE_HBLANK_INT_STATUS               (1UL<<0)
-#define   PIPE_OVERLAY_UPDATED_STATUS          (1UL<<0)
+#define   PIPE_FIFO_UNDERRUN_STATUS            (1UL << 31)
+#define   SPRITE1_FLIP_DONE_INT_EN_VLV         (1UL << 30)
+#define   PIPE_CRC_ERROR_ENABLE                        (1UL << 29)
+#define   PIPE_CRC_DONE_ENABLE                 (1UL << 28)
+#define   PERF_COUNTER2_INTERRUPT_EN           (1UL << 27)
+#define   PIPE_GMBUS_EVENT_ENABLE              (1UL << 27)
+#define   PLANE_FLIP_DONE_INT_EN_VLV           (1UL << 26)
+#define   PIPE_HOTPLUG_INTERRUPT_ENABLE                (1UL << 26)
+#define   PIPE_VSYNC_INTERRUPT_ENABLE          (1UL << 25)
+#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE     (1UL << 24)
+#define   PIPE_DPST_EVENT_ENABLE               (1UL << 23)
+#define   SPRITE0_FLIP_DONE_INT_EN_VLV         (1UL << 22)
+#define   PIPE_LEGACY_BLC_EVENT_ENABLE         (1UL << 22)
+#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE      (1UL << 21)
+#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE     (1UL << 20)
+#define   PIPE_B_PSR_INTERRUPT_ENABLE_VLV      (1UL << 19)
+#define   PERF_COUNTER_INTERRUPT_EN            (1UL << 19)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE     (1UL << 18) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_ENABLE   (1UL << 18) /* 965 or later */
+#define   PIPE_FRAMESTART_INTERRUPT_ENABLE     (1UL << 17)
+#define   PIPE_VBLANK_INTERRUPT_ENABLE         (1UL << 17)
+#define   PIPEA_HBLANK_INT_EN_VLV              (1UL << 16)
+#define   PIPE_OVERLAY_UPDATED_ENABLE          (1UL << 16)
+#define   SPRITE1_FLIP_DONE_INT_STATUS_VLV     (1UL << 15)
+#define   SPRITE0_FLIP_DONE_INT_STATUS_VLV     (1UL << 14)
+#define   PIPE_CRC_ERROR_INTERRUPT_STATUS      (1UL << 13)
+#define   PIPE_CRC_DONE_INTERRUPT_STATUS       (1UL << 12)
+#define   PERF_COUNTER2_INTERRUPT_STATUS       (1UL << 11)
+#define   PIPE_GMBUS_INTERRUPT_STATUS          (1UL << 11)
+#define   PLANE_FLIP_DONE_INT_STATUS_VLV       (1UL << 10)
+#define   PIPE_HOTPLUG_INTERRUPT_STATUS                (1UL << 10)
+#define   PIPE_VSYNC_INTERRUPT_STATUS          (1UL << 9)
+#define   PIPE_DISPLAY_LINE_COMPARE_STATUS     (1UL << 8)
+#define   PIPE_DPST_EVENT_STATUS               (1UL << 7)
+#define   PIPE_A_PSR_STATUS_VLV                        (1UL << 6)
+#define   PIPE_LEGACY_BLC_EVENT_STATUS         (1UL << 6)
+#define   PIPE_ODD_FIELD_INTERRUPT_STATUS      (1UL << 5)
+#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS     (1UL << 4)
+#define   PIPE_B_PSR_STATUS_VLV                        (1UL << 3)
+#define   PERF_COUNTER_INTERRUPT_STATUS                (1UL << 3)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS     (1UL << 2) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_STATUS   (1UL << 2) /* 965 or later */
+#define   PIPE_FRAMESTART_INTERRUPT_STATUS     (1UL << 1)
+#define   PIPE_VBLANK_INTERRUPT_STATUS         (1UL << 1)
+#define   PIPE_HBLANK_INT_STATUS               (1UL << 0)
+#define   PIPE_OVERLAY_UPDATED_STATUS          (1UL << 0)
 
 #define PIPESTAT_INT_ENABLE_MASK               0x7fff0000
 #define PIPESTAT_INT_STATUS_MASK               0x0000ffff
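
The two masks above codify the PIPESTAT split: enable bits occupy the top half, sticky status bits the bottom half, and a status bit is acknowledged by writing it back as 1. A minimal sketch (PIPESTAT() is the per-pipe wrapper defined nearby in the full header; mmio accessors hypothetical):

static void pipestat_enable_vblank_sketch(enum pipe pipe)
{
	u32 reg = mmio_read32(PIPESTAT(pipe));

	reg &= PIPESTAT_INT_ENABLE_MASK;	/* don't ack unrelated statuses */
	reg |= PIPE_VBLANK_INTERRUPT_ENABLE;
	reg |= PIPE_VBLANK_INTERRUPT_STATUS;	/* write 1 clears any pending */
	mmio_write32(PIPESTAT(pipe), reg);
}
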
@@ -5498,67 +5658,67 @@ enum {
 
 #define _PIPE_MISC_A                   0x70030
 #define _PIPE_MISC_B                   0x71030
-#define   PIPEMISC_YUV420_ENABLE       (1<<27)
-#define   PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26)
-#define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1<<11)
-#define   PIPEMISC_DITHER_BPC_MASK     (7<<5)
-#define   PIPEMISC_DITHER_8_BPC                (0<<5)
-#define   PIPEMISC_DITHER_10_BPC       (1<<5)
-#define   PIPEMISC_DITHER_6_BPC                (2<<5)
-#define   PIPEMISC_DITHER_12_BPC       (3<<5)
-#define   PIPEMISC_DITHER_ENABLE       (1<<4)
-#define   PIPEMISC_DITHER_TYPE_MASK    (3<<2)
-#define   PIPEMISC_DITHER_TYPE_SP      (0<<2)
+#define   PIPEMISC_YUV420_ENABLE       (1 << 27)
+#define   PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26)
+#define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
+#define   PIPEMISC_DITHER_BPC_MASK     (7 << 5)
+#define   PIPEMISC_DITHER_8_BPC                (0 << 5)
+#define   PIPEMISC_DITHER_10_BPC       (1 << 5)
+#define   PIPEMISC_DITHER_6_BPC                (2 << 5)
+#define   PIPEMISC_DITHER_12_BPC       (3 << 5)
+#define   PIPEMISC_DITHER_ENABLE       (1 << 4)
+#define   PIPEMISC_DITHER_TYPE_MASK    (3 << 2)
+#define   PIPEMISC_DITHER_TYPE_SP      (0 << 2)
 #define PIPEMISC(pipe)                 _MMIO_PIPE2(pipe, _PIPE_MISC_A)
 
 #define VLV_DPFLIPSTAT                         _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define   PIPEB_LINE_COMPARE_INT_EN            (1<<29)
-#define   PIPEB_HLINE_INT_EN                   (1<<28)
-#define   PIPEB_VBLANK_INT_EN                  (1<<27)
-#define   SPRITED_FLIP_DONE_INT_EN             (1<<26)
-#define   SPRITEC_FLIP_DONE_INT_EN             (1<<25)
-#define   PLANEB_FLIP_DONE_INT_EN              (1<<24)
-#define   PIPE_PSR_INT_EN                      (1<<22)
-#define   PIPEA_LINE_COMPARE_INT_EN            (1<<21)
-#define   PIPEA_HLINE_INT_EN                   (1<<20)
-#define   PIPEA_VBLANK_INT_EN                  (1<<19)
-#define   SPRITEB_FLIP_DONE_INT_EN             (1<<18)
-#define   SPRITEA_FLIP_DONE_INT_EN             (1<<17)
-#define   PLANEA_FLIPDONE_INT_EN               (1<<16)
-#define   PIPEC_LINE_COMPARE_INT_EN            (1<<13)
-#define   PIPEC_HLINE_INT_EN                   (1<<12)
-#define   PIPEC_VBLANK_INT_EN                  (1<<11)
-#define   SPRITEF_FLIPDONE_INT_EN              (1<<10)
-#define   SPRITEE_FLIPDONE_INT_EN              (1<<9)
-#define   PLANEC_FLIPDONE_INT_EN               (1<<8)
+#define   PIPEB_LINE_COMPARE_INT_EN            (1 << 29)
+#define   PIPEB_HLINE_INT_EN                   (1 << 28)
+#define   PIPEB_VBLANK_INT_EN                  (1 << 27)
+#define   SPRITED_FLIP_DONE_INT_EN             (1 << 26)
+#define   SPRITEC_FLIP_DONE_INT_EN             (1 << 25)
+#define   PLANEB_FLIP_DONE_INT_EN              (1 << 24)
+#define   PIPE_PSR_INT_EN                      (1 << 22)
+#define   PIPEA_LINE_COMPARE_INT_EN            (1 << 21)
+#define   PIPEA_HLINE_INT_EN                   (1 << 20)
+#define   PIPEA_VBLANK_INT_EN                  (1 << 19)
+#define   SPRITEB_FLIP_DONE_INT_EN             (1 << 18)
+#define   SPRITEA_FLIP_DONE_INT_EN             (1 << 17)
+#define   PLANEA_FLIPDONE_INT_EN               (1 << 16)
+#define   PIPEC_LINE_COMPARE_INT_EN            (1 << 13)
+#define   PIPEC_HLINE_INT_EN                   (1 << 12)
+#define   PIPEC_VBLANK_INT_EN                  (1 << 11)
+#define   SPRITEF_FLIPDONE_INT_EN              (1 << 10)
+#define   SPRITEE_FLIPDONE_INT_EN              (1 << 9)
+#define   PLANEC_FLIPDONE_INT_EN               (1 << 8)
 
 #define DPINVGTT                               _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define   SPRITEF_INVALID_GTT_INT_EN           (1<<27)
-#define   SPRITEE_INVALID_GTT_INT_EN           (1<<26)
-#define   PLANEC_INVALID_GTT_INT_EN            (1<<25)
-#define   CURSORC_INVALID_GTT_INT_EN           (1<<24)
-#define   CURSORB_INVALID_GTT_INT_EN           (1<<23)
-#define   CURSORA_INVALID_GTT_INT_EN           (1<<22)
-#define   SPRITED_INVALID_GTT_INT_EN           (1<<21)
-#define   SPRITEC_INVALID_GTT_INT_EN           (1<<20)
-#define   PLANEB_INVALID_GTT_INT_EN            (1<<19)
-#define   SPRITEB_INVALID_GTT_INT_EN           (1<<18)
-#define   SPRITEA_INVALID_GTT_INT_EN           (1<<17)
-#define   PLANEA_INVALID_GTT_INT_EN            (1<<16)
+#define   SPRITEF_INVALID_GTT_INT_EN           (1 << 27)
+#define   SPRITEE_INVALID_GTT_INT_EN           (1 << 26)
+#define   PLANEC_INVALID_GTT_INT_EN            (1 << 25)
+#define   CURSORC_INVALID_GTT_INT_EN           (1 << 24)
+#define   CURSORB_INVALID_GTT_INT_EN           (1 << 23)
+#define   CURSORA_INVALID_GTT_INT_EN           (1 << 22)
+#define   SPRITED_INVALID_GTT_INT_EN           (1 << 21)
+#define   SPRITEC_INVALID_GTT_INT_EN           (1 << 20)
+#define   PLANEB_INVALID_GTT_INT_EN            (1 << 19)
+#define   SPRITEB_INVALID_GTT_INT_EN           (1 << 18)
+#define   SPRITEA_INVALID_GTT_INT_EN           (1 << 17)
+#define   PLANEA_INVALID_GTT_INT_EN            (1 << 16)
 #define   DPINVGTT_EN_MASK                     0xff0000
 #define   DPINVGTT_EN_MASK_CHV                 0xfff0000
-#define   SPRITEF_INVALID_GTT_STATUS           (1<<11)
-#define   SPRITEE_INVALID_GTT_STATUS           (1<<10)
-#define   PLANEC_INVALID_GTT_STATUS            (1<<9)
-#define   CURSORC_INVALID_GTT_STATUS           (1<<8)
-#define   CURSORB_INVALID_GTT_STATUS           (1<<7)
-#define   CURSORA_INVALID_GTT_STATUS           (1<<6)
-#define   SPRITED_INVALID_GTT_STATUS           (1<<5)
-#define   SPRITEC_INVALID_GTT_STATUS           (1<<4)
-#define   PLANEB_INVALID_GTT_STATUS            (1<<3)
-#define   SPRITEB_INVALID_GTT_STATUS           (1<<2)
-#define   SPRITEA_INVALID_GTT_STATUS           (1<<1)
-#define   PLANEA_INVALID_GTT_STATUS            (1<<0)
+#define   SPRITEF_INVALID_GTT_STATUS           (1 << 11)
+#define   SPRITEE_INVALID_GTT_STATUS           (1 << 10)
+#define   PLANEC_INVALID_GTT_STATUS            (1 << 9)
+#define   CURSORC_INVALID_GTT_STATUS           (1 << 8)
+#define   CURSORB_INVALID_GTT_STATUS           (1 << 7)
+#define   CURSORA_INVALID_GTT_STATUS           (1 << 6)
+#define   SPRITED_INVALID_GTT_STATUS           (1 << 5)
+#define   SPRITEC_INVALID_GTT_STATUS           (1 << 4)
+#define   PLANEB_INVALID_GTT_STATUS            (1 << 3)
+#define   SPRITEB_INVALID_GTT_STATUS           (1 << 2)
+#define   SPRITEA_INVALID_GTT_STATUS           (1 << 1)
+#define   PLANEA_INVALID_GTT_STATUS            (1 << 0)
 #define   DPINVGTT_STATUS_MASK                 0xff
 #define   DPINVGTT_STATUS_MASK_CHV             0xfff
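/*
 * Editor's note: DPINVGTT follows the same layout convention as PIPESTAT
 * above -- each enable bit sits exactly 16 above its status bit (PLANEA
 * 16/0, ..., SPRITEF 27/11), hence the paired 0xff0000/0xff and
 * 0xfff0000/0xfff masks.
 */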
 
@@ -5599,149 +5759,149 @@ enum {
 /* pnv/gen4/g4x/vlv/chv */
 #define DSPFW1         _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
 #define   DSPFW_SR_SHIFT               23
-#define   DSPFW_SR_MASK                        (0x1ff<<23)
+#define   DSPFW_SR_MASK                        (0x1ff << 23)
 #define   DSPFW_CURSORB_SHIFT          16
-#define   DSPFW_CURSORB_MASK           (0x3f<<16)
+#define   DSPFW_CURSORB_MASK           (0x3f << 16)
 #define   DSPFW_PLANEB_SHIFT           8
-#define   DSPFW_PLANEB_MASK            (0x7f<<8)
-#define   DSPFW_PLANEB_MASK_VLV                (0xff<<8) /* vlv/chv */
+#define   DSPFW_PLANEB_MASK            (0x7f << 8)
+#define   DSPFW_PLANEB_MASK_VLV                (0xff << 8) /* vlv/chv */
 #define   DSPFW_PLANEA_SHIFT           0
-#define   DSPFW_PLANEA_MASK            (0x7f<<0)
-#define   DSPFW_PLANEA_MASK_VLV                (0xff<<0) /* vlv/chv */
+#define   DSPFW_PLANEA_MASK            (0x7f << 0)
+#define   DSPFW_PLANEA_MASK_VLV                (0xff << 0) /* vlv/chv */
 #define DSPFW2         _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
-#define   DSPFW_FBC_SR_EN              (1<<31)   /* g4x */
+#define   DSPFW_FBC_SR_EN              (1 << 31)         /* g4x */
 #define   DSPFW_FBC_SR_SHIFT           28
-#define   DSPFW_FBC_SR_MASK            (0x7<<28) /* g4x */
+#define   DSPFW_FBC_SR_MASK            (0x7 << 28) /* g4x */
 #define   DSPFW_FBC_HPLL_SR_SHIFT      24
-#define   DSPFW_FBC_HPLL_SR_MASK       (0xf<<24) /* g4x */
+#define   DSPFW_FBC_HPLL_SR_MASK       (0xf << 24) /* g4x */
 #define   DSPFW_SPRITEB_SHIFT          (16)
-#define   DSPFW_SPRITEB_MASK           (0x7f<<16) /* g4x */
-#define   DSPFW_SPRITEB_MASK_VLV       (0xff<<16) /* vlv/chv */
+#define   DSPFW_SPRITEB_MASK           (0x7f << 16) /* g4x */
+#define   DSPFW_SPRITEB_MASK_VLV       (0xff << 16) /* vlv/chv */
 #define   DSPFW_CURSORA_SHIFT          8
-#define   DSPFW_CURSORA_MASK           (0x3f<<8)
+#define   DSPFW_CURSORA_MASK           (0x3f << 8)
 #define   DSPFW_PLANEC_OLD_SHIFT       0
-#define   DSPFW_PLANEC_OLD_MASK                (0x7f<<0) /* pre-gen4 sprite C */
+#define   DSPFW_PLANEC_OLD_MASK                (0x7f << 0) /* pre-gen4 sprite C */
 #define   DSPFW_SPRITEA_SHIFT          0
-#define   DSPFW_SPRITEA_MASK           (0x7f<<0) /* g4x */
-#define   DSPFW_SPRITEA_MASK_VLV       (0xff<<0) /* vlv/chv */
+#define   DSPFW_SPRITEA_MASK           (0x7f << 0) /* g4x */
+#define   DSPFW_SPRITEA_MASK_VLV       (0xff << 0) /* vlv/chv */
 #define DSPFW3         _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
-#define   DSPFW_HPLL_SR_EN             (1<<31)
-#define   PINEVIEW_SELF_REFRESH_EN     (1<<30)
+#define   DSPFW_HPLL_SR_EN             (1 << 31)
+#define   PINEVIEW_SELF_REFRESH_EN     (1 << 30)
 #define   DSPFW_CURSOR_SR_SHIFT                24
-#define   DSPFW_CURSOR_SR_MASK         (0x3f<<24)
+#define   DSPFW_CURSOR_SR_MASK         (0x3f << 24)
 #define   DSPFW_HPLL_CURSOR_SHIFT      16
-#define   DSPFW_HPLL_CURSOR_MASK       (0x3f<<16)
+#define   DSPFW_HPLL_CURSOR_MASK       (0x3f << 16)
 #define   DSPFW_HPLL_SR_SHIFT          0
-#define   DSPFW_HPLL_SR_MASK           (0x1ff<<0)
+#define   DSPFW_HPLL_SR_MASK           (0x1ff << 0)
 
 /* vlv/chv */
 #define DSPFW4         _MMIO(VLV_DISPLAY_BASE + 0x70070)
 #define   DSPFW_SPRITEB_WM1_SHIFT      16
-#define   DSPFW_SPRITEB_WM1_MASK       (0xff<<16)
+#define   DSPFW_SPRITEB_WM1_MASK       (0xff << 16)
 #define   DSPFW_CURSORA_WM1_SHIFT      8
-#define   DSPFW_CURSORA_WM1_MASK       (0x3f<<8)
+#define   DSPFW_CURSORA_WM1_MASK       (0x3f << 8)
 #define   DSPFW_SPRITEA_WM1_SHIFT      0
-#define   DSPFW_SPRITEA_WM1_MASK       (0xff<<0)
+#define   DSPFW_SPRITEA_WM1_MASK       (0xff << 0)
 #define DSPFW5         _MMIO(VLV_DISPLAY_BASE + 0x70074)
 #define   DSPFW_PLANEB_WM1_SHIFT       24
-#define   DSPFW_PLANEB_WM1_MASK                (0xff<<24)
+#define   DSPFW_PLANEB_WM1_MASK                (0xff << 24)
 #define   DSPFW_PLANEA_WM1_SHIFT       16
-#define   DSPFW_PLANEA_WM1_MASK                (0xff<<16)
+#define   DSPFW_PLANEA_WM1_MASK                (0xff << 16)
 #define   DSPFW_CURSORB_WM1_SHIFT      8
-#define   DSPFW_CURSORB_WM1_MASK       (0x3f<<8)
+#define   DSPFW_CURSORB_WM1_MASK       (0x3f << 8)
 #define   DSPFW_CURSOR_SR_WM1_SHIFT    0
-#define   DSPFW_CURSOR_SR_WM1_MASK     (0x3f<<0)
+#define   DSPFW_CURSOR_SR_WM1_MASK     (0x3f << 0)
 #define DSPFW6         _MMIO(VLV_DISPLAY_BASE + 0x70078)
 #define   DSPFW_SR_WM1_SHIFT           0
-#define   DSPFW_SR_WM1_MASK            (0x1ff<<0)
+#define   DSPFW_SR_WM1_MASK            (0x1ff << 0)
 #define DSPFW7         _MMIO(VLV_DISPLAY_BASE + 0x7007c)
 #define DSPFW7_CHV     _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
 #define   DSPFW_SPRITED_WM1_SHIFT      24
-#define   DSPFW_SPRITED_WM1_MASK       (0xff<<24)
+#define   DSPFW_SPRITED_WM1_MASK       (0xff << 24)
 #define   DSPFW_SPRITED_SHIFT          16
-#define   DSPFW_SPRITED_MASK_VLV       (0xff<<16)
+#define   DSPFW_SPRITED_MASK_VLV       (0xff << 16)
 #define   DSPFW_SPRITEC_WM1_SHIFT      8
-#define   DSPFW_SPRITEC_WM1_MASK       (0xff<<8)
+#define   DSPFW_SPRITEC_WM1_MASK       (0xff << 8)
 #define   DSPFW_SPRITEC_SHIFT          0
-#define   DSPFW_SPRITEC_MASK_VLV       (0xff<<0)
+#define   DSPFW_SPRITEC_MASK_VLV       (0xff << 0)
 #define DSPFW8_CHV     _MMIO(VLV_DISPLAY_BASE + 0x700b8)
 #define   DSPFW_SPRITEF_WM1_SHIFT      24
-#define   DSPFW_SPRITEF_WM1_MASK       (0xff<<24)
+#define   DSPFW_SPRITEF_WM1_MASK       (0xff << 24)
 #define   DSPFW_SPRITEF_SHIFT          16
-#define   DSPFW_SPRITEF_MASK_VLV       (0xff<<16)
+#define   DSPFW_SPRITEF_MASK_VLV       (0xff << 16)
 #define   DSPFW_SPRITEE_WM1_SHIFT      8
-#define   DSPFW_SPRITEE_WM1_MASK       (0xff<<8)
+#define   DSPFW_SPRITEE_WM1_MASK       (0xff << 8)
 #define   DSPFW_SPRITEE_SHIFT          0
-#define   DSPFW_SPRITEE_MASK_VLV       (0xff<<0)
+#define   DSPFW_SPRITEE_MASK_VLV       (0xff << 0)
 #define DSPFW9_CHV     _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
 #define   DSPFW_PLANEC_WM1_SHIFT       24
-#define   DSPFW_PLANEC_WM1_MASK                (0xff<<24)
+#define   DSPFW_PLANEC_WM1_MASK                (0xff << 24)
 #define   DSPFW_PLANEC_SHIFT           16
-#define   DSPFW_PLANEC_MASK_VLV                (0xff<<16)
+#define   DSPFW_PLANEC_MASK_VLV                (0xff << 16)
 #define   DSPFW_CURSORC_WM1_SHIFT      8
-#define   DSPFW_CURSORC_WM1_MASK       (0x3f<<16)
+#define   DSPFW_CURSORC_WM1_MASK       (0x3f << 16)
 #define   DSPFW_CURSORC_SHIFT          0
-#define   DSPFW_CURSORC_MASK           (0x3f<<0)
+#define   DSPFW_CURSORC_MASK           (0x3f << 0)
 
 /* vlv/chv high order bits */
 #define DSPHOWM                _MMIO(VLV_DISPLAY_BASE + 0x70064)
 #define   DSPFW_SR_HI_SHIFT            24
-#define   DSPFW_SR_HI_MASK             (3<<24) /* 2 bits for chv, 1 for vlv */
+#define   DSPFW_SR_HI_MASK             (3 << 24) /* 2 bits for chv, 1 for vlv */
 #define   DSPFW_SPRITEF_HI_SHIFT       23
-#define   DSPFW_SPRITEF_HI_MASK                (1<<23)
+#define   DSPFW_SPRITEF_HI_MASK                (1 << 23)
 #define   DSPFW_SPRITEE_HI_SHIFT       22
-#define   DSPFW_SPRITEE_HI_MASK                (1<<22)
+#define   DSPFW_SPRITEE_HI_MASK                (1 << 22)
 #define   DSPFW_PLANEC_HI_SHIFT                21
-#define   DSPFW_PLANEC_HI_MASK         (1<<21)
+#define   DSPFW_PLANEC_HI_MASK         (1 << 21)
 #define   DSPFW_SPRITED_HI_SHIFT       20
-#define   DSPFW_SPRITED_HI_MASK                (1<<20)
+#define   DSPFW_SPRITED_HI_MASK                (1 << 20)
 #define   DSPFW_SPRITEC_HI_SHIFT       16
-#define   DSPFW_SPRITEC_HI_MASK                (1<<16)
+#define   DSPFW_SPRITEC_HI_MASK                (1 << 16)
 #define   DSPFW_PLANEB_HI_SHIFT                12
-#define   DSPFW_PLANEB_HI_MASK         (1<<12)
+#define   DSPFW_PLANEB_HI_MASK         (1 << 12)
 #define   DSPFW_SPRITEB_HI_SHIFT       8
-#define   DSPFW_SPRITEB_HI_MASK                (1<<8)
+#define   DSPFW_SPRITEB_HI_MASK                (1 << 8)
 #define   DSPFW_SPRITEA_HI_SHIFT       4
-#define   DSPFW_SPRITEA_HI_MASK                (1<<4)
+#define   DSPFW_SPRITEA_HI_MASK                (1 << 4)
 #define   DSPFW_PLANEA_HI_SHIFT                0
-#define   DSPFW_PLANEA_HI_MASK         (1<<0)
+#define   DSPFW_PLANEA_HI_MASK         (1 << 0)
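/*
 * Editor's sketch: DSPHOWM holds the high-order watermark bits that do not
 * fit in the 8-bit DSPFW fields (one extra bit each, two for the chv SR
 * field); a read-back would combine the two registers, e.g. for plane A
 * on vlv/chv:
 *
 *	wm = ((howm & DSPFW_PLANEA_HI_MASK) >> DSPFW_PLANEA_HI_SHIFT) << 8 |
 *	     ((fw1 & DSPFW_PLANEA_MASK_VLV) >> DSPFW_PLANEA_SHIFT);
 */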
 #define DSPHOWM1       _MMIO(VLV_DISPLAY_BASE + 0x70068)
 #define   DSPFW_SR_WM1_HI_SHIFT                24
-#define   DSPFW_SR_WM1_HI_MASK         (3<<24) /* 2 bits for chv, 1 for vlv */
+#define   DSPFW_SR_WM1_HI_MASK         (3 << 24) /* 2 bits for chv, 1 for vlv */
 #define   DSPFW_SPRITEF_WM1_HI_SHIFT   23
-#define   DSPFW_SPRITEF_WM1_HI_MASK    (1<<23)
+#define   DSPFW_SPRITEF_WM1_HI_MASK    (1 << 23)
 #define   DSPFW_SPRITEE_WM1_HI_SHIFT   22
-#define   DSPFW_SPRITEE_WM1_HI_MASK    (1<<22)
+#define   DSPFW_SPRITEE_WM1_HI_MASK    (1 << 22)
 #define   DSPFW_PLANEC_WM1_HI_SHIFT    21
-#define   DSPFW_PLANEC_WM1_HI_MASK     (1<<21)
+#define   DSPFW_PLANEC_WM1_HI_MASK     (1 << 21)
 #define   DSPFW_SPRITED_WM1_HI_SHIFT   20
-#define   DSPFW_SPRITED_WM1_HI_MASK    (1<<20)
+#define   DSPFW_SPRITED_WM1_HI_MASK    (1 << 20)
 #define   DSPFW_SPRITEC_WM1_HI_SHIFT   16
-#define   DSPFW_SPRITEC_WM1_HI_MASK    (1<<16)
+#define   DSPFW_SPRITEC_WM1_HI_MASK    (1 << 16)
 #define   DSPFW_PLANEB_WM1_HI_SHIFT    12
-#define   DSPFW_PLANEB_WM1_HI_MASK     (1<<12)
+#define   DSPFW_PLANEB_WM1_HI_MASK     (1 << 12)
 #define   DSPFW_SPRITEB_WM1_HI_SHIFT   8
-#define   DSPFW_SPRITEB_WM1_HI_MASK    (1<<8)
+#define   DSPFW_SPRITEB_WM1_HI_MASK    (1 << 8)
 #define   DSPFW_SPRITEA_WM1_HI_SHIFT   4
-#define   DSPFW_SPRITEA_WM1_HI_MASK    (1<<4)
+#define   DSPFW_SPRITEA_WM1_HI_MASK    (1 << 4)
 #define   DSPFW_PLANEA_WM1_HI_SHIFT    0
-#define   DSPFW_PLANEA_WM1_HI_MASK     (1<<0)
+#define   DSPFW_PLANEA_WM1_HI_MASK     (1 << 0)
 
 /* drain latency register values */
 #define VLV_DDL(pipe)                  _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
 #define DDL_CURSOR_SHIFT               24
-#define DDL_SPRITE_SHIFT(sprite)       (8+8*(sprite))
+#define DDL_SPRITE_SHIFT(sprite)       (8 + 8 * (sprite))
 #define DDL_PLANE_SHIFT                        0
-#define DDL_PRECISION_HIGH             (1<<7)
-#define DDL_PRECISION_LOW              (0<<7)
+#define DDL_PRECISION_HIGH             (1 << 7)
+#define DDL_PRECISION_LOW              (0 << 7)
 #define DRAIN_LATENCY_MASK             0x7f
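/*
 * Editor's sketch (not part of the patch; helper name hypothetical): each
 * VLV_DDL register packs four 8-bit drain-latency fields -- plane at bit 0,
 * sprites at bits 8 and 16 via DDL_SPRITE_SHIFT(), cursor at bit 24 -- each
 * a 7-bit latency value plus the precision select in bit 7.
 */
static inline u32 vlv_ddl_sprite_field(int sprite, u32 latency, bool high_prec)
{
	u32 val = (latency & DRAIN_LATENCY_MASK) |
		  (high_prec ? DDL_PRECISION_HIGH : DDL_PRECISION_LOW);
	return val << DDL_SPRITE_SHIFT(sprite);
}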
 
 #define CBR1_VLV                       _MMIO(VLV_DISPLAY_BASE + 0x70400)
-#define  CBR_PND_DEADLINE_DISABLE      (1<<31)
-#define  CBR_PWM_CLOCK_MUX_SELECT      (1<<30)
+#define  CBR_PND_DEADLINE_DISABLE      (1 << 31)
+#define  CBR_PWM_CLOCK_MUX_SELECT      (1 << 30)
 
 #define CBR4_VLV                       _MMIO(VLV_DISPLAY_BASE + 0x70450)
-#define  CBR_DPLLBMD_PIPE(pipe)                (1<<(7+(pipe)*11)) /* pipes B and C */
+#define  CBR_DPLLBMD_PIPE(pipe)                (1 << (7 + (pipe) * 11)) /* pipes B and C */
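/*
 * Editor's note: with PIPE_B == 1 and PIPE_C == 2, the expression above
 * works out to bit 18 for pipe B and bit 29 for pipe C; pipe A has no
 * DPLLBMD bit, matching the "pipes B and C" comment.
 */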
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE     64
@@ -5813,32 +5973,32 @@ enum {
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK          _MMIO(0x45100)
-#define  WM0_PIPE_PLANE_MASK   (0xffff<<16)
+#define  WM0_PIPE_PLANE_MASK   (0xffff << 16)
 #define  WM0_PIPE_PLANE_SHIFT  16
-#define  WM0_PIPE_SPRITE_MASK  (0xff<<8)
+#define  WM0_PIPE_SPRITE_MASK  (0xff << 8)
 #define  WM0_PIPE_SPRITE_SHIFT 8
 #define  WM0_PIPE_CURSOR_MASK  (0xff)
 
 #define WM0_PIPEB_ILK          _MMIO(0x45104)
 #define WM0_PIPEC_IVB          _MMIO(0x45200)
 #define WM1_LP_ILK             _MMIO(0x45108)
-#define  WM1_LP_SR_EN          (1<<31)
+#define  WM1_LP_SR_EN          (1 << 31)
 #define  WM1_LP_LATENCY_SHIFT  24
-#define  WM1_LP_LATENCY_MASK   (0x7f<<24)
-#define  WM1_LP_FBC_MASK       (0xf<<20)
+#define  WM1_LP_LATENCY_MASK   (0x7f << 24)
+#define  WM1_LP_FBC_MASK       (0xf << 20)
 #define  WM1_LP_FBC_SHIFT      20
 #define  WM1_LP_FBC_SHIFT_BDW  19
-#define  WM1_LP_SR_MASK                (0x7ff<<8)
+#define  WM1_LP_SR_MASK                (0x7ff << 8)
 #define  WM1_LP_SR_SHIFT       8
 #define  WM1_LP_CURSOR_MASK    (0xff)
 #define WM2_LP_ILK             _MMIO(0x4510c)
-#define  WM2_LP_EN             (1<<31)
+#define  WM2_LP_EN             (1 << 31)
 #define WM3_LP_ILK             _MMIO(0x45110)
-#define  WM3_LP_EN             (1<<31)
+#define  WM3_LP_EN             (1 << 31)
 #define WM1S_LP_ILK            _MMIO(0x45120)
 #define WM2S_LP_IVB            _MMIO(0x45124)
 #define WM3S_LP_IVB            _MMIO(0x45128)
-#define  WM1S_LP_EN            (1<<31)
+#define  WM1S_LP_EN            (1 << 31)
 
 #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \
        (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \
@@ -5895,8 +6055,7 @@ enum {
 #define   CURSOR_ENABLE                0x80000000
 #define   CURSOR_GAMMA_ENABLE  0x40000000
 #define   CURSOR_STRIDE_SHIFT  28
-#define   CURSOR_STRIDE(x)     ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
-#define   CURSOR_PIPE_CSC_ENABLE (1<<24)
+#define   CURSOR_STRIDE(x)     ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
 #define   CURSOR_FORMAT_SHIFT  24
 #define   CURSOR_FORMAT_MASK   (0x07 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_2C     (0x00 << CURSOR_FORMAT_SHIFT)
@@ -5905,18 +6064,21 @@ enum {
 #define   CURSOR_FORMAT_ARGB   (0x04 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_XRGB   (0x05 << CURSOR_FORMAT_SHIFT)
 /* New style CUR*CNTR flags */
-#define   CURSOR_MODE          0x27
-#define   CURSOR_MODE_DISABLE   0x00
-#define   CURSOR_MODE_128_32B_AX 0x02
-#define   CURSOR_MODE_256_32B_AX 0x03
-#define   CURSOR_MODE_64_32B_AX 0x07
-#define   CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX)
-#define   CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX)
-#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_MODE         0x27
+#define   MCURSOR_MODE_DISABLE   0x00
+#define   MCURSOR_MODE_128_32B_AX 0x02
+#define   MCURSOR_MODE_256_32B_AX 0x03
+#define   MCURSOR_MODE_64_32B_AX 0x07
+#define   MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX)
+#define   MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX)
+#define   MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX)
+#define   MCURSOR_PIPE_SELECT_MASK     (0x3 << 28)
+#define   MCURSOR_PIPE_SELECT_SHIFT    28
 #define   MCURSOR_PIPE_SELECT(pipe)    ((pipe) << 28)
 #define   MCURSOR_GAMMA_ENABLE  (1 << 26)
-#define   CURSOR_ROTATE_180    (1<<15)
-#define   CURSOR_TRICKLE_FEED_DISABLE  (1 << 14)
+#define   MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define   MCURSOR_ROTATE_180   (1 << 15)
+#define   MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
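/*
 * Editor's sketch: the MCURSOR_PIPE_SELECT_MASK/_SHIFT pair added above
 * gives the read-back counterpart of the existing write-side macro:
 *
 *	pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> MCURSOR_PIPE_SELECT_SHIFT;
 */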
 #define _CURABASE              0x70084
 #define _CURAPOS               0x70088
 #define   CURSOR_POS_MASK       0x007FF
@@ -5953,41 +6115,41 @@ enum {
 
 /* Display A control */
 #define _DSPACNTR                              0x70180
-#define   DISPLAY_PLANE_ENABLE                 (1<<31)
+#define   DISPLAY_PLANE_ENABLE                 (1 << 31)
 #define   DISPLAY_PLANE_DISABLE                        0
-#define   DISPPLANE_GAMMA_ENABLE               (1<<30)
+#define   DISPPLANE_GAMMA_ENABLE               (1 << 30)
 #define   DISPPLANE_GAMMA_DISABLE              0
-#define   DISPPLANE_PIXFORMAT_MASK             (0xf<<26)
-#define   DISPPLANE_YUV422                     (0x0<<26)
-#define   DISPPLANE_8BPP                       (0x2<<26)
-#define   DISPPLANE_BGRA555                    (0x3<<26)
-#define   DISPPLANE_BGRX555                    (0x4<<26)
-#define   DISPPLANE_BGRX565                    (0x5<<26)
-#define   DISPPLANE_BGRX888                    (0x6<<26)
-#define   DISPPLANE_BGRA888                    (0x7<<26)
-#define   DISPPLANE_RGBX101010                 (0x8<<26)
-#define   DISPPLANE_RGBA101010                 (0x9<<26)
-#define   DISPPLANE_BGRX101010                 (0xa<<26)
-#define   DISPPLANE_RGBX161616                 (0xc<<26)
-#define   DISPPLANE_RGBX888                    (0xe<<26)
-#define   DISPPLANE_RGBA888                    (0xf<<26)
-#define   DISPPLANE_STEREO_ENABLE              (1<<25)
+#define   DISPPLANE_PIXFORMAT_MASK             (0xf << 26)
+#define   DISPPLANE_YUV422                     (0x0 << 26)
+#define   DISPPLANE_8BPP                       (0x2 << 26)
+#define   DISPPLANE_BGRA555                    (0x3 << 26)
+#define   DISPPLANE_BGRX555                    (0x4 << 26)
+#define   DISPPLANE_BGRX565                    (0x5 << 26)
+#define   DISPPLANE_BGRX888                    (0x6 << 26)
+#define   DISPPLANE_BGRA888                    (0x7 << 26)
+#define   DISPPLANE_RGBX101010                 (0x8 << 26)
+#define   DISPPLANE_RGBA101010                 (0x9 << 26)
+#define   DISPPLANE_BGRX101010                 (0xa << 26)
+#define   DISPPLANE_RGBX161616                 (0xc << 26)
+#define   DISPPLANE_RGBX888                    (0xe << 26)
+#define   DISPPLANE_RGBA888                    (0xf << 26)
+#define   DISPPLANE_STEREO_ENABLE              (1 << 25)
 #define   DISPPLANE_STEREO_DISABLE             0
-#define   DISPPLANE_PIPE_CSC_ENABLE            (1<<24)
+#define   DISPPLANE_PIPE_CSC_ENABLE            (1 << 24)
 #define   DISPPLANE_SEL_PIPE_SHIFT             24
-#define   DISPPLANE_SEL_PIPE_MASK              (3<<DISPPLANE_SEL_PIPE_SHIFT)
-#define   DISPPLANE_SEL_PIPE(pipe)             ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT)
-#define   DISPPLANE_SRC_KEY_ENABLE             (1<<22)
+#define   DISPPLANE_SEL_PIPE_MASK              (3 << DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SEL_PIPE(pipe)             ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
+#define   DISPPLANE_SRC_KEY_ENABLE             (1 << 22)
 #define   DISPPLANE_SRC_KEY_DISABLE            0
-#define   DISPPLANE_LINE_DOUBLE                        (1<<20)
+#define   DISPPLANE_LINE_DOUBLE                        (1 << 20)
 #define   DISPPLANE_NO_LINE_DOUBLE             0
 #define   DISPPLANE_STEREO_POLARITY_FIRST      0
-#define   DISPPLANE_STEREO_POLARITY_SECOND     (1<<18)
-#define   DISPPLANE_ALPHA_PREMULTIPLY          (1<<16) /* CHV pipe B */
-#define   DISPPLANE_ROTATE_180                 (1<<15)
-#define   DISPPLANE_TRICKLE_FEED_DISABLE       (1<<14) /* Ironlake */
-#define   DISPPLANE_TILED                      (1<<10)
-#define   DISPPLANE_MIRROR                     (1<<8) /* CHV pipe B */
+#define   DISPPLANE_STEREO_POLARITY_SECOND     (1 << 18)
+#define   DISPPLANE_ALPHA_PREMULTIPLY          (1 << 16) /* CHV pipe B */
+#define   DISPPLANE_ROTATE_180                 (1 << 15)
+#define   DISPPLANE_TRICKLE_FEED_DISABLE       (1 << 14) /* Ironlake */
+#define   DISPPLANE_TILED                      (1 << 10)
+#define   DISPPLANE_MIRROR                     (1 << 8) /* CHV pipe B */
 #define _DSPAADDR                              0x70184
 #define _DSPASTRIDE                            0x70188
 #define _DSPAPOS                               0x7018C /* reserved */
@@ -6010,15 +6172,15 @@ enum {
 
 /* CHV pipe B blender and primary plane */
 #define _CHV_BLEND_A           0x60a00
-#define   CHV_BLEND_LEGACY             (0<<30)
-#define   CHV_BLEND_ANDROID            (1<<30)
-#define   CHV_BLEND_MPO                        (2<<30)
-#define   CHV_BLEND_MASK               (3<<30)
+#define   CHV_BLEND_LEGACY             (0 << 30)
+#define   CHV_BLEND_ANDROID            (1 << 30)
+#define   CHV_BLEND_MPO                        (2 << 30)
+#define   CHV_BLEND_MASK               (3 << 30)
 #define _CHV_CANVAS_A          0x60a04
 #define _PRIMPOS_A             0x60a08
 #define _PRIMSIZE_A            0x60a0c
 #define _PRIMCNSTALPHA_A       0x60a10
-#define   PRIM_CONST_ALPHA_ENABLE      (1<<31)
+#define   PRIM_CONST_ALPHA_ENABLE      (1 << 31)
 
 #define CHV_BLEND(pipe)                _MMIO_TRANS2(pipe, _CHV_BLEND_A)
 #define CHV_CANVAS(pipe)       _MMIO_TRANS2(pipe, _CHV_CANVAS_A)
@@ -6028,8 +6190,8 @@ enum {
 
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK     (0xfffff000)
-#define I915_LO_DISPBASE(val)  (val & ~DISP_BASEADDR_MASK)
-#define I915_HI_DISPBASE(val)  (val & DISP_BASEADDR_MASK)
+#define I915_LO_DISPBASE(val)  ((val) & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val)  ((val) & DISP_BASEADDR_MASK)
 
 /*
  * VBIOS flags
@@ -6059,7 +6221,7 @@ enum {
 
 /* Display B control */
 #define _DSPBCNTR              (dev_priv->info.display_mmio_offset + 0x71180)
-#define   DISPPLANE_ALPHA_TRANS_ENABLE         (1<<15)
+#define   DISPPLANE_ALPHA_TRANS_ENABLE         (1 << 15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE                0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY       0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY       (1)
@@ -6074,27 +6236,27 @@ enum {
 
 /* Sprite A control */
 #define _DVSACNTR              0x72180
-#define   DVS_ENABLE           (1<<31)
-#define   DVS_GAMMA_ENABLE     (1<<30)
-#define   DVS_YUV_RANGE_CORRECTION_DISABLE     (1<<27)
-#define   DVS_PIXFORMAT_MASK   (3<<25)
-#define   DVS_FORMAT_YUV422    (0<<25)
-#define   DVS_FORMAT_RGBX101010        (1<<25)
-#define   DVS_FORMAT_RGBX888   (2<<25)
-#define   DVS_FORMAT_RGBX161616        (3<<25)
-#define   DVS_PIPE_CSC_ENABLE   (1<<24)
-#define   DVS_SOURCE_KEY       (1<<22)
-#define   DVS_RGB_ORDER_XBGR   (1<<20)
-#define   DVS_YUV_FORMAT_BT709 (1<<18)
-#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
-#define   DVS_YUV_ORDER_YUYV   (0<<16)
-#define   DVS_YUV_ORDER_UYVY   (1<<16)
-#define   DVS_YUV_ORDER_YVYU   (2<<16)
-#define   DVS_YUV_ORDER_VYUY   (3<<16)
-#define   DVS_ROTATE_180       (1<<15)
-#define   DVS_DEST_KEY         (1<<2)
-#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
-#define   DVS_TILED            (1<<10)
+#define   DVS_ENABLE           (1 << 31)
+#define   DVS_GAMMA_ENABLE     (1 << 30)
+#define   DVS_YUV_RANGE_CORRECTION_DISABLE     (1 << 27)
+#define   DVS_PIXFORMAT_MASK   (3 << 25)
+#define   DVS_FORMAT_YUV422    (0 << 25)
+#define   DVS_FORMAT_RGBX101010        (1 << 25)
+#define   DVS_FORMAT_RGBX888   (2 << 25)
+#define   DVS_FORMAT_RGBX161616        (3 << 25)
+#define   DVS_PIPE_CSC_ENABLE   (1 << 24)
+#define   DVS_SOURCE_KEY       (1 << 22)
+#define   DVS_RGB_ORDER_XBGR   (1 << 20)
+#define   DVS_YUV_FORMAT_BT709 (1 << 18)
+#define   DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define   DVS_YUV_ORDER_YUYV   (0 << 16)
+#define   DVS_YUV_ORDER_UYVY   (1 << 16)
+#define   DVS_YUV_ORDER_YVYU   (2 << 16)
+#define   DVS_YUV_ORDER_VYUY   (3 << 16)
+#define   DVS_ROTATE_180       (1 << 15)
+#define   DVS_DEST_KEY         (1 << 2)
+#define   DVS_TRICKLE_FEED_DISABLE (1 << 14)
+#define   DVS_TILED            (1 << 10)
 #define _DVSALINOFF            0x72184
 #define _DVSASTRIDE            0x72188
 #define _DVSAPOS               0x7218c
@@ -6106,13 +6268,13 @@ enum {
 #define _DVSATILEOFF           0x721a4
 #define _DVSASURFLIVE          0x721ac
 #define _DVSASCALE             0x72204
-#define   DVS_SCALE_ENABLE     (1<<31)
-#define   DVS_FILTER_MASK      (3<<29)
-#define   DVS_FILTER_MEDIUM    (0<<29)
-#define   DVS_FILTER_ENHANCING (1<<29)
-#define   DVS_FILTER_SOFTENING (2<<29)
-#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
-#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define   DVS_SCALE_ENABLE     (1 << 31)
+#define   DVS_FILTER_MASK      (3 << 29)
+#define   DVS_FILTER_MEDIUM    (0 << 29)
+#define   DVS_FILTER_ENHANCING (1 << 29)
+#define   DVS_FILTER_SOFTENING (2 << 29)
+#define   DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
+#define   DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
 #define _DVSAGAMC              0x72300
 
 #define _DVSBCNTR              0x73180
@@ -6143,31 +6305,31 @@ enum {
 #define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 
 #define _SPRA_CTL              0x70280
-#define   SPRITE_ENABLE                        (1<<31)
-#define   SPRITE_GAMMA_ENABLE          (1<<30)
-#define   SPRITE_YUV_RANGE_CORRECTION_DISABLE  (1<<28)
-#define   SPRITE_PIXFORMAT_MASK                (7<<25)
-#define   SPRITE_FORMAT_YUV422         (0<<25)
-#define   SPRITE_FORMAT_RGBX101010     (1<<25)
-#define   SPRITE_FORMAT_RGBX888                (2<<25)
-#define   SPRITE_FORMAT_RGBX161616     (3<<25)
-#define   SPRITE_FORMAT_YUV444         (4<<25)
-#define   SPRITE_FORMAT_XR_BGR101010   (5<<25) /* Extended range */
-#define   SPRITE_PIPE_CSC_ENABLE       (1<<24)
-#define   SPRITE_SOURCE_KEY            (1<<22)
-#define   SPRITE_RGB_ORDER_RGBX                (1<<20) /* only for 888 and 161616 */
-#define   SPRITE_YUV_TO_RGB_CSC_DISABLE        (1<<19)
-#define   SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709   (1<<18) /* 0 is BT601 */
-#define   SPRITE_YUV_BYTE_ORDER_MASK   (3<<16)
-#define   SPRITE_YUV_ORDER_YUYV                (0<<16)
-#define   SPRITE_YUV_ORDER_UYVY                (1<<16)
-#define   SPRITE_YUV_ORDER_YVYU                (2<<16)
-#define   SPRITE_YUV_ORDER_VYUY                (3<<16)
-#define   SPRITE_ROTATE_180            (1<<15)
-#define   SPRITE_TRICKLE_FEED_DISABLE  (1<<14)
-#define   SPRITE_INT_GAMMA_ENABLE      (1<<13)
-#define   SPRITE_TILED                 (1<<10)
-#define   SPRITE_DEST_KEY              (1<<2)
+#define   SPRITE_ENABLE                        (1 << 31)
+#define   SPRITE_GAMMA_ENABLE          (1 << 30)
+#define   SPRITE_YUV_RANGE_CORRECTION_DISABLE  (1 << 28)
+#define   SPRITE_PIXFORMAT_MASK                (7 << 25)
+#define   SPRITE_FORMAT_YUV422         (0 << 25)
+#define   SPRITE_FORMAT_RGBX101010     (1 << 25)
+#define   SPRITE_FORMAT_RGBX888                (2 << 25)
+#define   SPRITE_FORMAT_RGBX161616     (3 << 25)
+#define   SPRITE_FORMAT_YUV444         (4 << 25)
+#define   SPRITE_FORMAT_XR_BGR101010   (5 << 25) /* Extended range */
+#define   SPRITE_PIPE_CSC_ENABLE       (1 << 24)
+#define   SPRITE_SOURCE_KEY            (1 << 22)
+#define   SPRITE_RGB_ORDER_RGBX                (1 << 20) /* only for 888 and 161616 */
+#define   SPRITE_YUV_TO_RGB_CSC_DISABLE        (1 << 19)
+#define   SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709   (1 << 18) /* 0 is BT601 */
+#define   SPRITE_YUV_BYTE_ORDER_MASK   (3 << 16)
+#define   SPRITE_YUV_ORDER_YUYV                (0 << 16)
+#define   SPRITE_YUV_ORDER_UYVY                (1 << 16)
+#define   SPRITE_YUV_ORDER_YVYU                (2 << 16)
+#define   SPRITE_YUV_ORDER_VYUY                (3 << 16)
+#define   SPRITE_ROTATE_180            (1 << 15)
+#define   SPRITE_TRICKLE_FEED_DISABLE  (1 << 14)
+#define   SPRITE_INT_GAMMA_ENABLE      (1 << 13)
+#define   SPRITE_TILED                 (1 << 10)
+#define   SPRITE_DEST_KEY              (1 << 2)
 #define _SPRA_LINOFF           0x70284
 #define _SPRA_STRIDE           0x70288
 #define _SPRA_POS              0x7028c
@@ -6180,13 +6342,13 @@ enum {
 #define _SPRA_OFFSET           0x702a4
 #define _SPRA_SURFLIVE         0x702ac
 #define _SPRA_SCALE            0x70304
-#define   SPRITE_SCALE_ENABLE  (1<<31)
-#define   SPRITE_FILTER_MASK   (3<<29)
-#define   SPRITE_FILTER_MEDIUM (0<<29)
-#define   SPRITE_FILTER_ENHANCING      (1<<29)
-#define   SPRITE_FILTER_SOFTENING      (2<<29)
-#define   SPRITE_VERTICAL_OFFSET_HALF  (1<<28) /* must be enabled below */
-#define   SPRITE_VERTICAL_OFFSET_ENABLE        (1<<27)
+#define   SPRITE_SCALE_ENABLE  (1 << 31)
+#define   SPRITE_FILTER_MASK   (3 << 29)
+#define   SPRITE_FILTER_MEDIUM (0 << 29)
+#define   SPRITE_FILTER_ENHANCING      (1 << 29)
+#define   SPRITE_FILTER_SOFTENING      (2 << 29)
+#define   SPRITE_VERTICAL_OFFSET_HALF  (1 << 28) /* must be enabled below */
+#define   SPRITE_VERTICAL_OFFSET_ENABLE        (1 << 27)
 #define _SPRA_GAMC             0x70400
 
 #define _SPRB_CTL              0x71280
@@ -6220,28 +6382,28 @@ enum {
 #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
 #define _SPACNTR               (VLV_DISPLAY_BASE + 0x72180)
-#define   SP_ENABLE                    (1<<31)
-#define   SP_GAMMA_ENABLE              (1<<30)
-#define   SP_PIXFORMAT_MASK            (0xf<<26)
-#define   SP_FORMAT_YUV422             (0<<26)
-#define   SP_FORMAT_BGR565             (5<<26)
-#define   SP_FORMAT_BGRX8888           (6<<26)
-#define   SP_FORMAT_BGRA8888           (7<<26)
-#define   SP_FORMAT_RGBX1010102                (8<<26)
-#define   SP_FORMAT_RGBA1010102                (9<<26)
-#define   SP_FORMAT_RGBX8888           (0xe<<26)
-#define   SP_FORMAT_RGBA8888           (0xf<<26)
-#define   SP_ALPHA_PREMULTIPLY         (1<<23) /* CHV pipe B */
-#define   SP_SOURCE_KEY                        (1<<22)
-#define   SP_YUV_FORMAT_BT709          (1<<18)
-#define   SP_YUV_BYTE_ORDER_MASK       (3<<16)
-#define   SP_YUV_ORDER_YUYV            (0<<16)
-#define   SP_YUV_ORDER_UYVY            (1<<16)
-#define   SP_YUV_ORDER_YVYU            (2<<16)
-#define   SP_YUV_ORDER_VYUY            (3<<16)
-#define   SP_ROTATE_180                        (1<<15)
-#define   SP_TILED                     (1<<10)
-#define   SP_MIRROR                    (1<<8) /* CHV pipe B */
+#define   SP_ENABLE                    (1 << 31)
+#define   SP_GAMMA_ENABLE              (1 << 30)
+#define   SP_PIXFORMAT_MASK            (0xf << 26)
+#define   SP_FORMAT_YUV422             (0 << 26)
+#define   SP_FORMAT_BGR565             (5 << 26)
+#define   SP_FORMAT_BGRX8888           (6 << 26)
+#define   SP_FORMAT_BGRA8888           (7 << 26)
+#define   SP_FORMAT_RGBX1010102                (8 << 26)
+#define   SP_FORMAT_RGBA1010102                (9 << 26)
+#define   SP_FORMAT_RGBX8888           (0xe << 26)
+#define   SP_FORMAT_RGBA8888           (0xf << 26)
+#define   SP_ALPHA_PREMULTIPLY         (1 << 23) /* CHV pipe B */
+#define   SP_SOURCE_KEY                        (1 << 22)
+#define   SP_YUV_FORMAT_BT709          (1 << 18)
+#define   SP_YUV_BYTE_ORDER_MASK       (3 << 16)
+#define   SP_YUV_ORDER_YUYV            (0 << 16)
+#define   SP_YUV_ORDER_UYVY            (1 << 16)
+#define   SP_YUV_ORDER_YVYU            (2 << 16)
+#define   SP_YUV_ORDER_VYUY            (3 << 16)
+#define   SP_ROTATE_180                        (1 << 15)
+#define   SP_TILED                     (1 << 10)
+#define   SP_MIRROR                    (1 << 8) /* CHV pipe B */
 #define _SPALINOFF             (VLV_DISPLAY_BASE + 0x72184)
 #define _SPASTRIDE             (VLV_DISPLAY_BASE + 0x72188)
 #define _SPAPOS                        (VLV_DISPLAY_BASE + 0x7218c)
@@ -6252,7 +6414,7 @@ enum {
 #define _SPAKEYMAXVAL          (VLV_DISPLAY_BASE + 0x721a0)
 #define _SPATILEOFF            (VLV_DISPLAY_BASE + 0x721a4)
 #define _SPACONSTALPHA         (VLV_DISPLAY_BASE + 0x721a8)
-#define   SP_CONST_ALPHA_ENABLE                (1<<31)
+#define   SP_CONST_ALPHA_ENABLE                (1 << 31)
 #define _SPACLRC0              (VLV_DISPLAY_BASE + 0x721d0)
 #define   SP_CONTRAST(x)               ((x) << 18) /* u3.6 */
 #define   SP_BRIGHTNESS(x)             ((x) & 0xff) /* s8 */
@@ -6344,40 +6506,40 @@ enum {
  * correctly map to the same formats in ICL, as long as bit 23 is set to 0
  */
 #define   PLANE_CTL_FORMAT_MASK                        (0xf << 24)
-#define   PLANE_CTL_FORMAT_YUV422              (  0 << 24)
-#define   PLANE_CTL_FORMAT_NV12                        (  1 << 24)
-#define   PLANE_CTL_FORMAT_XRGB_2101010                (  2 << 24)
-#define   PLANE_CTL_FORMAT_XRGB_8888           (  4 << 24)
-#define   PLANE_CTL_FORMAT_XRGB_16161616F      (  6 << 24)
-#define   PLANE_CTL_FORMAT_AYUV                        (  8 << 24)
-#define   PLANE_CTL_FORMAT_INDEXED             ( 12 << 24)
-#define   PLANE_CTL_FORMAT_RGB_565             ( 14 << 24)
+#define   PLANE_CTL_FORMAT_YUV422              (0 << 24)
+#define   PLANE_CTL_FORMAT_NV12                        (1 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_2101010                (2 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_8888           (4 << 24)
+#define   PLANE_CTL_FORMAT_XRGB_16161616F      (6 << 24)
+#define   PLANE_CTL_FORMAT_AYUV                        (8 << 24)
+#define   PLANE_CTL_FORMAT_INDEXED             (12 << 24)
+#define   PLANE_CTL_FORMAT_RGB_565             (14 << 24)
 #define   ICL_PLANE_CTL_FORMAT_MASK            (0x1f << 23)
 #define   PLANE_CTL_PIPE_CSC_ENABLE            (1 << 23) /* Pre-GLK */
 #define   PLANE_CTL_KEY_ENABLE_MASK            (0x3 << 21)
-#define   PLANE_CTL_KEY_ENABLE_SOURCE          (  1 << 21)
-#define   PLANE_CTL_KEY_ENABLE_DESTINATION     (  2 << 21)
+#define   PLANE_CTL_KEY_ENABLE_SOURCE          (1 << 21)
+#define   PLANE_CTL_KEY_ENABLE_DESTINATION     (2 << 21)
 #define   PLANE_CTL_ORDER_BGRX                 (0 << 20)
 #define   PLANE_CTL_ORDER_RGBX                 (1 << 20)
 #define   PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709        (1 << 18)
 #define   PLANE_CTL_YUV422_ORDER_MASK          (0x3 << 16)
-#define   PLANE_CTL_YUV422_YUYV                        (  0 << 16)
-#define   PLANE_CTL_YUV422_UYVY                        (  1 << 16)
-#define   PLANE_CTL_YUV422_YVYU                        (  2 << 16)
-#define   PLANE_CTL_YUV422_VYUY                        (  3 << 16)
+#define   PLANE_CTL_YUV422_YUYV                        (0 << 16)
+#define   PLANE_CTL_YUV422_UYVY                        (1 << 16)
+#define   PLANE_CTL_YUV422_YVYU                        (2 << 16)
+#define   PLANE_CTL_YUV422_VYUY                        (3 << 16)
 #define   PLANE_CTL_DECOMPRESSION_ENABLE       (1 << 15)
 #define   PLANE_CTL_TRICKLE_FEED_DISABLE       (1 << 14)
 #define   PLANE_CTL_PLANE_GAMMA_DISABLE                (1 << 13) /* Pre-GLK */
 #define   PLANE_CTL_TILED_MASK                 (0x7 << 10)
-#define   PLANE_CTL_TILED_LINEAR               (  0 << 10)
-#define   PLANE_CTL_TILED_X                    (  1 << 10)
-#define   PLANE_CTL_TILED_Y                    (  4 << 10)
-#define   PLANE_CTL_TILED_YF                   (  5 << 10)
-#define   PLANE_CTL_FLIP_HORIZONTAL            (  1 << 8)
+#define   PLANE_CTL_TILED_LINEAR               (0 << 10)
+#define   PLANE_CTL_TILED_X                    (1 << 10)
+#define   PLANE_CTL_TILED_Y                    (4 << 10)
+#define   PLANE_CTL_TILED_YF                   (5 << 10)
+#define   PLANE_CTL_FLIP_HORIZONTAL            (1 << 8)
 #define   PLANE_CTL_ALPHA_MASK                 (0x3 << 4) /* Pre-GLK */
-#define   PLANE_CTL_ALPHA_DISABLE              (  0 << 4)
-#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY       (  2 << 4)
-#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY       (  3 << 4)
+#define   PLANE_CTL_ALPHA_DISABLE              (0 << 4)
+#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY       (2 << 4)
+#define   PLANE_CTL_ALPHA_HW_PREMULTIPLY       (3 << 4)
 #define   PLANE_CTL_ROTATE_MASK                        0x3
 #define   PLANE_CTL_ROTATE_0                   0x0
 #define   PLANE_CTL_ROTATE_90                  0x1
@@ -6605,7 +6767,7 @@ enum {
 # define VFMUNIT_CLOCK_GATE_DISABLE            (1 << 11)
 
 #define FDI_PLL_FREQ_CTL        _MMIO(0x46030)
-#define  FDI_PLL_FREQ_CHANGE_REQUEST    (1<<24)
+#define  FDI_PLL_FREQ_CHANGE_REQUEST    (1 << 24)
 #define  FDI_PLL_FREQ_LOCK_LIMIT_MASK   0xfff00
 #define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK  0xff
 
@@ -6654,14 +6816,14 @@ enum {
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define _PFA_CTL_1               0x68080
 #define _PFB_CTL_1               0x68880
-#define  PF_ENABLE              (1<<31)
-#define  PF_PIPE_SEL_MASK_IVB  (3<<29)
-#define  PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
-#define  PF_FILTER_MASK                (3<<23)
-#define  PF_FILTER_PROGRAMMED  (0<<23)
-#define  PF_FILTER_MED_3x3     (1<<23)
-#define  PF_FILTER_EDGE_ENHANCE        (2<<23)
-#define  PF_FILTER_EDGE_SOFTEN (3<<23)
+#define  PF_ENABLE              (1 << 31)
+#define  PF_PIPE_SEL_MASK_IVB  (3 << 29)
+#define  PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
+#define  PF_FILTER_MASK                (3 << 23)
+#define  PF_FILTER_PROGRAMMED  (0 << 23)
+#define  PF_FILTER_MED_3x3     (1 << 23)
+#define  PF_FILTER_EDGE_ENHANCE        (2 << 23)
+#define  PF_FILTER_EDGE_SOFTEN (3 << 23)
 #define _PFA_WIN_SZ            0x68074
 #define _PFB_WIN_SZ            0x68874
 #define _PFA_WIN_POS           0x68070
@@ -6679,7 +6841,7 @@ enum {
 
 #define _PSA_CTL               0x68180
 #define _PSB_CTL               0x68980
-#define PS_ENABLE              (1<<31)
+#define PS_ENABLE              (1 << 31)
 #define _PSA_WIN_SZ            0x68174
 #define _PSB_WIN_SZ            0x68974
 #define _PSA_WIN_POS           0x68170
@@ -6764,6 +6926,10 @@ enum {
 #define _PS_VPHASE_1B       0x68988
 #define _PS_VPHASE_2B       0x68A88
 #define _PS_VPHASE_1C       0x69188
+#define  PS_Y_PHASE(x)         ((x) << 16)
+#define  PS_UV_RGB_PHASE(x)    ((x) << 0)
+#define   PS_PHASE_MASK        (0x7fff << 1) /* u2.13 */
+#define   PS_PHASE_TRIP        (1 << 0)
 
 #define _PS_HPHASE_1A       0x68194
 #define _PS_HPHASE_2A       0x68294
@@ -6777,7 +6943,7 @@ enum {
 #define _PS_ECC_STAT_2B     0x68AD0
 #define _PS_ECC_STAT_1C     0x691D0
 
-#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define _ID(id, a, b) _PICK_EVEN(id, a, b)
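/*
 * Editor's note: _PICK_EVEN(id, a, b) evaluates (a) + (id) * ((b) - (a)),
 * the same linear interpolation the old _ID() open-coded, so id 0 picks
 * the first register in the pair and id 1 the second.
 */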
 #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe,        \
                        _ID(id, _PS_1A_CTRL, _PS_2A_CTRL),       \
                        _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
@@ -6858,37 +7024,37 @@ enum {
 #define DE_PIPEB_CRC_DONE      (1 << 10)
 #define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
 #define DE_PIPEA_VBLANK         (1 << 7)
-#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8*(pipe)))
+#define DE_PIPE_VBLANK(pipe)    (1 << (7 + 8 * (pipe)))
 #define DE_PIPEA_EVEN_FIELD     (1 << 6)
 #define DE_PIPEA_ODD_FIELD      (1 << 5)
 #define DE_PIPEA_LINE_COMPARE   (1 << 4)
 #define DE_PIPEA_VSYNC          (1 << 3)
 #define DE_PIPEA_CRC_DONE      (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe)))
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8*(pipe)))
+#define DE_PIPE_FIFO_UNDERRUN(pipe)  (1 << (8 * (pipe)))
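/*
 * Editor's note: the ILK display-engine bits repeat with a stride of 8 per
 * pipe, so e.g. DE_PIPE_VBLANK(PIPE_B) is bit 15 and
 * DE_PIPE_FIFO_UNDERRUN(PIPE_C) is bit 16, per the arithmetic above.
 */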
 
 /* More Ivybridge lolz */
-#define DE_ERR_INT_IVB                 (1<<30)
-#define DE_GSE_IVB                     (1<<29)
-#define DE_PCH_EVENT_IVB               (1<<28)
-#define DE_DP_A_HOTPLUG_IVB            (1<<27)
-#define DE_AUX_CHANNEL_A_IVB           (1<<26)
-#define DE_EDP_PSR_INT_HSW             (1<<19)
-#define DE_SPRITEC_FLIP_DONE_IVB       (1<<14)
-#define DE_PLANEC_FLIP_DONE_IVB                (1<<13)
-#define DE_PIPEC_VBLANK_IVB            (1<<10)
-#define DE_SPRITEB_FLIP_DONE_IVB       (1<<9)
-#define DE_PLANEB_FLIP_DONE_IVB                (1<<8)
-#define DE_PIPEB_VBLANK_IVB            (1<<5)
-#define DE_SPRITEA_FLIP_DONE_IVB       (1<<4)
-#define DE_PLANEA_FLIP_DONE_IVB                (1<<3)
-#define DE_PLANE_FLIP_DONE_IVB(plane)  (1<< (3 + 5*(plane)))
-#define DE_PIPEA_VBLANK_IVB            (1<<0)
+#define DE_ERR_INT_IVB                 (1 << 30)
+#define DE_GSE_IVB                     (1 << 29)
+#define DE_PCH_EVENT_IVB               (1 << 28)
+#define DE_DP_A_HOTPLUG_IVB            (1 << 27)
+#define DE_AUX_CHANNEL_A_IVB           (1 << 26)
+#define DE_EDP_PSR_INT_HSW             (1 << 19)
+#define DE_SPRITEC_FLIP_DONE_IVB       (1 << 14)
+#define DE_PLANEC_FLIP_DONE_IVB                (1 << 13)
+#define DE_PIPEC_VBLANK_IVB            (1 << 10)
+#define DE_SPRITEB_FLIP_DONE_IVB       (1 << 9)
+#define DE_PLANEB_FLIP_DONE_IVB                (1 << 8)
+#define DE_PIPEB_VBLANK_IVB            (1 << 5)
+#define DE_SPRITEA_FLIP_DONE_IVB       (1 << 4)
+#define DE_PLANEA_FLIP_DONE_IVB                (1 << 3)
+#define DE_PLANE_FLIP_DONE_IVB(plane)  (1 << (3 + 5 * (plane)))
+#define DE_PIPEA_VBLANK_IVB            (1 << 0)
 #define DE_PIPE_VBLANK_IVB(pipe)       (1 << ((pipe) * 5))
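/*
 * Editor's note: on IVB the per-pipe stride is 5 bits, so
 * DE_PIPE_VBLANK_IVB() lands on bits 0/5/10 and DE_PLANE_FLIP_DONE_IVB()
 * on bits 3/8/13 for pipes/planes A/B/C.
 */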
 
 #define VLV_MASTER_IER                 _MMIO(0x4400c) /* Gunit master IER */
-#define   MASTER_INTERRUPT_ENABLE      (1<<31)
+#define   MASTER_INTERRUPT_ENABLE      (1 << 31)
 
 #define DEISR   _MMIO(0x44000)
 #define DEIMR   _MMIO(0x44004)
@@ -6901,37 +7067,37 @@ enum {
 #define GTIER   _MMIO(0x4401c)
 
 #define GEN8_MASTER_IRQ                        _MMIO(0x44200)
-#define  GEN8_MASTER_IRQ_CONTROL       (1<<31)
-#define  GEN8_PCU_IRQ                  (1<<30)
-#define  GEN8_DE_PCH_IRQ               (1<<23)
-#define  GEN8_DE_MISC_IRQ              (1<<22)
-#define  GEN8_DE_PORT_IRQ              (1<<20)
-#define  GEN8_DE_PIPE_C_IRQ            (1<<18)
-#define  GEN8_DE_PIPE_B_IRQ            (1<<17)
-#define  GEN8_DE_PIPE_A_IRQ            (1<<16)
-#define  GEN8_DE_PIPE_IRQ(pipe)                (1<<(16+(pipe)))
-#define  GEN8_GT_VECS_IRQ              (1<<6)
-#define  GEN8_GT_GUC_IRQ               (1<<5)
-#define  GEN8_GT_PM_IRQ                        (1<<4)
-#define  GEN8_GT_VCS2_IRQ              (1<<3)
-#define  GEN8_GT_VCS1_IRQ              (1<<2)
-#define  GEN8_GT_BCS_IRQ               (1<<1)
-#define  GEN8_GT_RCS_IRQ               (1<<0)
+#define  GEN8_MASTER_IRQ_CONTROL       (1 << 31)
+#define  GEN8_PCU_IRQ                  (1 << 30)
+#define  GEN8_DE_PCH_IRQ               (1 << 23)
+#define  GEN8_DE_MISC_IRQ              (1 << 22)
+#define  GEN8_DE_PORT_IRQ              (1 << 20)
+#define  GEN8_DE_PIPE_C_IRQ            (1 << 18)
+#define  GEN8_DE_PIPE_B_IRQ            (1 << 17)
+#define  GEN8_DE_PIPE_A_IRQ            (1 << 16)
+#define  GEN8_DE_PIPE_IRQ(pipe)                (1 << (16 + (pipe)))
+#define  GEN8_GT_VECS_IRQ              (1 << 6)
+#define  GEN8_GT_GUC_IRQ               (1 << 5)
+#define  GEN8_GT_PM_IRQ                        (1 << 4)
+#define  GEN8_GT_VCS2_IRQ              (1 << 3)
+#define  GEN8_GT_VCS1_IRQ              (1 << 2)
+#define  GEN8_GT_BCS_IRQ               (1 << 1)
+#define  GEN8_GT_RCS_IRQ               (1 << 0)
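/*
 * Editor's note: GEN8_DE_PIPE_IRQ(pipe) reproduces the three fixed bits
 * above -- (16 + PIPE_A) == 16, (16 + PIPE_B) == 17, (16 + PIPE_C) == 18 --
 * so interrupt code can index by enum pipe instead of hard-coding a bit.
 */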
 
 #define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
 #define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
 #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
 #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
 
-#define GEN9_GUC_TO_HOST_INT_EVENT     (1<<31)
-#define GEN9_GUC_EXEC_ERROR_EVENT      (1<<30)
-#define GEN9_GUC_DISPLAY_EVENT         (1<<29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT     (1<<28)
-#define GEN9_GUC_IOMMU_MSG_EVENT       (1<<27)
-#define GEN9_GUC_DB_RING_EVENT         (1<<26)
-#define GEN9_GUC_DMA_DONE_EVENT                (1<<25)
-#define GEN9_GUC_FATAL_ERROR_EVENT     (1<<24)
-#define GEN9_GUC_NOTIFICATION_EVENT    (1<<23)
+#define GEN9_GUC_TO_HOST_INT_EVENT     (1 << 31)
+#define GEN9_GUC_EXEC_ERROR_EVENT      (1 << 30)
+#define GEN9_GUC_DISPLAY_EVENT         (1 << 29)
+#define GEN9_GUC_SEMA_SIGNAL_EVENT     (1 << 28)
+#define GEN9_GUC_IOMMU_MSG_EVENT       (1 << 27)
+#define GEN9_GUC_DB_RING_EVENT         (1 << 26)
+#define GEN9_GUC_DMA_DONE_EVENT                (1 << 25)
+#define GEN9_GUC_FATAL_ERROR_EVENT     (1 << 24)
+#define GEN9_GUC_NOTIFICATION_EVENT    (1 << 23)
 
 #define GEN8_RCS_IRQ_SHIFT 0
 #define GEN8_BCS_IRQ_SHIFT 16
@@ -6980,6 +7146,7 @@ enum {
 #define GEN8_DE_PORT_IMR _MMIO(0x44444)
 #define GEN8_DE_PORT_IIR _MMIO(0x44448)
 #define GEN8_DE_PORT_IER _MMIO(0x4444c)
+#define  ICL_AUX_CHANNEL_E             (1 << 29)
 #define  CNL_AUX_CHANNEL_F             (1 << 28)
 #define  GEN9_AUX_CHANNEL_D            (1 << 27)
 #define  GEN9_AUX_CHANNEL_C            (1 << 26)
@@ -7006,9 +7173,16 @@ enum {
 #define GEN8_PCU_IIR _MMIO(0x444e8)
 #define GEN8_PCU_IER _MMIO(0x444ec)
 
+#define GEN11_GU_MISC_ISR      _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR      _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR      _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER      _MMIO(0x444fc)
+#define  GEN11_GU_MISC_GSE     (1 << 27)
+
 #define GEN11_GFX_MSTR_IRQ             _MMIO(0x190010)
 #define  GEN11_MASTER_IRQ              (1 << 31)
 #define  GEN11_PCU_IRQ                 (1 << 30)
+#define  GEN11_GU_MISC_IRQ             (1 << 29)
 #define  GEN11_DISPLAY_IRQ             (1 << 16)
 #define  GEN11_GT_DW_IRQ(x)            (1 << (x))
 #define  GEN11_GT_DW1_IRQ              (1 << 1)
@@ -7019,11 +7193,40 @@ enum {
 #define  GEN11_AUDIO_CODEC_IRQ         (1 << 24)
 #define  GEN11_DE_PCH_IRQ              (1 << 23)
 #define  GEN11_DE_MISC_IRQ             (1 << 22)
+#define  GEN11_DE_HPD_IRQ              (1 << 21)
 #define  GEN11_DE_PORT_IRQ             (1 << 20)
 #define  GEN11_DE_PIPE_C               (1 << 18)
 #define  GEN11_DE_PIPE_B               (1 << 17)
 #define  GEN11_DE_PIPE_A               (1 << 16)
 
+#define GEN11_DE_HPD_ISR               _MMIO(0x44470)
+#define GEN11_DE_HPD_IMR               _MMIO(0x44474)
+#define GEN11_DE_HPD_IIR               _MMIO(0x44478)
+#define GEN11_DE_HPD_IER               _MMIO(0x4447c)
+#define  GEN11_TC4_HOTPLUG                     (1 << 19)
+#define  GEN11_TC3_HOTPLUG                     (1 << 18)
+#define  GEN11_TC2_HOTPLUG                     (1 << 17)
+#define  GEN11_TC1_HOTPLUG                     (1 << 16)
+#define  GEN11_DE_TC_HOTPLUG_MASK              (GEN11_TC4_HOTPLUG | \
+                                                GEN11_TC3_HOTPLUG | \
+                                                GEN11_TC2_HOTPLUG | \
+                                                GEN11_TC1_HOTPLUG)
+#define  GEN11_TBT4_HOTPLUG                    (1 << 3)
+#define  GEN11_TBT3_HOTPLUG                    (1 << 2)
+#define  GEN11_TBT2_HOTPLUG                    (1 << 1)
+#define  GEN11_TBT1_HOTPLUG                    (1 << 0)
+#define  GEN11_DE_TBT_HOTPLUG_MASK             (GEN11_TBT4_HOTPLUG | \
+                                                GEN11_TBT3_HOTPLUG | \
+                                                GEN11_TBT2_HOTPLUG | \
+                                                GEN11_TBT1_HOTPLUG)
+
+#define GEN11_TBT_HOTPLUG_CTL                          _MMIO(0x44030)
+#define GEN11_TC_HOTPLUG_CTL                           _MMIO(0x44038)
+#define  GEN11_HOTPLUG_CTL_ENABLE(tc_port)             (8 << (tc_port) * 4)
+#define  GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port)                (2 << (tc_port) * 4)
+#define  GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port)       (1 << (tc_port) * 4)
+#define  GEN11_HOTPLUG_CTL_NO_DETECT(tc_port)          (0 << (tc_port) * 4)
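/*
 * Editor's sketch (helper name hypothetical): each TC port owns one nibble
 * of the hotplug CTL registers -- enable in bit 3, long/short detect in
 * bits 1:0 -- so for tc_port 2 the enable bit is 8 << 8, i.e. bit 11.
 */
static inline u32 gen11_hotplug_ctl_nibble(int tc_port)
{
	return 0xf << (tc_port * 4); /* whole per-port field */
}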
+
 #define GEN11_GT_INTR_DW0              _MMIO(0x190018)
 #define  GEN11_CSME                    (31)
 #define  GEN11_GUNIT                   (28)
@@ -7038,7 +7241,7 @@ enum {
 #define  GEN11_VECS(x)                 (31 - (x))
 #define  GEN11_VCS(x)                  (x)
 
-#define GEN11_GT_INTR_DW(x)            _MMIO(0x190018 + (x * 4))
+#define GEN11_GT_INTR_DW(x)            _MMIO(0x190018 + ((x) * 4))
 
 #define GEN11_INTR_IDENTITY_REG0       _MMIO(0x190060)
 #define GEN11_INTR_IDENTITY_REG1       _MMIO(0x190064)
@@ -7047,12 +7250,12 @@ enum {
 #define  GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20)
 #define  GEN11_INTR_ENGINE_INTR(x)     ((x) & 0xffff)
 
-#define GEN11_INTR_IDENTITY_REG(x)     _MMIO(0x190060 + (x * 4))
+#define GEN11_INTR_IDENTITY_REG(x)     _MMIO(0x190060 + ((x) * 4))
 
 #define GEN11_IIR_REG0_SELECTOR                _MMIO(0x190070)
 #define GEN11_IIR_REG1_SELECTOR                _MMIO(0x190074)
 
-#define GEN11_IIR_REG_SELECTOR(x)      _MMIO(0x190070 + (x * 4))
+#define GEN11_IIR_REG_SELECTOR(x)      _MMIO(0x190070 + ((x) * 4))
 
 #define GEN11_RENDER_COPY_INTR_ENABLE  _MMIO(0x190030)
 #define GEN11_VCS_VECS_INTR_ENABLE     _MMIO(0x190034)
@@ -7074,8 +7277,8 @@ enum {
 #define ILK_DISPLAY_CHICKEN2   _MMIO(0x42004)
 /* Required on all Ironlake and Sandybridge according to the B-Spec. */
 #define  ILK_ELPIN_409_SELECT  (1 << 25)
-#define  ILK_DPARB_GATE        (1<<22)
-#define  ILK_VSDPFD_FULL       (1<<21)
+#define  ILK_DPARB_GATE        (1 << 22)
+#define  ILK_VSDPFD_FULL       (1 << 21)
 #define FUSE_STRAP                     _MMIO(0x42014)
 #define  ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
 #define  ILK_INTERNAL_DISPLAY_DISABLE  (1 << 30)
@@ -7125,31 +7328,31 @@ enum {
 #define CHICKEN_TRANS_A         0x420c0
 #define CHICKEN_TRANS_B         0x420c4
 #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
-#define  VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
-#define  DDI_TRAINING_OVERRIDE_ENABLE  (1<<19)
-#define  DDI_TRAINING_OVERRIDE_VALUE   (1<<18)
-#define  DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */
-#define  DDIE_TRAINING_OVERRIDE_VALUE  (1<<16) /* CHICKEN_TRANS_A only */
-#define  PSR2_ADD_VERTICAL_LINE_COUNT   (1<<15)
-#define  PSR2_VSC_ENABLE_PROG_HEADER    (1<<12)
+#define  VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
+#define  DDI_TRAINING_OVERRIDE_ENABLE  (1 << 19)
+#define  DDI_TRAINING_OVERRIDE_VALUE   (1 << 18)
+#define  DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
+#define  DDIE_TRAINING_OVERRIDE_VALUE  (1 << 16) /* CHICKEN_TRANS_A only */
+#define  PSR2_ADD_VERTICAL_LINE_COUNT   (1 << 15)
+#define  PSR2_VSC_ENABLE_PROG_HEADER    (1 << 12)
 
 #define DISP_ARB_CTL   _MMIO(0x45000)
-#define  DISP_FBC_MEMORY_WAKE          (1<<31)
-#define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
-#define  DISP_FBC_WM_DIS               (1<<15)
+#define  DISP_FBC_MEMORY_WAKE          (1 << 31)
+#define  DISP_TILE_SURFACE_SWIZZLING   (1 << 13)
+#define  DISP_FBC_WM_DIS               (1 << 15)
 #define DISP_ARB_CTL2  _MMIO(0x45004)
-#define  DISP_DATA_PARTITION_5_6       (1<<6)
-#define  DISP_IPC_ENABLE               (1<<3)
+#define  DISP_DATA_PARTITION_5_6       (1 << 6)
+#define  DISP_IPC_ENABLE               (1 << 3)
 #define DBUF_CTL       _MMIO(0x45008)
 #define DBUF_CTL_S1    _MMIO(0x45008)
 #define DBUF_CTL_S2    _MMIO(0x44FE8)
-#define  DBUF_POWER_REQUEST            (1<<31)
-#define  DBUF_POWER_STATE              (1<<30)
+#define  DBUF_POWER_REQUEST            (1 << 31)
+#define  DBUF_POWER_STATE              (1 << 30)
 #define GEN7_MSG_CTL   _MMIO(0x45010)
-#define  WAIT_FOR_PCH_RESET_ACK                (1<<1)
-#define  WAIT_FOR_PCH_FLR_ACK          (1<<0)
+#define  WAIT_FOR_PCH_RESET_ACK                (1 << 1)
+#define  WAIT_FOR_PCH_FLR_ACK          (1 << 0)
 #define HSW_NDE_RSTWRN_OPT     _MMIO(0x46408)
-#define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
+#define  RESET_PCH_HANDSHAKE_ENABLE    (1 << 4)
 
 #define GEN8_CHICKEN_DCPR_1            _MMIO(0x46430)
 #define   SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
@@ -7174,16 +7377,16 @@ enum {
 #define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz      (2 << 29)
 
 #define GEN7_FF_SLICE_CS_CHICKEN1      _MMIO(0x20e0)
-#define   GEN9_FFSC_PERCTX_PREEMPT_CTRL        (1<<14)
+#define   GEN9_FFSC_PERCTX_PREEMPT_CTRL        (1 << 14)
 
 #define FF_SLICE_CS_CHICKEN2                   _MMIO(0x20e4)
-#define  GEN9_TSG_BARRIER_ACK_DISABLE          (1<<8)
-#define  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE  (1<<10)
+#define  GEN9_TSG_BARRIER_ACK_DISABLE          (1 << 8)
+#define  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE  (1 << 10)
 
 #define GEN9_CS_DEBUG_MODE1            _MMIO(0x20ec)
 #define GEN9_CTX_PREEMPT_REG           _MMIO(0x2248)
 #define GEN8_CS_CHICKEN1               _MMIO(0x2580)
-#define GEN9_PREEMPT_3D_OBJECT_LEVEL           (1<<0)
+#define GEN9_PREEMPT_3D_OBJECT_LEVEL           (1 << 0)
 #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo)       (((hi) << 2) | ((lo) << 1))
 #define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL    GEN9_PREEMPT_GPGPU_LEVEL(0, 0)
 #define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL  GEN9_PREEMPT_GPGPU_LEVEL(0, 1)
@@ -7192,22 +7395,27 @@ enum {
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1             _MMIO(0x7010)
-# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
-# define GEN9_RHWO_OPTIMIZATION_DISABLE                (1<<14)
-#define COMMON_SLICE_CHICKEN2                  _MMIO(0x7014)
-# define GEN9_PBE_COMPRESSED_HASH_SELECTION    (1<<13)
-# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12)
-# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
-# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE  (1<<0)
+  #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC    ((1 << 10) | (1 << 26))
+  #define GEN9_RHWO_OPTIMIZATION_DISABLE       (1 << 14)
+
+#define COMMON_SLICE_CHICKEN2                                  _MMIO(0x7014)
+  #define GEN9_PBE_COMPRESSED_HASH_SELECTION                   (1 << 13)
+  #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE       (1 << 12)
+  #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION             (1 << 8)
+  #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE                 (1 << 0)
+
+#define GEN11_COMMON_SLICE_CHICKEN3            _MMIO(0x7304)
+  #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC   (1 << 11)
 
 #define HIZ_CHICKEN                                    _MMIO(0x7018)
-# define CHV_HZ_8X8_MODE_IN_1X                         (1<<15)
-# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE   (1<<3)
+# define CHV_HZ_8X8_MODE_IN_1X                         (1 << 15)
+# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE   (1 << 3)
 
 #define GEN9_SLICE_COMMON_ECO_CHICKEN0         _MMIO(0x7308)
-#define  DISABLE_PIXEL_MASK_CAMMING            (1<<14)
+#define  DISABLE_PIXEL_MASK_CAMMING            (1 << 14)
 
 #define GEN9_SLICE_COMMON_ECO_CHICKEN1         _MMIO(0x731c)
+#define   GEN11_STATE_CACHE_REDIRECT_TO_CS     (1 << 11)
 
 #define GEN7_L3SQCREG1                         _MMIO(0xB010)
 #define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
@@ -7225,7 +7433,7 @@ enum {
 
 #define GEN7_L3CNTLREG1                                _MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
-#define  GEN7_L3AGDIS                          (1<<19)
+#define  GEN7_L3AGDIS                          (1 << 19)
 #define GEN7_L3CNTLREG2                                _MMIO(0xB020)
 #define GEN7_L3CNTLREG3                                _MMIO(0xB024)
 
@@ -7235,7 +7443,7 @@ enum {
 #define   GEN11_I2M_WRITE_DISABLE              (1 << 28)
 
 #define GEN7_L3SQCREG4                         _MMIO(0xb034)
-#define  L3SQ_URB_READ_CAM_MATCH_DISABLE       (1<<27)
+#define  L3SQ_URB_READ_CAM_MATCH_DISABLE       (1 << 27)
 
 #define GEN8_L3SQCREG4                         _MMIO(0xb118)
 #define  GEN11_LQSC_CLEAN_EVICT_DISABLE                (1 << 6)
@@ -7246,12 +7454,12 @@ enum {
 #define HDC_CHICKEN0                           _MMIO(0x7300)
 #define CNL_HDC_CHICKEN0                       _MMIO(0xE5F0)
 #define ICL_HDC_MODE                           _MMIO(0xE5F4)
-#define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE        (1<<15)
-#define  HDC_FENCE_DEST_SLM_DISABLE            (1<<14)
-#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED       (1<<11)
-#define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT   (1<<5)
-#define  HDC_FORCE_NON_COHERENT                        (1<<4)
-#define  HDC_BARRIER_PERFORMANCE_DISABLE       (1<<10)
+#define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE        (1 << 15)
+#define  HDC_FENCE_DEST_SLM_DISABLE            (1 << 14)
+#define  HDC_DONOT_FETCH_MEM_WHEN_MASKED       (1 << 11)
+#define  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT   (1 << 5)
+#define  HDC_FORCE_NON_COHERENT                        (1 << 4)
+#define  HDC_BARRIER_PERFORMANCE_DISABLE       (1 << 10)
 
 #define GEN8_HDC_CHICKEN1                      _MMIO(0x7304)
 
@@ -7264,13 +7472,21 @@ enum {
 
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         _MMIO(0x9030)
-#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
+#define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1 << 11)
 
 #define HSW_SCRATCH1                           _MMIO(0xb038)
-#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1<<27)
+#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1 << 27)
 
 #define BDW_SCRATCH1                                   _MMIO(0xb11c)
-#define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE     (1<<2)
+#define  GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE     (1 << 2)
+
+/* GEN11 chicken */
+#define _PIPEA_CHICKEN                 0x70038
+#define _PIPEB_CHICKEN                 0x71038
+#define _PIPEC_CHICKEN                 0x72038
+#define  PER_PIXEL_ALPHA_BYPASS_EN     (1 << 7)
+#define PIPE_CHICKEN(pipe)             _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+                                                  _PIPEB_CHICKEN)
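/*
 * A minimal sketch of the per-pipe extrapolation behind PIPE_CHICKEN(),
 * assuming _MMIO_PIPE(pipe, a, b) resolves to (a) + (pipe) * ((b) - (a))
 * as the _PIPE() helpers do elsewhere in i915_reg.h; pipe C then lands
 * on _PIPEC_CHICKEN even though only the A and B offsets are named:
 *
 *	0x70038 + 2 * (0x71038 - 0x70038) == 0x72038
 */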
 
 /* PCH */
 
@@ -7315,7 +7531,7 @@ enum {
 #define SDE_TRANSA_FIFO_UNDER  (1 << 0)
 #define SDE_TRANS_MASK         (0x3f)
 
-/* south display engine interrupt: CPT/PPT */
+/* south display engine interrupt: CPT - CNP */
 #define SDE_AUDIO_POWER_D_CPT  (1 << 31)
 #define SDE_AUDIO_POWER_C_CPT  (1 << 30)
 #define SDE_AUDIO_POWER_B_CPT  (1 << 29)
@@ -7363,14 +7579,29 @@ enum {
                                 SDE_FDI_RXB_CPT | \
                                 SDE_FDI_RXA_CPT)
 
+/* south display engine interrupt: ICP */
+#define SDE_TC4_HOTPLUG_ICP            (1 << 27)
+#define SDE_TC3_HOTPLUG_ICP            (1 << 26)
+#define SDE_TC2_HOTPLUG_ICP            (1 << 25)
+#define SDE_TC1_HOTPLUG_ICP            (1 << 24)
+#define SDE_GMBUS_ICP                  (1 << 23)
+#define SDE_DDIB_HOTPLUG_ICP           (1 << 17)
+#define SDE_DDIA_HOTPLUG_ICP           (1 << 16)
+#define SDE_DDI_MASK_ICP               (SDE_DDIB_HOTPLUG_ICP | \
+                                        SDE_DDIA_HOTPLUG_ICP)
+#define SDE_TC_MASK_ICP                        (SDE_TC4_HOTPLUG_ICP |  \
+                                        SDE_TC3_HOTPLUG_ICP |  \
+                                        SDE_TC2_HOTPLUG_ICP |  \
+                                        SDE_TC1_HOTPLUG_ICP)
+
 #define SDEISR  _MMIO(0xc4000)
 #define SDEIMR  _MMIO(0xc4004)
 #define SDEIIR  _MMIO(0xc4008)
 #define SDEIER  _MMIO(0xc400c)
 
 #define SERR_INT                       _MMIO(0xc4040)
-#define  SERR_INT_POISON               (1<<31)
-#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1<<((pipe)*3))
+#define  SERR_INT_POISON               (1 << 31)
+#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1 << ((pipe) * 3))
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG               _MMIO(0xc4030)  /* SHOTPLUG_CTL */
@@ -7423,6 +7654,134 @@ enum {
 #define  PORTE_HOTPLUG_SHORT_DETECT    (1 << 0)
 #define  PORTE_HOTPLUG_LONG_DETECT     (2 << 0)
 
+/* These registers reuse the PCH_PORT_HOTPLUG register space. The
+ * functionality previously covered by PCH_PORT_HOTPLUG is split into
+ * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC.
+ */
+
+#define SHOTPLUG_CTL_DDI                       _MMIO(0xc4030)
+#define   ICP_DDIB_HPD_ENABLE                  (1 << 7)
+#define   ICP_DDIB_HPD_STATUS_MASK             (3 << 4)
+#define   ICP_DDIB_HPD_NO_DETECT               (0 << 4)
+#define   ICP_DDIB_HPD_SHORT_DETECT            (1 << 4)
+#define   ICP_DDIB_HPD_LONG_DETECT             (2 << 4)
+#define   ICP_DDIB_HPD_SHORT_LONG_DETECT       (3 << 4)
+#define   ICP_DDIA_HPD_ENABLE                  (1 << 3)
+#define   ICP_DDIA_HPD_STATUS_MASK             (3 << 0)
+#define   ICP_DDIA_HPD_NO_DETECT               (0 << 0)
+#define   ICP_DDIA_HPD_SHORT_DETECT            (1 << 0)
+#define   ICP_DDIA_HPD_LONG_DETECT             (2 << 0)
+#define   ICP_DDIA_HPD_SHORT_LONG_DETECT       (3 << 0)
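/*
 * A minimal decode sketch built from the DDI B defines above; nothing
 * here beyond the bit tests is part of the patch:
 */
static inline int icp_ddib_hpd_long_pulse(unsigned int shotplug_ctl_ddi)
{
	/* status field is bits 5:4; value 2 indicates a long pulse */
	return (shotplug_ctl_ddi & ICP_DDIB_HPD_STATUS_MASK) ==
		ICP_DDIB_HPD_LONG_DETECT;
}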
+
+#define SHOTPLUG_CTL_TC                                _MMIO(0xc4034)
+#define   ICP_TC_HPD_ENABLE(tc_port)           (8 << (tc_port) * 4)
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0             _MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW         _MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0             _MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW         _MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB     (0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB     (0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC     (0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC     (0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT                    10
+#define RC_MAX_QP_SHIFT                                5
+#define RC_MIN_QP_SHIFT                                0
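/*
 * A minimal packing sketch for one RC range parameter half using the
 * shifts above; the 5/5/6-bit field widths are an assumption inferred
 * from the shift spacing and the VESA DSC layout, not stated by this
 * patch:
 */
static inline unsigned int icl_dsc_rc_range(unsigned int min_qp,
					    unsigned int max_qp,
					    unsigned int bpg_offset)
{
	return (min_qp << RC_MIN_QP_SHIFT) |
	       (max_qp << RC_MAX_QP_SHIFT) |
	       (bpg_offset << RC_BPG_OFFSET_SHIFT);
}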
+
+#define DSCA_RC_RANGE_PARAMETERS_1             _MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW         _MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1             _MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW         _MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB     (0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB     (0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC     (0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC     (0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2             _MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW         _MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2             _MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW         _MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB     (0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB     (0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC     (0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC     (0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3             _MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW         _MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3             _MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW         _MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB     (0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB     (0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC     (0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC     (0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+                                                       _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe)           _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe)       _MMIO_PIPE((pipe) - PIPE_B, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+                                                       _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
+#define   ICP_TC_HPD_LONG_DETECT(tc_port)      (2 << (tc_port) * 4)
+#define   ICP_TC_HPD_SHORT_DETECT(tc_port)     (1 << (tc_port) * 4)
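/*
 * A companion sketch to the DDI decode above: each TC port owns a 4-bit
 * field in SHOTPLUG_CTL_TC, so the long-pulse test is parameterized by
 * port rather than spelled out per pin:
 */
static inline int icp_tc_hpd_long_pulse(unsigned int shotplug_ctl_tc,
					int tc_port)
{
	/* bit 1 of the port's 4-bit field flags a long pulse */
	return !!(shotplug_ctl_tc & ICP_TC_HPD_LONG_DETECT(tc_port));
}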
+
 #define PCH_GPIOA               _MMIO(0xc5010)
 #define PCH_GPIOB               _MMIO(0xc5014)
 #define PCH_GPIOC               _MMIO(0xc5018)
@@ -7439,46 +7798,46 @@ enum {
 
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
-#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
+#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
 #define _PCH_FPA0                0xc6040
-#define  FP_CB_TUNE            (0x3<<22)
+#define  FP_CB_TUNE            (0x3 << 22)
 #define _PCH_FPA1                0xc6044
 #define _PCH_FPB0                0xc6048
 #define _PCH_FPB1                0xc604c
-#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
-#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
+#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
 #define PCH_DPLL_TEST           _MMIO(0xc606c)
 
 #define PCH_DREF_CONTROL        _MMIO(0xC6200)
 #define  DREF_CONTROL_MASK      0x7fc3
-#define  DREF_CPU_SOURCE_OUTPUT_DISABLE         (0<<13)
-#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD      (2<<13)
-#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD       (3<<13)
-#define  DREF_CPU_SOURCE_OUTPUT_MASK           (3<<13)
-#define  DREF_SSC_SOURCE_DISABLE                (0<<11)
-#define  DREF_SSC_SOURCE_ENABLE                 (2<<11)
-#define  DREF_SSC_SOURCE_MASK                  (3<<11)
-#define  DREF_NONSPREAD_SOURCE_DISABLE          (0<<9)
-#define  DREF_NONSPREAD_CK505_ENABLE           (1<<9)
-#define  DREF_NONSPREAD_SOURCE_ENABLE           (2<<9)
-#define  DREF_NONSPREAD_SOURCE_MASK            (3<<9)
-#define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0<<7)
-#define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2<<7)
-#define  DREF_SUPERSPREAD_SOURCE_MASK          (3<<7)
-#define  DREF_SSC4_DOWNSPREAD                   (0<<6)
-#define  DREF_SSC4_CENTERSPREAD                 (1<<6)
-#define  DREF_SSC1_DISABLE                      (0<<1)
-#define  DREF_SSC1_ENABLE                       (1<<1)
+#define  DREF_CPU_SOURCE_OUTPUT_DISABLE         (0 << 13)
+#define  DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD      (2 << 13)
+#define  DREF_CPU_SOURCE_OUTPUT_NONSPREAD       (3 << 13)
+#define  DREF_CPU_SOURCE_OUTPUT_MASK           (3 << 13)
+#define  DREF_SSC_SOURCE_DISABLE                (0 << 11)
+#define  DREF_SSC_SOURCE_ENABLE                 (2 << 11)
+#define  DREF_SSC_SOURCE_MASK                  (3 << 11)
+#define  DREF_NONSPREAD_SOURCE_DISABLE          (0 << 9)
+#define  DREF_NONSPREAD_CK505_ENABLE           (1 << 9)
+#define  DREF_NONSPREAD_SOURCE_ENABLE           (2 << 9)
+#define  DREF_NONSPREAD_SOURCE_MASK            (3 << 9)
+#define  DREF_SUPERSPREAD_SOURCE_DISABLE        (0 << 7)
+#define  DREF_SUPERSPREAD_SOURCE_ENABLE         (2 << 7)
+#define  DREF_SUPERSPREAD_SOURCE_MASK          (3 << 7)
+#define  DREF_SSC4_DOWNSPREAD                   (0 << 6)
+#define  DREF_SSC4_CENTERSPREAD                 (1 << 6)
+#define  DREF_SSC1_DISABLE                      (0 << 1)
+#define  DREF_SSC1_ENABLE                       (1 << 1)
 #define  DREF_SSC4_DISABLE                      (0)
 #define  DREF_SSC4_ENABLE                       (1)
 
 #define PCH_RAWCLK_FREQ         _MMIO(0xc6204)
 #define  FDL_TP1_TIMER_SHIFT    12
-#define  FDL_TP1_TIMER_MASK     (3<<12)
+#define  FDL_TP1_TIMER_MASK     (3 << 12)
 #define  FDL_TP2_TIMER_SHIFT    10
-#define  FDL_TP2_TIMER_MASK     (3<<10)
+#define  FDL_TP2_TIMER_MASK     (3 << 10)
 #define  RAWCLK_FREQ_MASK       0x3ff
 #define  CNP_RAWCLK_DIV_MASK   (0x3ff << 16)
 #define  CNP_RAWCLK_DIV(div)   ((div) << 16)
@@ -7515,7 +7874,7 @@ enum {
 #define  TRANS_VBLANK_END_SHIFT                16
 #define  TRANS_VBLANK_START_SHIFT      0
 #define _PCH_TRANS_VSYNC_A             0xe0014
-#define  TRANS_VSYNC_END_SHIFT         16
+#define  TRANS_VSYNC_END_SHIFT         16
 #define  TRANS_VSYNC_START_SHIFT       0
 #define _PCH_TRANS_VSYNCSHIFT_A                0xe0028
 
@@ -7595,15 +7954,28 @@ enum {
 #define _HSW_VIDEO_DIP_VSC_ECC_B       0x61344
 #define _HSW_VIDEO_DIP_GCP_B           0x61210
 
+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B, C and eDP.
+ * The _A names are added so that the _MMIO_TRANS2
+ * definition can be reused; it then offsets to the right location.
+ */
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A      0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B      0x61350
+#define _ICL_VIDEO_DIP_PPS_ECC_A       0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B       0x613D4
+
 #define HSW_TVIDEO_DIP_CTL(trans)              _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
 #define HSW_TVIDEO_DIP_AVI_DATA(trans, i)      _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_VS_DATA(trans, i)       _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_SPD_DATA(trans, i)      _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_GCP(trans)              _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
 #define HSW_TVIDEO_DIP_VSC_DATA(trans, i)      _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i)       _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i)                _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
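/*
 * A brief usage sketch: writing a PPS DIP payload one dword at a time
 * through the macros above. I915_WRITE() is the driver's existing mmio
 * accessor; the payload contents and sizing are outside this patch:
 */
static void icl_write_pps_dip(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder,
			      const u32 *data, int ndwords)
{
	int i;

	for (i = 0; i < ndwords; i++)
		I915_WRITE(ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i),
			   data[i]);
}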
 
 #define _HSW_STEREO_3D_CTL_A           0x70020
-#define   S3D_ENABLE                   (1<<31)
+#define   S3D_ENABLE                   (1 << 31)
 #define _HSW_STEREO_3D_CTL_B           0x71020
 
 #define HSW_STEREO_3D_CTL(trans)       _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A)
@@ -7646,156 +8018,156 @@ enum {
 #define _PCH_TRANSBCONF              0xf1008
 #define PCH_TRANSCONF(pipe)    _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF)
 #define LPT_TRANSCONF          PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */
-#define  TRANS_DISABLE          (0<<31)
-#define  TRANS_ENABLE           (1<<31)
-#define  TRANS_STATE_MASK       (1<<30)
-#define  TRANS_STATE_DISABLE    (0<<30)
-#define  TRANS_STATE_ENABLE     (1<<30)
-#define  TRANS_FSYNC_DELAY_HB1  (0<<27)
-#define  TRANS_FSYNC_DELAY_HB2  (1<<27)
-#define  TRANS_FSYNC_DELAY_HB3  (2<<27)
-#define  TRANS_FSYNC_DELAY_HB4  (3<<27)
-#define  TRANS_INTERLACE_MASK   (7<<21)
-#define  TRANS_PROGRESSIVE      (0<<21)
-#define  TRANS_INTERLACED       (3<<21)
-#define  TRANS_LEGACY_INTERLACED_ILK (2<<21)
-#define  TRANS_8BPC             (0<<5)
-#define  TRANS_10BPC            (1<<5)
-#define  TRANS_6BPC             (2<<5)
-#define  TRANS_12BPC            (3<<5)
+#define  TRANS_DISABLE          (0 << 31)
+#define  TRANS_ENABLE           (1 << 31)
+#define  TRANS_STATE_MASK       (1 << 30)
+#define  TRANS_STATE_DISABLE    (0 << 30)
+#define  TRANS_STATE_ENABLE     (1 << 30)
+#define  TRANS_FSYNC_DELAY_HB1  (0 << 27)
+#define  TRANS_FSYNC_DELAY_HB2  (1 << 27)
+#define  TRANS_FSYNC_DELAY_HB3  (2 << 27)
+#define  TRANS_FSYNC_DELAY_HB4  (3 << 27)
+#define  TRANS_INTERLACE_MASK   (7 << 21)
+#define  TRANS_PROGRESSIVE      (0 << 21)
+#define  TRANS_INTERLACED       (3 << 21)
+#define  TRANS_LEGACY_INTERLACED_ILK (2 << 21)
+#define  TRANS_8BPC             (0 << 5)
+#define  TRANS_10BPC            (1 << 5)
+#define  TRANS_6BPC             (2 << 5)
+#define  TRANS_12BPC            (3 << 5)
 
 #define _TRANSA_CHICKEN1        0xf0060
 #define _TRANSB_CHICKEN1        0xf1060
 #define TRANS_CHICKEN1(pipe)   _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE    (1<<10)
-#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE     (1<<4)
+#define  TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE    (1 << 10)
+#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE     (1 << 4)
 #define _TRANSA_CHICKEN2        0xf0064
 #define _TRANSB_CHICKEN2        0xf1064
 #define TRANS_CHICKEN2(pipe)   _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define  TRANS_CHICKEN2_TIMING_OVERRIDE                        (1<<31)
-#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED          (1<<29)
-#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK         (3<<27)
-#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER     (1<<26)
-#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH  (1<<25)
+#define  TRANS_CHICKEN2_TIMING_OVERRIDE                        (1 << 31)
+#define  TRANS_CHICKEN2_FDI_POLARITY_REVERSED          (1 << 29)
+#define  TRANS_CHICKEN2_FRAME_START_DELAY_MASK         (3 << 27)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER     (1 << 26)
+#define  TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH  (1 << 25)
 
 #define SOUTH_CHICKEN1         _MMIO(0xc2000)
 #define  FDIA_PHASE_SYNC_SHIFT_OVR     19
 #define  FDIA_PHASE_SYNC_SHIFT_EN      18
-#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
 #define  FDI_BC_BIFURCATION_SELECT     (1 << 12)
 #define  CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
 #define  CHASSIS_CLK_REQ_DURATION(x)   ((x) << 8)
-#define  SPT_PWM_GRANULARITY           (1<<0)
+#define  SPT_PWM_GRANULARITY           (1 << 0)
 #define SOUTH_CHICKEN2         _MMIO(0xc2004)
-#define  FDI_MPHY_IOSFSB_RESET_STATUS  (1<<13)
-#define  FDI_MPHY_IOSFSB_RESET_CTL     (1<<12)
-#define  LPT_PWM_GRANULARITY           (1<<5)
-#define  DPLS_EDP_PPS_FIX_DIS          (1<<0)
+#define  FDI_MPHY_IOSFSB_RESET_STATUS  (1 << 13)
+#define  FDI_MPHY_IOSFSB_RESET_CTL     (1 << 12)
+#define  LPT_PWM_GRANULARITY           (1 << 5)
+#define  DPLS_EDP_PPS_FIX_DIS          (1 << 0)
 
 #define _FDI_RXA_CHICKEN        0xc200c
 #define _FDI_RXB_CHICKEN        0xc2010
-#define  FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
-#define  FDI_RX_PHASE_SYNC_POINTER_EN  (1<<0)
+#define  FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1)
+#define  FDI_RX_PHASE_SYNC_POINTER_EN  (1 << 0)
 #define FDI_RX_CHICKEN(pipe)   _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D    _MMIO(0xc2020)
-#define  PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
-#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
-#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
-#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
-#define  CNP_PWM_CGE_GATING_DISABLE (1<<13)
-#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
+#define  PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define  CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1 << 12)
 
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL            0x60100
 #define _FDI_TXB_CTL            0x61100
 #define FDI_TX_CTL(pipe)       _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
-#define  FDI_TX_DISABLE         (0<<31)
-#define  FDI_TX_ENABLE          (1<<31)
-#define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
-#define  FDI_LINK_TRAIN_PATTERN_2       (1<<28)
-#define  FDI_LINK_TRAIN_PATTERN_IDLE    (2<<28)
-#define  FDI_LINK_TRAIN_NONE            (3<<28)
-#define  FDI_LINK_TRAIN_VOLTAGE_0_4V    (0<<25)
-#define  FDI_LINK_TRAIN_VOLTAGE_0_6V    (1<<25)
-#define  FDI_LINK_TRAIN_VOLTAGE_0_8V    (2<<25)
-#define  FDI_LINK_TRAIN_VOLTAGE_1_2V    (3<<25)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2<<22)
-#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3<<22)
+#define  FDI_TX_DISABLE         (0 << 31)
+#define  FDI_TX_ENABLE          (1 << 31)
+#define  FDI_LINK_TRAIN_PATTERN_1       (0 << 28)
+#define  FDI_LINK_TRAIN_PATTERN_2       (1 << 28)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE    (2 << 28)
+#define  FDI_LINK_TRAIN_NONE            (3 << 28)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_4V    (0 << 25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_6V    (1 << 25)
+#define  FDI_LINK_TRAIN_VOLTAGE_0_8V    (2 << 25)
+#define  FDI_LINK_TRAIN_VOLTAGE_1_2V    (3 << 25)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_2X   (2 << 22)
+#define  FDI_LINK_TRAIN_PRE_EMPHASIS_3X   (3 << 22)
 /* ILK always uses 400mV 0dB for voltage swing and pre-emphasis level.
    SNB has different settings. */
 /* SNB A-stepping */
-#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A                (0x38<<22)
-#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A                (0x02<<22)
-#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01<<22)
-#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A                (0x0<<22)
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_A                (0x38 << 22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_A                (0x02 << 22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01 << 22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_A                (0x0 << 22)
 /* SNB B-stepping */
-#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B                (0x0<<22)
-#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B                (0x3a<<22)
-#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B      (0x39<<22)
-#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B                (0x38<<22)
-#define  FDI_LINK_TRAIN_VOL_EMP_MASK           (0x3f<<22)
+#define  FDI_LINK_TRAIN_400MV_0DB_SNB_B                (0x0 << 22)
+#define  FDI_LINK_TRAIN_400MV_6DB_SNB_B                (0x3a << 22)
+#define  FDI_LINK_TRAIN_600MV_3_5DB_SNB_B      (0x39 << 22)
+#define  FDI_LINK_TRAIN_800MV_0DB_SNB_B                (0x38 << 22)
+#define  FDI_LINK_TRAIN_VOL_EMP_MASK           (0x3f << 22)
 #define  FDI_DP_PORT_WIDTH_SHIFT               19
 #define  FDI_DP_PORT_WIDTH_MASK                        (7 << FDI_DP_PORT_WIDTH_SHIFT)
 #define  FDI_DP_PORT_WIDTH(width)           (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT)
-#define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
+#define  FDI_TX_ENHANCE_FRAME_ENABLE    (1 << 18)
 /* Ironlake: hardwired to 1 */
-#define  FDI_TX_PLL_ENABLE              (1<<14)
+#define  FDI_TX_PLL_ENABLE              (1 << 14)
 
 /* Ivybridge has different bits for lolz */
-#define  FDI_LINK_TRAIN_PATTERN_1_IVB       (0<<8)
-#define  FDI_LINK_TRAIN_PATTERN_2_IVB       (1<<8)
-#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB    (2<<8)
-#define  FDI_LINK_TRAIN_NONE_IVB            (3<<8)
+#define  FDI_LINK_TRAIN_PATTERN_1_IVB       (0 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_2_IVB       (1 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_IVB    (2 << 8)
+#define  FDI_LINK_TRAIN_NONE_IVB            (3 << 8)
 
 /* both Tx and Rx */
-#define  FDI_COMPOSITE_SYNC            (1<<11)
-#define  FDI_LINK_TRAIN_AUTO           (1<<10)
-#define  FDI_SCRAMBLING_ENABLE          (0<<7)
-#define  FDI_SCRAMBLING_DISABLE         (1<<7)
+#define  FDI_COMPOSITE_SYNC            (1 << 11)
+#define  FDI_LINK_TRAIN_AUTO           (1 << 10)
+#define  FDI_SCRAMBLING_ENABLE          (0 << 7)
+#define  FDI_SCRAMBLING_DISABLE         (1 << 7)
 
 /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
 #define _FDI_RXA_CTL             0xf000c
 #define _FDI_RXB_CTL             0xf100c
 #define FDI_RX_CTL(pipe)       _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
-#define  FDI_RX_ENABLE          (1<<31)
+#define  FDI_RX_ENABLE          (1 << 31)
 /* train, dp width same as FDI_TX */
-#define  FDI_FS_ERRC_ENABLE            (1<<27)
-#define  FDI_FE_ERRC_ENABLE            (1<<26)
-#define  FDI_RX_POLARITY_REVERSED_LPT  (1<<16)
-#define  FDI_8BPC                       (0<<16)
-#define  FDI_10BPC                      (1<<16)
-#define  FDI_6BPC                       (2<<16)
-#define  FDI_12BPC                      (3<<16)
-#define  FDI_RX_LINK_REVERSAL_OVERRIDE  (1<<15)
-#define  FDI_DMI_LINK_REVERSE_MASK      (1<<14)
-#define  FDI_RX_PLL_ENABLE              (1<<13)
-#define  FDI_FS_ERR_CORRECT_ENABLE      (1<<11)
-#define  FDI_FE_ERR_CORRECT_ENABLE      (1<<10)
-#define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
-#define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
-#define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
-#define  FDI_PCDCLK                    (1<<4)
+#define  FDI_FS_ERRC_ENABLE            (1 << 27)
+#define  FDI_FE_ERRC_ENABLE            (1 << 26)
+#define  FDI_RX_POLARITY_REVERSED_LPT  (1 << 16)
+#define  FDI_8BPC                       (0 << 16)
+#define  FDI_10BPC                      (1 << 16)
+#define  FDI_6BPC                       (2 << 16)
+#define  FDI_12BPC                      (3 << 16)
+#define  FDI_RX_LINK_REVERSAL_OVERRIDE  (1 << 15)
+#define  FDI_DMI_LINK_REVERSE_MASK      (1 << 14)
+#define  FDI_RX_PLL_ENABLE              (1 << 13)
+#define  FDI_FS_ERR_CORRECT_ENABLE      (1 << 11)
+#define  FDI_FE_ERR_CORRECT_ENABLE      (1 << 10)
+#define  FDI_FS_ERR_REPORT_ENABLE       (1 << 9)
+#define  FDI_FE_ERR_REPORT_ENABLE       (1 << 8)
+#define  FDI_RX_ENHANCE_FRAME_ENABLE    (1 << 6)
+#define  FDI_PCDCLK                    (1 << 4)
 /* CPT */
-#define  FDI_AUTO_TRAINING                     (1<<10)
-#define  FDI_LINK_TRAIN_PATTERN_1_CPT          (0<<8)
-#define  FDI_LINK_TRAIN_PATTERN_2_CPT          (1<<8)
-#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT       (2<<8)
-#define  FDI_LINK_TRAIN_NORMAL_CPT             (3<<8)
-#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT       (3<<8)
+#define  FDI_AUTO_TRAINING                     (1 << 10)
+#define  FDI_LINK_TRAIN_PATTERN_1_CPT          (0 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_2_CPT          (1 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT       (2 << 8)
+#define  FDI_LINK_TRAIN_NORMAL_CPT             (3 << 8)
+#define  FDI_LINK_TRAIN_PATTERN_MASK_CPT       (3 << 8)
 
 #define _FDI_RXA_MISC                  0xf0010
 #define _FDI_RXB_MISC                  0xf1010
-#define  FDI_RX_PWRDN_LANE1_MASK       (3<<26)
-#define  FDI_RX_PWRDN_LANE1_VAL(x)     ((x)<<26)
-#define  FDI_RX_PWRDN_LANE0_MASK       (3<<24)
-#define  FDI_RX_PWRDN_LANE0_VAL(x)     ((x)<<24)
-#define  FDI_RX_TP1_TO_TP2_48          (2<<20)
-#define  FDI_RX_TP1_TO_TP2_64          (3<<20)
-#define  FDI_RX_FDI_DELAY_90           (0x90<<0)
+#define  FDI_RX_PWRDN_LANE1_MASK       (3 << 26)
+#define  FDI_RX_PWRDN_LANE1_VAL(x)     ((x) << 26)
+#define  FDI_RX_PWRDN_LANE0_MASK       (3 << 24)
+#define  FDI_RX_PWRDN_LANE0_VAL(x)     ((x) << 24)
+#define  FDI_RX_TP1_TO_TP2_48          (2 << 20)
+#define  FDI_RX_TP1_TO_TP2_64          (3 << 20)
+#define  FDI_RX_FDI_DELAY_90           (0x90 << 0)
 #define FDI_RX_MISC(pipe)      _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 
 #define _FDI_RXA_TUSIZE1        0xf0030
@@ -7806,17 +8178,17 @@ enum {
 #define FDI_RX_TUSIZE2(pipe)   _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
 /* FDI_RX interrupt register format */
-#define FDI_RX_INTER_LANE_ALIGN         (1<<10)
-#define FDI_RX_SYMBOL_LOCK              (1<<9) /* train 2 */
-#define FDI_RX_BIT_LOCK                 (1<<8) /* train 1 */
-#define FDI_RX_TRAIN_PATTERN_2_FAIL     (1<<7)
-#define FDI_RX_FS_CODE_ERR              (1<<6)
-#define FDI_RX_FE_CODE_ERR              (1<<5)
-#define FDI_RX_SYMBOL_ERR_RATE_ABOVE    (1<<4)
-#define FDI_RX_HDCP_LINK_FAIL           (1<<3)
-#define FDI_RX_PIXEL_FIFO_OVERFLOW      (1<<2)
-#define FDI_RX_CROSS_CLOCK_OVERFLOW     (1<<1)
-#define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1<<0)
+#define FDI_RX_INTER_LANE_ALIGN         (1 << 10)
+#define FDI_RX_SYMBOL_LOCK              (1 << 9) /* train 2 */
+#define FDI_RX_BIT_LOCK                 (1 << 8) /* train 1 */
+#define FDI_RX_TRAIN_PATTERN_2_FAIL     (1 << 7)
+#define FDI_RX_FS_CODE_ERR              (1 << 6)
+#define FDI_RX_FE_CODE_ERR              (1 << 5)
+#define FDI_RX_SYMBOL_ERR_RATE_ABOVE    (1 << 4)
+#define FDI_RX_HDCP_LINK_FAIL           (1 << 3)
+#define FDI_RX_PIXEL_FIFO_OVERFLOW      (1 << 2)
+#define FDI_RX_CROSS_CLOCK_OVERFLOW     (1 << 1)
+#define FDI_RX_SYMBOL_QUEUE_OVERFLOW    (1 << 0)
 
 #define _FDI_RXA_IIR            0xf0014
 #define _FDI_RXA_IMR            0xf0018
@@ -7862,71 +8234,58 @@ enum {
 #define PCH_DP_AUX_CH_DATA(aux_ch, i)  _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
 
 /* CPT */
-#define  PORT_TRANS_A_SEL_CPT  0
-#define  PORT_TRANS_B_SEL_CPT  (1<<29)
-#define  PORT_TRANS_C_SEL_CPT  (2<<29)
-#define  PORT_TRANS_SEL_MASK   (3<<29)
-#define  PORT_TRANS_SEL_CPT(pipe)      ((pipe) << 29)
-#define  PORT_TO_PIPE(val)     (((val) & (1<<30)) >> 30)
-#define  PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
-#define  SDVO_PORT_TO_PIPE_CHV(val)    (((val) & (3<<24)) >> 24)
-#define  DP_PORT_TO_PIPE_CHV(val)      (((val) & (3<<16)) >> 16)
-
 #define _TRANS_DP_CTL_A                0xe0300
 #define _TRANS_DP_CTL_B                0xe1300
 #define _TRANS_DP_CTL_C                0xe2300
 #define TRANS_DP_CTL(pipe)     _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B)
-#define  TRANS_DP_OUTPUT_ENABLE        (1<<31)
-#define  TRANS_DP_PORT_SEL_B   (0<<29)
-#define  TRANS_DP_PORT_SEL_C   (1<<29)
-#define  TRANS_DP_PORT_SEL_D   (2<<29)
-#define  TRANS_DP_PORT_SEL_NONE        (3<<29)
-#define  TRANS_DP_PORT_SEL_MASK        (3<<29)
-#define  TRANS_DP_PIPE_TO_PORT(val)    ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
-#define  TRANS_DP_AUDIO_ONLY   (1<<26)
-#define  TRANS_DP_ENH_FRAMING  (1<<18)
-#define  TRANS_DP_8BPC         (0<<9)
-#define  TRANS_DP_10BPC                (1<<9)
-#define  TRANS_DP_6BPC         (2<<9)
-#define  TRANS_DP_12BPC                (3<<9)
-#define  TRANS_DP_BPC_MASK     (3<<9)
-#define  TRANS_DP_VSYNC_ACTIVE_HIGH    (1<<4)
+#define  TRANS_DP_OUTPUT_ENABLE        (1 << 31)
+#define  TRANS_DP_PORT_SEL_MASK                (3 << 29)
+#define  TRANS_DP_PORT_SEL_NONE                (3 << 29)
+#define  TRANS_DP_PORT_SEL(port)       (((port) - PORT_B) << 29)
+#define  TRANS_DP_AUDIO_ONLY   (1 << 26)
+#define  TRANS_DP_ENH_FRAMING  (1 << 18)
+#define  TRANS_DP_8BPC         (0 << 9)
+#define  TRANS_DP_10BPC                (1 << 9)
+#define  TRANS_DP_6BPC         (2 << 9)
+#define  TRANS_DP_12BPC                (3 << 9)
+#define  TRANS_DP_BPC_MASK     (3 << 9)
+#define  TRANS_DP_VSYNC_ACTIVE_HIGH    (1 << 4)
 #define  TRANS_DP_VSYNC_ACTIVE_LOW     0
-#define  TRANS_DP_HSYNC_ACTIVE_HIGH    (1<<3)
+#define  TRANS_DP_HSYNC_ACTIVE_HIGH    (1 << 3)
 #define  TRANS_DP_HSYNC_ACTIVE_LOW     0
-#define  TRANS_DP_SYNC_MASK    (3<<3)
+#define  TRANS_DP_SYNC_MASK    (3 << 3)
 
 /* SNB eDP training params */
 /* SNB A-stepping */
-#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A                (0x38<<22)
-#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A                (0x02<<22)
-#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01<<22)
-#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A                (0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_0DB_SNB_A                (0x38 << 22)
+#define  EDP_LINK_TRAIN_400MV_6DB_SNB_A                (0x02 << 22)
+#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A      (0x01 << 22)
+#define  EDP_LINK_TRAIN_800MV_0DB_SNB_A                (0x0 << 22)
 /* SNB B-stepping */
-#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B    (0x0<<22)
-#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B      (0x1<<22)
-#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B    (0x3a<<22)
-#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B  (0x39<<22)
-#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38<<22)
-#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B    (0x0 << 22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B      (0x1 << 22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B    (0x3a << 22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B  (0x39 << 22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38 << 22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f << 22)
 
 /* IVB */
-#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 <<22)
-#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a <<22)
-#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f <<22)
-#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 <<22)
-#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 <<22)
-#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x3e <<22)
+#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 << 22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a << 22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f << 22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 << 22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 << 22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 << 22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x3e << 22)
 
 /* legacy values */
-#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 <<22)
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 <<22)
-#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 <<22)
-#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 <<22)
-#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 <<22)
+#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 << 22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 << 22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 << 22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 << 22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 << 22)
 
-#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f<<22)
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f << 22)
 
 #define  VLV_PMWGICZ                           _MMIO(0x1300a4)
 
@@ -7973,7 +8332,7 @@ enum {
 #define   FORCEWAKE_KERNEL_FALLBACK            BIT(15)
 #define  FORCEWAKE_MT_ACK                      _MMIO(0x130040)
 #define  ECOBUS                                        _MMIO(0xa180)
-#define    FORCEWAKE_MT_ENABLE                 (1<<5)
+#define    FORCEWAKE_MT_ENABLE                 (1 << 5)
 #define  VLV_SPAREG2H                          _MMIO(0xA194)
 #define  GEN9_PWRGT_DOMAIN_STATUS              _MMIO(0xA2A0)
 #define   GEN9_PWRGT_MEDIA_STATUS_MASK         (1 << 0)
@@ -7982,13 +8341,13 @@ enum {
 #define  GTFIFODBG                             _MMIO(0x120000)
 #define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV   (0x1f << 20)
 #define    GT_FIFO_FREE_ENTRIES_CHV            (0x7f << 13)
-#define    GT_FIFO_SBDROPERR                   (1<<6)
-#define    GT_FIFO_BLOBDROPERR                 (1<<5)
-#define    GT_FIFO_SB_READ_ABORTERR            (1<<4)
-#define    GT_FIFO_DROPERR                     (1<<3)
-#define    GT_FIFO_OVFERR                      (1<<2)
-#define    GT_FIFO_IAWRERR                     (1<<1)
-#define    GT_FIFO_IARDERR                     (1<<0)
+#define    GT_FIFO_SBDROPERR                   (1 << 6)
+#define    GT_FIFO_BLOBDROPERR                 (1 << 5)
+#define    GT_FIFO_SB_READ_ABORTERR            (1 << 4)
+#define    GT_FIFO_DROPERR                     (1 << 3)
+#define    GT_FIFO_OVFERR                      (1 << 2)
+#define    GT_FIFO_IAWRERR                     (1 << 1)
+#define    GT_FIFO_IARDERR                     (1 << 0)
 
 #define  GTFIFOCTL                             _MMIO(0x120008)
 #define    GT_FIFO_FREE_ENTRIES_MASK           0x7f
@@ -8022,37 +8381,37 @@ enum {
 # define GEN6_OACSUNIT_CLOCK_GATE_DISABLE              (1 << 20)
 
 #define GEN7_UCGCTL4                           _MMIO(0x940c)
-#define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1<<25)
-#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE     (1<<14)
+#define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1 << 25)
+#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE     (1 << 14)
 
 #define GEN6_RCGCTL1                           _MMIO(0x9410)
 #define GEN6_RCGCTL2                           _MMIO(0x9414)
 #define GEN6_RSTCTL                            _MMIO(0x9420)
 
 #define GEN8_UCGCTL6                           _MMIO(0x9430)
-#define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE     (1<<24)
-#define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE      (1<<14)
-#define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
+#define   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE     (1 << 24)
+#define   GEN8_SDEUNIT_CLOCK_GATE_DISABLE      (1 << 14)
+#define   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28)
 
 #define GEN6_GFXPAUSE                          _MMIO(0xA000)
 #define GEN6_RPNSWREQ                          _MMIO(0xA008)
-#define   GEN6_TURBO_DISABLE                   (1<<31)
-#define   GEN6_FREQUENCY(x)                    ((x)<<25)
-#define   HSW_FREQUENCY(x)                     ((x)<<24)
-#define   GEN9_FREQUENCY(x)                    ((x)<<23)
-#define   GEN6_OFFSET(x)                       ((x)<<19)
-#define   GEN6_AGGRESSIVE_TURBO                        (0<<15)
+#define   GEN6_TURBO_DISABLE                   (1 << 31)
+#define   GEN6_FREQUENCY(x)                    ((x) << 25)
+#define   HSW_FREQUENCY(x)                     ((x) << 24)
+#define   GEN9_FREQUENCY(x)                    ((x) << 23)
+#define   GEN6_OFFSET(x)                       ((x) << 19)
+#define   GEN6_AGGRESSIVE_TURBO                        (0 << 15)
 #define GEN6_RC_VIDEO_FREQ                     _MMIO(0xA00C)
 #define GEN6_RC_CONTROL                                _MMIO(0xA090)
-#define   GEN6_RC_CTL_RC6pp_ENABLE             (1<<16)
-#define   GEN6_RC_CTL_RC6p_ENABLE              (1<<17)
-#define   GEN6_RC_CTL_RC6_ENABLE               (1<<18)
-#define   GEN6_RC_CTL_RC1e_ENABLE              (1<<20)
-#define   GEN6_RC_CTL_RC7_ENABLE               (1<<22)
-#define   VLV_RC_CTL_CTX_RST_PARALLEL          (1<<24)
-#define   GEN7_RC_CTL_TO_MODE                  (1<<28)
-#define   GEN6_RC_CTL_EI_MODE(x)               ((x)<<27)
-#define   GEN6_RC_CTL_HW_ENABLE                        (1<<31)
+#define   GEN6_RC_CTL_RC6pp_ENABLE             (1 << 16)
+#define   GEN6_RC_CTL_RC6p_ENABLE              (1 << 17)
+#define   GEN6_RC_CTL_RC6_ENABLE               (1 << 18)
+#define   GEN6_RC_CTL_RC1e_ENABLE              (1 << 20)
+#define   GEN6_RC_CTL_RC7_ENABLE               (1 << 22)
+#define   VLV_RC_CTL_CTX_RST_PARALLEL          (1 << 24)
+#define   GEN7_RC_CTL_TO_MODE                  (1 << 28)
+#define   GEN6_RC_CTL_EI_MODE(x)               ((x) << 27)
+#define   GEN6_RC_CTL_HW_ENABLE                        (1 << 31)
 #define GEN6_RP_DOWN_TIMEOUT                   _MMIO(0xA010)
 #define GEN6_RP_INTERRUPT_LIMITS               _MMIO(0xA014)
 #define GEN6_RPSTAT1                           _MMIO(0xA01C)
@@ -8063,19 +8422,19 @@ enum {
 #define   HSW_CAGF_MASK                                (0x7f << HSW_CAGF_SHIFT)
 #define   GEN9_CAGF_MASK                       (0x1ff << GEN9_CAGF_SHIFT)
 #define GEN6_RP_CONTROL                                _MMIO(0xA024)
-#define   GEN6_RP_MEDIA_TURBO                  (1<<11)
-#define   GEN6_RP_MEDIA_MODE_MASK              (3<<9)
-#define   GEN6_RP_MEDIA_HW_TURBO_MODE          (3<<9)
-#define   GEN6_RP_MEDIA_HW_NORMAL_MODE         (2<<9)
-#define   GEN6_RP_MEDIA_HW_MODE                        (1<<9)
-#define   GEN6_RP_MEDIA_SW_MODE                        (0<<9)
-#define   GEN6_RP_MEDIA_IS_GFX                 (1<<8)
-#define   GEN6_RP_ENABLE                       (1<<7)
-#define   GEN6_RP_UP_IDLE_MIN                  (0x1<<3)
-#define   GEN6_RP_UP_BUSY_AVG                  (0x2<<3)
-#define   GEN6_RP_UP_BUSY_CONT                 (0x4<<3)
-#define   GEN6_RP_DOWN_IDLE_AVG                        (0x2<<0)
-#define   GEN6_RP_DOWN_IDLE_CONT               (0x1<<0)
+#define   GEN6_RP_MEDIA_TURBO                  (1 << 11)
+#define   GEN6_RP_MEDIA_MODE_MASK              (3 << 9)
+#define   GEN6_RP_MEDIA_HW_TURBO_MODE          (3 << 9)
+#define   GEN6_RP_MEDIA_HW_NORMAL_MODE         (2 << 9)
+#define   GEN6_RP_MEDIA_HW_MODE                        (1 << 9)
+#define   GEN6_RP_MEDIA_SW_MODE                        (0 << 9)
+#define   GEN6_RP_MEDIA_IS_GFX                 (1 << 8)
+#define   GEN6_RP_ENABLE                       (1 << 7)
+#define   GEN6_RP_UP_IDLE_MIN                  (0x1 << 3)
+#define   GEN6_RP_UP_BUSY_AVG                  (0x2 << 3)
+#define   GEN6_RP_UP_BUSY_CONT                 (0x4 << 3)
+#define   GEN6_RP_DOWN_IDLE_AVG                        (0x2 << 0)
+#define   GEN6_RP_DOWN_IDLE_CONT               (0x1 << 0)
 #define GEN6_RP_UP_THRESHOLD                   _MMIO(0xA02C)
 #define GEN6_RP_DOWN_THRESHOLD                 _MMIO(0xA030)
 #define GEN6_RP_CUR_UP_EI                      _MMIO(0xA050)
@@ -8111,15 +8470,15 @@ enum {
 #define VLV_RCEDATA                            _MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD                   _MMIO(0xA0C0)
 #define GEN6_PMINTRMSK                         _MMIO(0xA168)
-#define   GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC  (1<<31)
-#define   ARAT_EXPIRED_INTRMSK                 (1<<9)
+#define   GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC  (1 << 31)
+#define   ARAT_EXPIRED_INTRMSK                 (1 << 9)
 #define GEN8_MISC_CTRL0                                _MMIO(0xA180)
 #define VLV_PWRDWNUPCTL                                _MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS          _MMIO(0xA0C4)
 #define GEN9_RENDER_PG_IDLE_HYSTERESIS         _MMIO(0xA0C8)
 #define GEN9_PG_ENABLE                         _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE                  (1<<0)
-#define GEN9_MEDIA_PG_ENABLE                   (1<<1)
+#define GEN9_RENDER_PG_ENABLE                  (1 << 0)
+#define GEN9_MEDIA_PG_ENABLE                   (1 << 1)
 #define GEN8_PUSHBUS_CONTROL                   _MMIO(0xA248)
 #define GEN8_PUSHBUS_ENABLE                    _MMIO(0xA250)
 #define GEN8_PUSHBUS_SHIFT                     _MMIO(0xA25C)
@@ -8132,13 +8491,13 @@ enum {
 #define GEN6_PMIMR                             _MMIO(0x44024) /* rps_lock */
 #define GEN6_PMIIR                             _MMIO(0x44028)
 #define GEN6_PMIER                             _MMIO(0x4402C)
-#define  GEN6_PM_MBOX_EVENT                    (1<<25)
-#define  GEN6_PM_THERMAL_EVENT                 (1<<24)
-#define  GEN6_PM_RP_DOWN_TIMEOUT               (1<<6)
-#define  GEN6_PM_RP_UP_THRESHOLD               (1<<5)
-#define  GEN6_PM_RP_DOWN_THRESHOLD             (1<<4)
-#define  GEN6_PM_RP_UP_EI_EXPIRED              (1<<2)
-#define  GEN6_PM_RP_DOWN_EI_EXPIRED            (1<<1)
+#define  GEN6_PM_MBOX_EVENT                    (1 << 25)
+#define  GEN6_PM_THERMAL_EVENT                 (1 << 24)
+#define  GEN6_PM_RP_DOWN_TIMEOUT               (1 << 6)
+#define  GEN6_PM_RP_UP_THRESHOLD               (1 << 5)
+#define  GEN6_PM_RP_DOWN_THRESHOLD             (1 << 4)
+#define  GEN6_PM_RP_UP_EI_EXPIRED              (1 << 2)
+#define  GEN6_PM_RP_DOWN_EI_EXPIRED            (1 << 1)
 #define  GEN6_PM_RPS_EVENTS                    (GEN6_PM_RP_UP_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_THRESHOLD | \
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
@@ -8147,16 +8506,16 @@ enum {
 #define GEN7_GT_SCRATCH_REG_NUM                        8
 
 #define VLV_GTLC_SURVIVABILITY_REG              _MMIO(0x130098)
-#define VLV_GFX_CLK_STATUS_BIT                 (1<<3)
-#define VLV_GFX_CLK_FORCE_ON_BIT               (1<<2)
+#define VLV_GFX_CLK_STATUS_BIT                 (1 << 3)
+#define VLV_GFX_CLK_FORCE_ON_BIT               (1 << 2)
 
 #define GEN6_GT_GFX_RC6_LOCKED                 _MMIO(0x138104)
 #define VLV_COUNTER_CONTROL                    _MMIO(0x138104)
-#define   VLV_COUNT_RANGE_HIGH                 (1<<15)
-#define   VLV_MEDIA_RC0_COUNT_EN               (1<<5)
-#define   VLV_RENDER_RC0_COUNT_EN              (1<<4)
-#define   VLV_MEDIA_RC6_COUNT_EN               (1<<1)
-#define   VLV_RENDER_RC6_COUNT_EN              (1<<0)
+#define   VLV_COUNT_RANGE_HIGH                 (1 << 15)
+#define   VLV_MEDIA_RC0_COUNT_EN               (1 << 5)
+#define   VLV_RENDER_RC0_COUNT_EN              (1 << 4)
+#define   VLV_MEDIA_RC6_COUNT_EN               (1 << 1)
+#define   VLV_RENDER_RC6_COUNT_EN              (1 << 0)
 #define GEN6_GT_GFX_RC6                                _MMIO(0x138108)
 #define VLV_GT_RENDER_RC6                      _MMIO(0x138108)
 #define VLV_GT_MEDIA_RC6                       _MMIO(0x13810C)
@@ -8167,7 +8526,7 @@ enum {
 #define VLV_MEDIA_C0_COUNT                     _MMIO(0x13811C)
 
 #define GEN6_PCODE_MAILBOX                     _MMIO(0x138124)
-#define   GEN6_PCODE_READY                     (1<<31)
+#define   GEN6_PCODE_READY                     (1 << 31)
 #define   GEN6_PCODE_ERROR_MASK                        0xFF
 #define     GEN6_PCODE_SUCCESS                 0x0
 #define     GEN6_PCODE_ILLEGAL_CMD             0x1
@@ -8211,7 +8570,7 @@ enum {
 #define GEN6_PCODE_DATA1                       _MMIO(0x13812C)
 
 #define GEN6_GT_CORE_STATUS            _MMIO(0x138060)
-#define   GEN6_CORE_CPD_STATE_MASK     (7<<4)
+#define   GEN6_CORE_CPD_STATE_MASK     (7 << 4)
 #define   GEN6_RCn_MASK                        7
 #define   GEN6_RC0                     0
 #define   GEN6_RC3                     2
@@ -8223,26 +8582,26 @@ enum {
 
 #define CHV_POWER_SS0_SIG1             _MMIO(0xa720)
 #define CHV_POWER_SS1_SIG1             _MMIO(0xa728)
-#define   CHV_SS_PG_ENABLE             (1<<1)
-#define   CHV_EU08_PG_ENABLE           (1<<9)
-#define   CHV_EU19_PG_ENABLE           (1<<17)
-#define   CHV_EU210_PG_ENABLE          (1<<25)
+#define   CHV_SS_PG_ENABLE             (1 << 1)
+#define   CHV_EU08_PG_ENABLE           (1 << 9)
+#define   CHV_EU19_PG_ENABLE           (1 << 17)
+#define   CHV_EU210_PG_ENABLE          (1 << 25)
 
 #define CHV_POWER_SS0_SIG2             _MMIO(0xa724)
 #define CHV_POWER_SS1_SIG2             _MMIO(0xa72c)
-#define   CHV_EU311_PG_ENABLE          (1<<1)
+#define   CHV_EU311_PG_ENABLE          (1 << 1)
 
-#define GEN9_SLICE_PGCTL_ACK(slice)    _MMIO(0x804c + (slice)*0x4)
+#define GEN9_SLICE_PGCTL_ACK(slice)    _MMIO(0x804c + (slice) * 0x4)
 #define GEN10_SLICE_PGCTL_ACK(slice)   _MMIO(0x804c + ((slice) / 3) * 0x34 + \
                                              ((slice) % 3) * 0x4)
 #define   GEN9_PGCTL_SLICE_ACK         (1 << 0)
-#define   GEN9_PGCTL_SS_ACK(subslice)  (1 << (2 + (subslice)*2))
+#define   GEN9_PGCTL_SS_ACK(subslice)  (1 << (2 + (subslice) * 2))
 #define   GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 0x7F : 0x1F)
 
-#define GEN9_SS01_EU_PGCTL_ACK(slice)  _MMIO(0x805c + (slice)*0x8)
+#define GEN9_SS01_EU_PGCTL_ACK(slice)  _MMIO(0x805c + (slice) * 0x8)
 #define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
                                              ((slice) % 3) * 0x8)
-#define GEN9_SS23_EU_PGCTL_ACK(slice)  _MMIO(0x8060 + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice)  _MMIO(0x8060 + (slice) * 0x8)
 #define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
                                              ((slice) % 3) * 0x8)
 #define   GEN9_PGCTL_SSA_EU08_ACK      (1 << 0)
@@ -8255,10 +8614,10 @@ enum {
 #define   GEN9_PGCTL_SSB_EU311_ACK     (1 << 14)
 
 #define GEN7_MISCCPCTL                         _MMIO(0x9424)
-#define   GEN7_DOP_CLOCK_GATE_ENABLE           (1<<0)
-#define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE     (1<<2)
-#define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE       (1<<4)
-#define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1<<6)
+#define   GEN7_DOP_CLOCK_GATE_ENABLE           (1 << 0)
+#define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE     (1 << 2)
+#define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE       (1 << 4)
+#define   GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE     (1 << 6)
 
 #define GEN8_GARBCNTL                          _MMIO(0xB004)
 #define   GEN9_GAPS_TSV_CREDIT_DISABLE         (1 << 7)
@@ -8287,61 +8646,62 @@ enum {
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1(slice)         _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
-#define   GEN7_L3CDERRST1_ROW_MASK     (0x7ff<<14)
-#define   GEN7_PARITY_ERROR_VALID      (1<<13)
-#define   GEN7_L3CDERRST1_BANK_MASK    (3<<11)
-#define   GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define   GEN7_L3CDERRST1_ROW_MASK     (0x7ff << 14)
+#define   GEN7_PARITY_ERROR_VALID      (1 << 13)
+#define   GEN7_L3CDERRST1_BANK_MASK    (3 << 11)
+#define   GEN7_L3CDERRST1_SUBBANK_MASK (7 << 8)
 #define GEN7_PARITY_ERROR_ROW(reg) \
-               ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+               (((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14)
 #define GEN7_PARITY_ERROR_BANK(reg) \
-               ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+               (((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11)
 #define GEN7_PARITY_ERROR_SUBBANK(reg) \
-               ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
-#define   GEN7_L3CDERRST1_ENABLE       (1<<7)
+               (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define   GEN7_L3CDERRST1_ENABLE       (1 << 7)
 
 #define GEN7_L3LOG(slice, i)           _MMIO(0xB070 + (slice) * 0x200 + (i) * 4)
 #define GEN7_L3LOG_SIZE                        0x80
 
 #define GEN7_HALF_SLICE_CHICKEN1       _MMIO(0xe100) /* IVB GT1 + VLV */
 #define GEN7_HALF_SLICE_CHICKEN1_GT2   _MMIO(0xf100)
-#define   GEN7_MAX_PS_THREAD_DEP               (8<<12)
-#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1<<10)
-#define   GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE      (1<<4)
-#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+#define   GEN7_MAX_PS_THREAD_DEP               (8 << 12)
+#define   GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE  (1 << 10)
+#define   GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE      (1 << 4)
+#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3)
 
 #define GEN9_HALF_SLICE_CHICKEN5       _MMIO(0xe188)
-#define   GEN9_DG_MIRROR_FIX_ENABLE    (1<<5)
-#define   GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
+#define   GEN9_DG_MIRROR_FIX_ENABLE    (1 << 5)
+#define   GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3)
 
 #define GEN8_ROW_CHICKEN               _MMIO(0xe4f0)
-#define   FLOW_CONTROL_ENABLE          (1<<15)
-#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1<<8)
-#define   STALL_DOP_GATING_DISABLE             (1<<5)
-#define   THROTTLE_12_5                                (7<<2)
-#define   DISABLE_EARLY_EOT                    (1<<1)
+#define   FLOW_CONTROL_ENABLE          (1 << 15)
+#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE        (1 << 8)
+#define   STALL_DOP_GATING_DISABLE             (1 << 5)
+#define   THROTTLE_12_5                                (7 << 2)
+#define   DISABLE_EARLY_EOT                    (1 << 1)
 
 #define GEN7_ROW_CHICKEN2              _MMIO(0xe4f4)
 #define GEN7_ROW_CHICKEN2_GT2          _MMIO(0xf4f4)
-#define   DOP_CLOCK_GATING_DISABLE     (1<<0)
-#define   PUSH_CONSTANT_DEREF_DISABLE  (1<<8)
+#define   DOP_CLOCK_GATING_DISABLE     (1 << 0)
+#define   PUSH_CONSTANT_DEREF_DISABLE  (1 << 8)
+#define   GEN11_TDL_CLOCK_GATING_FIX_DISABLE   (1 << 1)
 
 #define HSW_ROW_CHICKEN3               _MMIO(0xe49c)
 #define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
 
 #define HALF_SLICE_CHICKEN2            _MMIO(0xe180)
-#define   GEN8_ST_PO_DISABLE           (1<<13)
+#define   GEN8_ST_PO_DISABLE           (1 << 13)
 
 #define HALF_SLICE_CHICKEN3            _MMIO(0xe184)
-#define   HSW_SAMPLE_C_PERFORMANCE     (1<<9)
-#define   GEN8_CENTROID_PIXEL_OPT_DIS  (1<<8)
-#define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC  (1<<5)
-#define   CNL_FAST_ANISO_L1_BANKING_FIX        (1<<4)
-#define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1<<1)
+#define   HSW_SAMPLE_C_PERFORMANCE     (1 << 9)
+#define   GEN8_CENTROID_PIXEL_OPT_DIS  (1 << 8)
+#define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC  (1 << 5)
+#define   CNL_FAST_ANISO_L1_BANKING_FIX        (1 << 4)
+#define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1 << 1)
 
 #define GEN9_HALF_SLICE_CHICKEN7       _MMIO(0xe194)
-#define   GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR       (1<<8)
-#define   GEN9_ENABLE_YV12_BUGFIX      (1<<4)
-#define   GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
+#define   GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR       (1 << 8)
+#define   GEN9_ENABLE_YV12_BUGFIX      (1 << 4)
+#define   GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
 
 /* Audio */
 #define G4X_AUD_VID_DID                        _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -8473,6 +8833,14 @@ enum {
 #define _HSW_PWR_WELL_CTL3                     0x45408
 #define _HSW_PWR_WELL_CTL4                     0x4540C
 
+#define _ICL_PWR_WELL_CTL_AUX1                 0x45440
+#define _ICL_PWR_WELL_CTL_AUX2                 0x45444
+#define _ICL_PWR_WELL_CTL_AUX4                 0x4544C
+
+#define _ICL_PWR_WELL_CTL_DDI1                 0x45450
+#define _ICL_PWR_WELL_CTL_DDI2                 0x45454
+#define _ICL_PWR_WELL_CTL_DDI4                 0x4545C
+
 /*
  * Each power well control register contains up to 16 (request, status) HW
  * flag tuples. The register index and HW flag shift is determined by the
@@ -8482,21 +8850,27 @@ enum {
  */
 #define _HSW_PW_REG_IDX(pw)                    ((pw) >> 4)
 #define _HSW_PW_SHIFT(pw)                      (((pw) & 0xf) * 2)
-/* TODO: Add all PWR_WELL_CTL registers below for new platforms */
 #define HSW_PWR_WELL_CTL_BIOS(pw)      _MMIO(_PICK(_HSW_PW_REG_IDX(pw),       \
-                                                   _HSW_PWR_WELL_CTL1))
+                                                   _HSW_PWR_WELL_CTL1,        \
+                                                   _ICL_PWR_WELL_CTL_AUX1,    \
+                                                   _ICL_PWR_WELL_CTL_DDI1))
 #define HSW_PWR_WELL_CTL_DRIVER(pw)    _MMIO(_PICK(_HSW_PW_REG_IDX(pw),       \
-                                                   _HSW_PWR_WELL_CTL2))
+                                                   _HSW_PWR_WELL_CTL2,        \
+                                                   _ICL_PWR_WELL_CTL_AUX2,    \
+                                                   _ICL_PWR_WELL_CTL_DDI2))
+/* KVMR doesn't have a reg for AUX or DDI power well control */
 #define HSW_PWR_WELL_CTL_KVMR          _MMIO(_HSW_PWR_WELL_CTL3)
 #define HSW_PWR_WELL_CTL_DEBUG(pw)     _MMIO(_PICK(_HSW_PW_REG_IDX(pw),       \
-                                                   _HSW_PWR_WELL_CTL4))
+                                                   _HSW_PWR_WELL_CTL4,        \
+                                                   _ICL_PWR_WELL_CTL_AUX4,    \
+                                                   _ICL_PWR_WELL_CTL_DDI4))
 
 #define   HSW_PWR_WELL_CTL_REQ(pw)             (1 << (_HSW_PW_SHIFT(pw) + 1))
 #define   HSW_PWR_WELL_CTL_STATE(pw)           (1 << _HSW_PW_SHIFT(pw))
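 /*
  * Illustrative sketch (not from this patch): enabling a power well by its
  * ID "pw" means setting the request bit and polling the paired status bit;
  * I915_READ/I915_WRITE and the timeout handling are assumed from the
  * surrounding driver code.
  *
  *	val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(pw));
  *	I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(pw),
  *		   val | HSW_PWR_WELL_CTL_REQ(pw));
  *	... poll HSW_PWR_WELL_CTL_DRIVER(pw) for HSW_PWR_WELL_CTL_STATE(pw) ...
  */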
 #define HSW_PWR_WELL_CTL5                      _MMIO(0x45410)
-#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP      (1<<31)
-#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE       (1<<20)
-#define   HSW_PWR_WELL_FORCE_ON                        (1<<19)
+#define   HSW_PWR_WELL_ENABLE_SINGLE_STEP      (1 << 31)
+#define   HSW_PWR_WELL_PWR_GATE_OVERRIDE       (1 << 20)
+#define   HSW_PWR_WELL_FORCE_ON                        (1 << 19)
 #define HSW_PWR_WELL_CTL6                      _MMIO(0x45414)
 
 /* SKL Fuse Status */
@@ -8507,9 +8881,11 @@ enum skl_power_gate {
 };
 
 #define SKL_FUSE_STATUS                                _MMIO(0x42000)
-#define  SKL_FUSE_DOWNLOAD_STATUS              (1<<31)
+#define  SKL_FUSE_DOWNLOAD_STATUS              (1 << 31)
 /* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */
 #define  SKL_PW_TO_PG(pw)                      ((pw) - SKL_DISP_PW_1 + SKL_PG1)
+/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */
+#define  ICL_PW_TO_PG(pw)                      ((pw) - ICL_DISP_PW_1 + SKL_PG1)
 #define  SKL_FUSE_PG_DIST_STATUS(pg)           (1 << (27 - (pg)))
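 /*
  * Example (sketch): before enabling PG2's wells on SKL, the fuse
  * distribution status of the matching power gate can be polled; wait_for()
  * and SKL_DISP_PW_2 are assumed from the driver context.
  *
  *	wait_for(I915_READ(SKL_FUSE_STATUS) &
  *		 SKL_FUSE_PG_DIST_STATUS(SKL_PW_TO_PG(SKL_DISP_PW_2)), 1);
  */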
 
 #define _CNL_AUX_REG_IDX(pw)           ((pw) - 9)
@@ -8522,8 +8898,8 @@ enum skl_power_gate {
                                                    _CNL_AUX_ANAOVRD1_C, \
                                                    _CNL_AUX_ANAOVRD1_D, \
                                                    _CNL_AUX_ANAOVRD1_F))
-#define   CNL_AUX_ANAOVRD1_ENABLE      (1<<16)
-#define   CNL_AUX_ANAOVRD1_LDO_BYPASS  (1<<23)
+#define   CNL_AUX_ANAOVRD1_ENABLE      (1 << 16)
+#define   CNL_AUX_ANAOVRD1_LDO_BYPASS  (1 << 23)
 
 /* HDCP Key Registers */
 #define HDCP_KEY_CONF                  _MMIO(0x66c00)
@@ -8568,7 +8944,7 @@ enum skl_power_gate {
 #define HDCP_SHA_V_PRIME_H2            _MMIO(0x66d0C)
 #define HDCP_SHA_V_PRIME_H3            _MMIO(0x66d10)
 #define HDCP_SHA_V_PRIME_H4            _MMIO(0x66d14)
-#define HDCP_SHA_V_PRIME(h)            _MMIO((0x66d04 + h * 4))
+#define HDCP_SHA_V_PRIME(h)            _MMIO((0x66d04 + (h) * 4))
 #define HDCP_SHA_TEXT                  _MMIO(0x66d18)
 
 /* HDCP Auth Registers */
@@ -8584,7 +8960,7 @@ enum skl_power_gate {
                                          _PORTC_HDCP_AUTHENC, \
                                          _PORTD_HDCP_AUTHENC, \
                                          _PORTE_HDCP_AUTHENC, \
-                                         _PORTF_HDCP_AUTHENC) + x)
+                                         _PORTF_HDCP_AUTHENC) + (x))
 #define PORT_HDCP_CONF(port)           _PORT_HDCP_AUTHENC(port, 0x0)
 #define  HDCP_CONF_CAPTURE_AN          BIT(0)
 #define  HDCP_CONF_AUTH_AND_ENC                (BIT(1) | BIT(0))
@@ -8605,7 +8981,7 @@ enum skl_power_gate {
 #define  HDCP_STATUS_R0_READY          BIT(18)
 #define  HDCP_STATUS_AN_READY          BIT(17)
 #define  HDCP_STATUS_CIPHER            BIT(16)
-#define  HDCP_STATUS_FRAME_CNT(x)      ((x >> 8) & 0xff)
+#define  HDCP_STATUS_FRAME_CNT(x)      (((x) >> 8) & 0xff)
 
 /* Per-pipe DDI Function Control */
 #define _TRANS_DDI_FUNC_CTL_A          0x60400
@@ -8614,37 +8990,37 @@ enum skl_power_gate {
 #define _TRANS_DDI_FUNC_CTL_EDP                0x6F400
 #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
 
-#define  TRANS_DDI_FUNC_ENABLE         (1<<31)
+#define  TRANS_DDI_FUNC_ENABLE         (1 << 31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  TRANS_DDI_PORT_MASK           (7<<28)
+#define  TRANS_DDI_PORT_MASK           (7 << 28)
 #define  TRANS_DDI_PORT_SHIFT          28
-#define  TRANS_DDI_SELECT_PORT(x)      ((x)<<28)
-#define  TRANS_DDI_PORT_NONE           (0<<28)
-#define  TRANS_DDI_MODE_SELECT_MASK    (7<<24)
-#define  TRANS_DDI_MODE_SELECT_HDMI    (0<<24)
-#define  TRANS_DDI_MODE_SELECT_DVI     (1<<24)
-#define  TRANS_DDI_MODE_SELECT_DP_SST  (2<<24)
-#define  TRANS_DDI_MODE_SELECT_DP_MST  (3<<24)
-#define  TRANS_DDI_MODE_SELECT_FDI     (4<<24)
-#define  TRANS_DDI_BPC_MASK            (7<<20)
-#define  TRANS_DDI_BPC_8               (0<<20)
-#define  TRANS_DDI_BPC_10              (1<<20)
-#define  TRANS_DDI_BPC_6               (2<<20)
-#define  TRANS_DDI_BPC_12              (3<<20)
-#define  TRANS_DDI_PVSYNC              (1<<17)
-#define  TRANS_DDI_PHSYNC              (1<<16)
-#define  TRANS_DDI_EDP_INPUT_MASK      (7<<12)
-#define  TRANS_DDI_EDP_INPUT_A_ON      (0<<12)
-#define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4<<12)
-#define  TRANS_DDI_EDP_INPUT_B_ONOFF   (5<<12)
-#define  TRANS_DDI_EDP_INPUT_C_ONOFF   (6<<12)
-#define  TRANS_DDI_HDCP_SIGNALLING     (1<<9)
-#define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
-#define  TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
-#define  TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
-#define  TRANS_DDI_BFI_ENABLE          (1<<4)
-#define  TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
-#define  TRANS_DDI_HDMI_SCRAMBLING     (1<<0)
+#define  TRANS_DDI_SELECT_PORT(x)      ((x) << 28)
+#define  TRANS_DDI_PORT_NONE           (0 << 28)
+#define  TRANS_DDI_MODE_SELECT_MASK    (7 << 24)
+#define  TRANS_DDI_MODE_SELECT_HDMI    (0 << 24)
+#define  TRANS_DDI_MODE_SELECT_DVI     (1 << 24)
+#define  TRANS_DDI_MODE_SELECT_DP_SST  (2 << 24)
+#define  TRANS_DDI_MODE_SELECT_DP_MST  (3 << 24)
+#define  TRANS_DDI_MODE_SELECT_FDI     (4 << 24)
+#define  TRANS_DDI_BPC_MASK            (7 << 20)
+#define  TRANS_DDI_BPC_8               (0 << 20)
+#define  TRANS_DDI_BPC_10              (1 << 20)
+#define  TRANS_DDI_BPC_6               (2 << 20)
+#define  TRANS_DDI_BPC_12              (3 << 20)
+#define  TRANS_DDI_PVSYNC              (1 << 17)
+#define  TRANS_DDI_PHSYNC              (1 << 16)
+#define  TRANS_DDI_EDP_INPUT_MASK      (7 << 12)
+#define  TRANS_DDI_EDP_INPUT_A_ON      (0 << 12)
+#define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4 << 12)
+#define  TRANS_DDI_EDP_INPUT_B_ONOFF   (5 << 12)
+#define  TRANS_DDI_EDP_INPUT_C_ONOFF   (6 << 12)
+#define  TRANS_DDI_HDCP_SIGNALLING     (1 << 9)
+#define  TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8)
+#define  TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7)
+#define  TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6)
+#define  TRANS_DDI_BFI_ENABLE          (1 << 4)
+#define  TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define  TRANS_DDI_HDMI_SCRAMBLING     (1 << 0)
 #define  TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
                                        | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
                                        | TRANS_DDI_HDMI_SCRAMBLING)
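 /*
  * Sketch (names assumed from driver context): a minimal HDMI enable on
  * transcoder "tran" driven by "port" composes the fields above, e.g.:
  *
  *	I915_WRITE(TRANS_DDI_FUNC_CTL(tran),
  *		   TRANS_DDI_FUNC_ENABLE | TRANS_DDI_SELECT_PORT(port) |
  *		   TRANS_DDI_MODE_SELECT_HDMI | TRANS_DDI_BPC_8);
  */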
@@ -8653,28 +9029,29 @@ enum skl_power_gate {
 #define _DP_TP_CTL_A                   0x64040
 #define _DP_TP_CTL_B                   0x64140
 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
-#define  DP_TP_CTL_ENABLE                      (1<<31)
-#define  DP_TP_CTL_MODE_SST                    (0<<27)
-#define  DP_TP_CTL_MODE_MST                    (1<<27)
-#define  DP_TP_CTL_FORCE_ACT                   (1<<25)
-#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE       (1<<18)
-#define  DP_TP_CTL_FDI_AUTOTRAIN               (1<<15)
-#define  DP_TP_CTL_LINK_TRAIN_MASK             (7<<8)
-#define  DP_TP_CTL_LINK_TRAIN_PAT1             (0<<8)
-#define  DP_TP_CTL_LINK_TRAIN_PAT2             (1<<8)
-#define  DP_TP_CTL_LINK_TRAIN_PAT3             (4<<8)
-#define  DP_TP_CTL_LINK_TRAIN_IDLE             (2<<8)
-#define  DP_TP_CTL_LINK_TRAIN_NORMAL           (3<<8)
-#define  DP_TP_CTL_SCRAMBLE_DISABLE            (1<<7)
+#define  DP_TP_CTL_ENABLE                      (1 << 31)
+#define  DP_TP_CTL_MODE_SST                    (0 << 27)
+#define  DP_TP_CTL_MODE_MST                    (1 << 27)
+#define  DP_TP_CTL_FORCE_ACT                   (1 << 25)
+#define  DP_TP_CTL_ENHANCED_FRAME_ENABLE       (1 << 18)
+#define  DP_TP_CTL_FDI_AUTOTRAIN               (1 << 15)
+#define  DP_TP_CTL_LINK_TRAIN_MASK             (7 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT1             (0 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT2             (1 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT3             (4 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT4             (5 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_IDLE             (2 << 8)
+#define  DP_TP_CTL_LINK_TRAIN_NORMAL           (3 << 8)
+#define  DP_TP_CTL_SCRAMBLE_DISABLE            (1 << 7)
 
 /* DisplayPort Transport Status */
 #define _DP_TP_STATUS_A                        0x64044
 #define _DP_TP_STATUS_B                        0x64144
 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
-#define  DP_TP_STATUS_IDLE_DONE                        (1<<25)
-#define  DP_TP_STATUS_ACT_SENT                 (1<<24)
-#define  DP_TP_STATUS_MODE_STATUS_MST          (1<<23)
-#define  DP_TP_STATUS_AUTOTRAIN_DONE           (1<<12)
+#define  DP_TP_STATUS_IDLE_DONE                        (1 << 25)
+#define  DP_TP_STATUS_ACT_SENT                 (1 << 24)
+#define  DP_TP_STATUS_MODE_STATUS_MST          (1 << 23)
+#define  DP_TP_STATUS_AUTOTRAIN_DONE           (1 << 12)
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC2      (3 << 8)
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC1      (3 << 4)
 #define  DP_TP_STATUS_PAYLOAD_MAPPING_VC0      (3 << 0)
@@ -8683,16 +9060,16 @@ enum skl_power_gate {
 #define _DDI_BUF_CTL_A                         0x64000
 #define _DDI_BUF_CTL_B                         0x64100
 #define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
-#define  DDI_BUF_CTL_ENABLE                    (1<<31)
+#define  DDI_BUF_CTL_ENABLE                    (1 << 31)
 #define  DDI_BUF_TRANS_SELECT(n)       ((n) << 24)
-#define  DDI_BUF_EMP_MASK                      (0xf<<24)
-#define  DDI_BUF_PORT_REVERSAL                 (1<<16)
-#define  DDI_BUF_IS_IDLE                       (1<<7)
-#define  DDI_A_4_LANES                         (1<<4)
+#define  DDI_BUF_EMP_MASK                      (0xf << 24)
+#define  DDI_BUF_PORT_REVERSAL                 (1 << 16)
+#define  DDI_BUF_IS_IDLE                       (1 << 7)
+#define  DDI_A_4_LANES                         (1 << 4)
 #define  DDI_PORT_WIDTH(width)                 (((width) - 1) << 1)
 #define  DDI_PORT_WIDTH_MASK                   (7 << 1)
 #define  DDI_PORT_WIDTH_SHIFT                  1
-#define  DDI_INIT_DISPLAY_DETECTED             (1<<0)
+#define  DDI_INIT_DISPLAY_DETECTED             (1 << 0)
 
 /* DDI Buffer Translations */
 #define _DDI_BUF_TRANS_A               0x64E00
@@ -8707,95 +9084,99 @@ enum skl_power_gate {
 #define SBI_ADDR                       _MMIO(0xC6000)
 #define SBI_DATA                       _MMIO(0xC6004)
 #define SBI_CTL_STAT                   _MMIO(0xC6008)
-#define  SBI_CTL_DEST_ICLK             (0x0<<16)
-#define  SBI_CTL_DEST_MPHY             (0x1<<16)
-#define  SBI_CTL_OP_IORD               (0x2<<8)
-#define  SBI_CTL_OP_IOWR               (0x3<<8)
-#define  SBI_CTL_OP_CRRD               (0x6<<8)
-#define  SBI_CTL_OP_CRWR               (0x7<<8)
-#define  SBI_RESPONSE_FAIL             (0x1<<1)
-#define  SBI_RESPONSE_SUCCESS          (0x0<<1)
-#define  SBI_BUSY                      (0x1<<0)
-#define  SBI_READY                     (0x0<<0)
+#define  SBI_CTL_DEST_ICLK             (0x0 << 16)
+#define  SBI_CTL_DEST_MPHY             (0x1 << 16)
+#define  SBI_CTL_OP_IORD               (0x2 << 8)
+#define  SBI_CTL_OP_IOWR               (0x3 << 8)
+#define  SBI_CTL_OP_CRRD               (0x6 << 8)
+#define  SBI_CTL_OP_CRWR               (0x7 << 8)
+#define  SBI_RESPONSE_FAIL             (0x1 << 1)
+#define  SBI_RESPONSE_SUCCESS          (0x0 << 1)
+#define  SBI_BUSY                      (0x1 << 0)
+#define  SBI_READY                     (0x0 << 0)
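+
+/*
+ * Sketch of a sideband (SBI) cross-request read built from the bits above;
+ * this mirrors what the driver's SBI helpers do, with locking, timeouts and
+ * error handling omitted. "reg" and "value" are assumed locals.
+ *
+ *	I915_WRITE(SBI_ADDR, (u32)reg << 16);
+ *	I915_WRITE(SBI_CTL_STAT,
+ *		   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);
+ *	... poll SBI_CTL_STAT until SBI_BUSY clears,
+ *	    then check SBI_RESPONSE_FAIL ...
+ *	value = I915_READ(SBI_DATA);
+ */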
 
 /* SBI offsets */
 #define  SBI_SSCDIVINTPHASE                    0x0200
 #define  SBI_SSCDIVINTPHASE6                   0x0600
 #define   SBI_SSCDIVINTPHASE_DIVSEL_SHIFT      1
-#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK       (0x7f<<1)
-#define   SBI_SSCDIVINTPHASE_DIVSEL(x)         ((x)<<1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL_MASK       (0x7f << 1)
+#define   SBI_SSCDIVINTPHASE_DIVSEL(x)         ((x) << 1)
 #define   SBI_SSCDIVINTPHASE_INCVAL_SHIFT      8
-#define   SBI_SSCDIVINTPHASE_INCVAL_MASK       (0x7f<<8)
-#define   SBI_SSCDIVINTPHASE_INCVAL(x)         ((x)<<8)
-#define   SBI_SSCDIVINTPHASE_DIR(x)            ((x)<<15)
-#define   SBI_SSCDIVINTPHASE_PROPAGATE         (1<<0)
+#define   SBI_SSCDIVINTPHASE_INCVAL_MASK       (0x7f << 8)
+#define   SBI_SSCDIVINTPHASE_INCVAL(x)         ((x) << 8)
+#define   SBI_SSCDIVINTPHASE_DIR(x)            ((x) << 15)
+#define   SBI_SSCDIVINTPHASE_PROPAGATE         (1 << 0)
 #define  SBI_SSCDITHPHASE                      0x0204
 #define  SBI_SSCCTL                            0x020c
 #define  SBI_SSCCTL6                           0x060C
-#define   SBI_SSCCTL_PATHALT                   (1<<3)
-#define   SBI_SSCCTL_DISABLE                   (1<<0)
+#define   SBI_SSCCTL_PATHALT                   (1 << 3)
+#define   SBI_SSCCTL_DISABLE                   (1 << 0)
 #define  SBI_SSCAUXDIV6                                0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT     4
-#define   SBI_SSCAUXDIV_FINALDIV2SEL_MASK      (1<<4)
-#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x)<<4)
+#define   SBI_SSCAUXDIV_FINALDIV2SEL_MASK      (1 << 4)
+#define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x) << 4)
 #define  SBI_DBUFF0                            0x2a00
 #define  SBI_GEN0                              0x1f00
-#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE      (1<<0)
+#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE      (1 << 0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE                    _MMIO(0xC6020)
-#define  PIXCLK_GATE_UNGATE            (1<<0)
-#define  PIXCLK_GATE_GATE              (0<<0)
+#define  PIXCLK_GATE_UNGATE            (1 << 0)
+#define  PIXCLK_GATE_GATE              (0 << 0)
 
 /* SPLL */
 #define SPLL_CTL                       _MMIO(0x46020)
-#define  SPLL_PLL_ENABLE               (1<<31)
-#define  SPLL_PLL_SSC                  (1<<28)
-#define  SPLL_PLL_NON_SSC              (2<<28)
-#define  SPLL_PLL_LCPLL                        (3<<28)
-#define  SPLL_PLL_REF_MASK             (3<<28)
-#define  SPLL_PLL_FREQ_810MHz          (0<<26)
-#define  SPLL_PLL_FREQ_1350MHz         (1<<26)
-#define  SPLL_PLL_FREQ_2700MHz         (2<<26)
-#define  SPLL_PLL_FREQ_MASK            (3<<26)
+#define  SPLL_PLL_ENABLE               (1 << 31)
+#define  SPLL_PLL_SSC                  (1 << 28)
+#define  SPLL_PLL_NON_SSC              (2 << 28)
+#define  SPLL_PLL_LCPLL                        (3 << 28)
+#define  SPLL_PLL_REF_MASK             (3 << 28)
+#define  SPLL_PLL_FREQ_810MHz          (0 << 26)
+#define  SPLL_PLL_FREQ_1350MHz         (1 << 26)
+#define  SPLL_PLL_FREQ_2700MHz         (2 << 26)
+#define  SPLL_PLL_FREQ_MASK            (3 << 26)
 
 /* WRPLL */
 #define _WRPLL_CTL1                    0x46040
 #define _WRPLL_CTL2                    0x46060
 #define WRPLL_CTL(pll)                 _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2)
-#define  WRPLL_PLL_ENABLE              (1<<31)
-#define  WRPLL_PLL_SSC                 (1<<28)
-#define  WRPLL_PLL_NON_SSC             (2<<28)
-#define  WRPLL_PLL_LCPLL               (3<<28)
-#define  WRPLL_PLL_REF_MASK            (3<<28)
+#define  WRPLL_PLL_ENABLE              (1 << 31)
+#define  WRPLL_PLL_SSC                 (1 << 28)
+#define  WRPLL_PLL_NON_SSC             (2 << 28)
+#define  WRPLL_PLL_LCPLL               (3 << 28)
+#define  WRPLL_PLL_REF_MASK            (3 << 28)
 /* WRPLL divider programming */
-#define  WRPLL_DIVIDER_REFERENCE(x)    ((x)<<0)
+#define  WRPLL_DIVIDER_REFERENCE(x)    ((x) << 0)
 #define  WRPLL_DIVIDER_REF_MASK                (0xff)
-#define  WRPLL_DIVIDER_POST(x)         ((x)<<8)
-#define  WRPLL_DIVIDER_POST_MASK       (0x3f<<8)
+#define  WRPLL_DIVIDER_POST(x)         ((x) << 8)
+#define  WRPLL_DIVIDER_POST_MASK       (0x3f << 8)
 #define  WRPLL_DIVIDER_POST_SHIFT      8
-#define  WRPLL_DIVIDER_FEEDBACK(x)     ((x)<<16)
+#define  WRPLL_DIVIDER_FEEDBACK(x)     ((x) << 16)
 #define  WRPLL_DIVIDER_FB_SHIFT                16
-#define  WRPLL_DIVIDER_FB_MASK         (0xff<<16)
+#define  WRPLL_DIVIDER_FB_MASK         (0xff << 16)
 
 /* Port clock selection */
 #define _PORT_CLK_SEL_A                        0x46100
 #define _PORT_CLK_SEL_B                        0x46104
 #define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define  PORT_CLK_SEL_LCPLL_2700       (0<<29)
-#define  PORT_CLK_SEL_LCPLL_1350       (1<<29)
-#define  PORT_CLK_SEL_LCPLL_810                (2<<29)
-#define  PORT_CLK_SEL_SPLL             (3<<29)
-#define  PORT_CLK_SEL_WRPLL(pll)       (((pll)+4)<<29)
-#define  PORT_CLK_SEL_WRPLL1           (4<<29)
-#define  PORT_CLK_SEL_WRPLL2           (5<<29)
-#define  PORT_CLK_SEL_NONE             (7<<29)
-#define  PORT_CLK_SEL_MASK             (7<<29)
+#define  PORT_CLK_SEL_LCPLL_2700       (0 << 29)
+#define  PORT_CLK_SEL_LCPLL_1350       (1 << 29)
+#define  PORT_CLK_SEL_LCPLL_810                (2 << 29)
+#define  PORT_CLK_SEL_SPLL             (3 << 29)
+#define  PORT_CLK_SEL_WRPLL(pll)       (((pll) + 4) << 29)
+#define  PORT_CLK_SEL_WRPLL1           (4 << 29)
+#define  PORT_CLK_SEL_WRPLL2           (5 << 29)
+#define  PORT_CLK_SEL_NONE             (7 << 29)
+#define  PORT_CLK_SEL_MASK             (7 << 29)
 
 /* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
 #define DDI_CLK_SEL(port)              PORT_CLK_SEL(port)
 #define  DDI_CLK_SEL_NONE              (0x0 << 28)
 #define  DDI_CLK_SEL_MG                        (0x8 << 28)
+#define  DDI_CLK_SEL_TBT_162           (0xC << 28)
+#define  DDI_CLK_SEL_TBT_270           (0xD << 28)
+#define  DDI_CLK_SEL_TBT_540           (0xE << 28)
+#define  DDI_CLK_SEL_TBT_810           (0xF << 28)
 #define  DDI_CLK_SEL_MASK              (0xF << 28)
 
 /* Transcoder clock selection */
@@ -8803,8 +9184,8 @@ enum skl_power_gate {
 #define _TRANS_CLK_SEL_B               0x46144
 #define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B)
 /* For each transcoder, we need to select the corresponding port clock */
-#define  TRANS_CLK_SEL_DISABLED                (0x0<<29)
-#define  TRANS_CLK_SEL_PORT(x)         (((x)+1)<<29)
+#define  TRANS_CLK_SEL_DISABLED                (0x0 << 29)
+#define  TRANS_CLK_SEL_PORT(x)         (((x) + 1) << 29)
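+/*
+ * Example (sketch): to feed transcoder A from port B's clock, following the
+ * +1 encoding of TRANS_CLK_SEL_PORT() above (TRANSCODER_A and PORT_B are
+ * assumed from the driver's enums):
+ *
+ *	I915_WRITE(TRANS_CLK_SEL(TRANSCODER_A), TRANS_CLK_SEL_PORT(PORT_B));
+ */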
 
 #define CDCLK_FREQ                     _MMIO(0x46200)
 
@@ -8814,28 +9195,28 @@ enum skl_power_gate {
 #define _TRANS_EDP_MSA_MISC            0x6f410
 #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
 
-#define  TRANS_MSA_SYNC_CLK            (1<<0)
-#define  TRANS_MSA_6_BPC               (0<<5)
-#define  TRANS_MSA_8_BPC               (1<<5)
-#define  TRANS_MSA_10_BPC              (2<<5)
-#define  TRANS_MSA_12_BPC              (3<<5)
-#define  TRANS_MSA_16_BPC              (4<<5)
+#define  TRANS_MSA_SYNC_CLK            (1 << 0)
+#define  TRANS_MSA_6_BPC               (0 << 5)
+#define  TRANS_MSA_8_BPC               (1 << 5)
+#define  TRANS_MSA_10_BPC              (2 << 5)
+#define  TRANS_MSA_12_BPC              (3 << 5)
+#define  TRANS_MSA_16_BPC              (4 << 5)
 
 /* LCPLL Control */
 #define LCPLL_CTL                      _MMIO(0x130040)
-#define  LCPLL_PLL_DISABLE             (1<<31)
-#define  LCPLL_PLL_LOCK                        (1<<30)
-#define  LCPLL_CLK_FREQ_MASK           (3<<26)
-#define  LCPLL_CLK_FREQ_450            (0<<26)
-#define  LCPLL_CLK_FREQ_54O_BDW                (1<<26)
-#define  LCPLL_CLK_FREQ_337_5_BDW      (2<<26)
-#define  LCPLL_CLK_FREQ_675_BDW                (3<<26)
-#define  LCPLL_CD_CLOCK_DISABLE                (1<<25)
-#define  LCPLL_ROOT_CD_CLOCK_DISABLE   (1<<24)
-#define  LCPLL_CD2X_CLOCK_DISABLE      (1<<23)
-#define  LCPLL_POWER_DOWN_ALLOW                (1<<22)
-#define  LCPLL_CD_SOURCE_FCLK          (1<<21)
-#define  LCPLL_CD_SOURCE_FCLK_DONE     (1<<19)
+#define  LCPLL_PLL_DISABLE             (1 << 31)
+#define  LCPLL_PLL_LOCK                        (1 << 30)
+#define  LCPLL_CLK_FREQ_MASK           (3 << 26)
+#define  LCPLL_CLK_FREQ_450            (0 << 26)
+#define  LCPLL_CLK_FREQ_54O_BDW                (1 << 26)
+#define  LCPLL_CLK_FREQ_337_5_BDW      (2 << 26)
+#define  LCPLL_CLK_FREQ_675_BDW                (3 << 26)
+#define  LCPLL_CD_CLOCK_DISABLE                (1 << 25)
+#define  LCPLL_ROOT_CD_CLOCK_DISABLE   (1 << 24)
+#define  LCPLL_CD2X_CLOCK_DISABLE      (1 << 23)
+#define  LCPLL_POWER_DOWN_ALLOW                (1 << 22)
+#define  LCPLL_CD_SOURCE_FCLK          (1 << 21)
+#define  LCPLL_CD_SOURCE_FCLK_DONE     (1 << 19)
 
 /*
  * SKL Clocks
@@ -8863,16 +9244,16 @@ enum skl_power_gate {
 /* LCPLL_CTL */
 #define LCPLL1_CTL             _MMIO(0x46010)
 #define LCPLL2_CTL             _MMIO(0x46014)
-#define  LCPLL_PLL_ENABLE      (1<<31)
+#define  LCPLL_PLL_ENABLE      (1 << 31)
 
 /* DPLL control1 */
 #define DPLL_CTRL1             _MMIO(0x6C058)
-#define  DPLL_CTRL1_HDMI_MODE(id)              (1<<((id)*6+5))
-#define  DPLL_CTRL1_SSC(id)                    (1<<((id)*6+4))
-#define  DPLL_CTRL1_LINK_RATE_MASK(id)         (7<<((id)*6+1))
-#define  DPLL_CTRL1_LINK_RATE_SHIFT(id)                ((id)*6+1)
-#define  DPLL_CTRL1_LINK_RATE(linkrate, id)    ((linkrate)<<((id)*6+1))
-#define  DPLL_CTRL1_OVERRIDE(id)               (1<<((id)*6))
+#define  DPLL_CTRL1_HDMI_MODE(id)              (1 << ((id) * 6 + 5))
+#define  DPLL_CTRL1_SSC(id)                    (1 << ((id) * 6 + 4))
+#define  DPLL_CTRL1_LINK_RATE_MASK(id)         (7 << ((id) * 6 + 1))
+#define  DPLL_CTRL1_LINK_RATE_SHIFT(id)                ((id) * 6 + 1)
+#define  DPLL_CTRL1_LINK_RATE(linkrate, id)    ((linkrate) << ((id) * 6 + 1))
+#define  DPLL_CTRL1_OVERRIDE(id)               (1 << ((id) * 6))
 #define  DPLL_CTRL1_LINK_RATE_2700             0
 #define  DPLL_CTRL1_LINK_RATE_1350             1
 #define  DPLL_CTRL1_LINK_RATE_810              2
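 /*
  * Sketch of selecting a link rate on DPLL "id" via the per-id fields above
  * (read-modify-write; names assumed from the driver context):
  *
  *	val = I915_READ(DPLL_CTRL1);
  *	val &= ~(DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) |
  *		 DPLL_CTRL1_LINK_RATE_MASK(id));
  *	val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, id) |
  *	       DPLL_CTRL1_OVERRIDE(id);
  *	I915_WRITE(DPLL_CTRL1, val);
  */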
@@ -8882,43 +9263,43 @@ enum skl_power_gate {
 
 /* DPLL control2 */
 #define DPLL_CTRL2                             _MMIO(0x6C05C)
-#define  DPLL_CTRL2_DDI_CLK_OFF(port)          (1<<((port)+15))
-#define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)     (3<<((port)*3+1))
-#define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)    ((port)*3+1)
-#define  DPLL_CTRL2_DDI_CLK_SEL(clk, port)     ((clk)<<((port)*3+1))
-#define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)     (1<<((port)*3))
+#define  DPLL_CTRL2_DDI_CLK_OFF(port)          (1 << ((port) + 15))
+#define  DPLL_CTRL2_DDI_CLK_SEL_MASK(port)     (3 << ((port) * 3 + 1))
+#define  DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port)    ((port) * 3 + 1)
+#define  DPLL_CTRL2_DDI_CLK_SEL(clk, port)     ((clk) << ((port) * 3 + 1))
+#define  DPLL_CTRL2_DDI_SEL_OVERRIDE(port)     (1 << ((port) * 3))
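+
+/*
+ * Sketch: route the DDI clock of "port" from DPLL "id" and ungate it,
+ * using the per-port fields above (error paths and locking omitted):
+ *
+ *	val = I915_READ(DPLL_CTRL2);
+ *	val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+ *		 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ *	val |= DPLL_CTRL2_DDI_CLK_SEL(id, port) |
+ *	       DPLL_CTRL2_DDI_SEL_OVERRIDE(port);
+ *	I915_WRITE(DPLL_CTRL2, val);
+ */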
 
 /* DPLL Status */
 #define DPLL_STATUS    _MMIO(0x6C060)
-#define  DPLL_LOCK(id) (1<<((id)*8))
+#define  DPLL_LOCK(id) (1 << ((id) * 8))
 
 /* DPLL cfg */
 #define _DPLL1_CFGCR1  0x6C040
 #define _DPLL2_CFGCR1  0x6C048
 #define _DPLL3_CFGCR1  0x6C050
-#define  DPLL_CFGCR1_FREQ_ENABLE       (1<<31)
-#define  DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
-#define  DPLL_CFGCR1_DCO_FRACTION(x)   ((x)<<9)
+#define  DPLL_CFGCR1_FREQ_ENABLE       (1 << 31)
+#define  DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9)
+#define  DPLL_CFGCR1_DCO_FRACTION(x)   ((x) << 9)
 #define  DPLL_CFGCR1_DCO_INTEGER_MASK  (0x1ff)
 
 #define _DPLL1_CFGCR2  0x6C044
 #define _DPLL2_CFGCR2  0x6C04C
 #define _DPLL3_CFGCR2  0x6C054
-#define  DPLL_CFGCR2_QDIV_RATIO_MASK   (0xff<<8)
-#define  DPLL_CFGCR2_QDIV_RATIO(x)     ((x)<<8)
-#define  DPLL_CFGCR2_QDIV_MODE(x)      ((x)<<7)
-#define  DPLL_CFGCR2_KDIV_MASK         (3<<5)
-#define  DPLL_CFGCR2_KDIV(x)           ((x)<<5)
-#define  DPLL_CFGCR2_KDIV_5 (0<<5)
-#define  DPLL_CFGCR2_KDIV_2 (1<<5)
-#define  DPLL_CFGCR2_KDIV_3 (2<<5)
-#define  DPLL_CFGCR2_KDIV_1 (3<<5)
-#define  DPLL_CFGCR2_PDIV_MASK         (7<<2)
-#define  DPLL_CFGCR2_PDIV(x)           ((x)<<2)
-#define  DPLL_CFGCR2_PDIV_1 (0<<2)
-#define  DPLL_CFGCR2_PDIV_2 (1<<2)
-#define  DPLL_CFGCR2_PDIV_3 (2<<2)
-#define  DPLL_CFGCR2_PDIV_7 (4<<2)
+#define  DPLL_CFGCR2_QDIV_RATIO_MASK   (0xff << 8)
+#define  DPLL_CFGCR2_QDIV_RATIO(x)     ((x) << 8)
+#define  DPLL_CFGCR2_QDIV_MODE(x)      ((x) << 7)
+#define  DPLL_CFGCR2_KDIV_MASK         (3 << 5)
+#define  DPLL_CFGCR2_KDIV(x)           ((x) << 5)
+#define  DPLL_CFGCR2_KDIV_5 (0 << 5)
+#define  DPLL_CFGCR2_KDIV_2 (1 << 5)
+#define  DPLL_CFGCR2_KDIV_3 (2 << 5)
+#define  DPLL_CFGCR2_KDIV_1 (3 << 5)
+#define  DPLL_CFGCR2_PDIV_MASK         (7 << 2)
+#define  DPLL_CFGCR2_PDIV(x)           ((x) << 2)
+#define  DPLL_CFGCR2_PDIV_1 (0 << 2)
+#define  DPLL_CFGCR2_PDIV_2 (1 << 2)
+#define  DPLL_CFGCR2_PDIV_3 (2 << 2)
+#define  DPLL_CFGCR2_PDIV_7 (4 << 2)
 #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
 #define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
@@ -8930,9 +9311,9 @@ enum skl_power_gate {
 #define DPCLKA_CFGCR0                          _MMIO(0x6C200)
 #define DPCLKA_CFGCR0_ICL                      _MMIO(0x164280)
 #define  DPCLKA_CFGCR0_DDI_CLK_OFF(port)       (1 << ((port) ==  PORT_F ? 23 : \
-                                                     (port)+10))
+                                                     (port) + 10))
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
-                                               (port)*2)
+                                               (port) * 2)
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)  (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
 #define  DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port)  ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
 
@@ -8945,6 +9326,8 @@ enum skl_power_gate {
 #define  PLL_POWER_STATE       (1 << 26)
 #define CNL_DPLL_ENABLE(pll)   _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE)
 
+#define TBT_PLL_ENABLE         _MMIO(0x46020)
+
 #define _MG_PLL1_ENABLE                0x46030
 #define _MG_PLL2_ENABLE                0x46034
 #define _MG_PLL3_ENABLE                0x46038
@@ -8958,6 +9341,7 @@ enum skl_power_gate {
 #define _MG_REFCLKIN_CTL_PORT3                         0x16A92C
 #define _MG_REFCLKIN_CTL_PORT4                         0x16B92C
 #define   MG_REFCLKIN_CTL_OD_2_MUX(x)                  ((x) << 8)
+#define   MG_REFCLKIN_CTL_OD_2_MUX_MASK                        (0x7 << 8)
 #define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \
                                         _MG_REFCLKIN_CTL_PORT1, \
                                         _MG_REFCLKIN_CTL_PORT2)
@@ -8967,7 +9351,9 @@ enum skl_power_gate {
 #define _MG_CLKTOP2_CORECLKCTL1_PORT3                  0x16A8D8
 #define _MG_CLKTOP2_CORECLKCTL1_PORT4                  0x16B8D8
 #define   MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x)         ((x) << 16)
+#define   MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK       (0xff << 16)
 #define   MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x)         ((x) << 8)
+#define   MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK       (0xff << 8)
 #define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \
                                                _MG_CLKTOP2_CORECLKCTL1_PORT1, \
                                                _MG_CLKTOP2_CORECLKCTL1_PORT2)
@@ -8977,9 +9363,13 @@ enum skl_power_gate {
 #define _MG_CLKTOP2_HSCLKCTL_PORT3                     0x16A8D4
 #define _MG_CLKTOP2_HSCLKCTL_PORT4                     0x16B8D4
 #define   MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x)         ((x) << 16)
+#define   MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK       (0x1 << 16)
 #define   MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x)       ((x) << 14)
+#define   MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK     (0x3 << 14)
 #define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x)           ((x) << 12)
+#define   MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK         (0x3 << 12)
 #define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x)           ((x) << 8)
+#define   MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK         (0xf << 8)
 #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \
                                             _MG_CLKTOP2_HSCLKCTL_PORT1, \
                                             _MG_CLKTOP2_HSCLKCTL_PORT2)
@@ -9053,12 +9443,18 @@ enum skl_power_gate {
 #define _MG_PLL_BIAS_PORT3                             0x16AA14
 #define _MG_PLL_BIAS_PORT4                             0x16BA14
 #define   MG_PLL_BIAS_BIAS_GB_SEL(x)                   ((x) << 30)
+#define   MG_PLL_BIAS_BIAS_GB_SEL_MASK                 (0x3 << 30)
 #define   MG_PLL_BIAS_INIT_DCOAMP(x)                   ((x) << 24)
+#define   MG_PLL_BIAS_INIT_DCOAMP_MASK                 (0x3f << 24)
 #define   MG_PLL_BIAS_BIAS_BONUS(x)                    ((x) << 16)
+#define   MG_PLL_BIAS_BIAS_BONUS_MASK                  (0xff << 16)
 #define   MG_PLL_BIAS_BIASCAL_EN                       (1 << 15)
 #define   MG_PLL_BIAS_CTRIM(x)                         ((x) << 8)
+#define   MG_PLL_BIAS_CTRIM_MASK                       (0x1f << 8)
 #define   MG_PLL_BIAS_VREF_RDAC(x)                     ((x) << 5)
+#define   MG_PLL_BIAS_VREF_RDAC_MASK                   (0x7 << 5)
 #define   MG_PLL_BIAS_IREFTRIM(x)                      ((x) << 0)
+#define   MG_PLL_BIAS_IREFTRIM_MASK                    (0x1f << 0)
 #define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \
                                     _MG_PLL_BIAS_PORT2)
 
@@ -9100,13 +9496,16 @@ enum skl_power_gate {
 #define  DPLL_CFGCR1_QDIV_RATIO_MASK   (0xff << 10)
 #define  DPLL_CFGCR1_QDIV_RATIO_SHIFT  (10)
 #define  DPLL_CFGCR1_QDIV_RATIO(x)     ((x) << 10)
+#define  DPLL_CFGCR1_QDIV_MODE_SHIFT   (9)
 #define  DPLL_CFGCR1_QDIV_MODE(x)      ((x) << 9)
 #define  DPLL_CFGCR1_KDIV_MASK         (7 << 6)
+#define  DPLL_CFGCR1_KDIV_SHIFT                (6)
 #define  DPLL_CFGCR1_KDIV(x)           ((x) << 6)
 #define  DPLL_CFGCR1_KDIV_1            (1 << 6)
 #define  DPLL_CFGCR1_KDIV_2            (2 << 6)
 #define  DPLL_CFGCR1_KDIV_4            (4 << 6)
 #define  DPLL_CFGCR1_PDIV_MASK         (0xf << 2)
+#define  DPLL_CFGCR1_PDIV_SHIFT                (2)
 #define  DPLL_CFGCR1_PDIV(x)           ((x) << 2)
 #define  DPLL_CFGCR1_PDIV_2            (1 << 2)
 #define  DPLL_CFGCR1_PDIV_3            (2 << 2)
@@ -9140,22 +9539,22 @@ enum skl_power_gate {
 /* GEN9 DC */
 #define DC_STATE_EN                    _MMIO(0x45504)
 #define  DC_STATE_DISABLE              0
-#define  DC_STATE_EN_UPTO_DC5          (1<<0)
-#define  DC_STATE_EN_DC9               (1<<3)
-#define  DC_STATE_EN_UPTO_DC6          (2<<0)
+#define  DC_STATE_EN_UPTO_DC5          (1 << 0)
+#define  DC_STATE_EN_DC9               (1 << 3)
+#define  DC_STATE_EN_UPTO_DC6          (2 << 0)
 #define  DC_STATE_EN_UPTO_DC5_DC6_MASK   0x3
 
 #define  DC_STATE_DEBUG                  _MMIO(0x45520)
-#define  DC_STATE_DEBUG_MASK_CORES     (1<<0)
-#define  DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+#define  DC_STATE_DEBUG_MASK_CORES     (1 << 0)
+#define  DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
 
 /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  * since on HSW we can't write to it using I915_WRITE. */
 #define D_COMP_HSW                     _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
 #define D_COMP_BDW                     _MMIO(0x138144)
-#define  D_COMP_RCOMP_IN_PROGRESS      (1<<9)
-#define  D_COMP_COMP_FORCE             (1<<8)
-#define  D_COMP_COMP_DISABLE           (1<<0)
+#define  D_COMP_RCOMP_IN_PROGRESS      (1 << 9)
+#define  D_COMP_COMP_FORCE             (1 << 8)
+#define  D_COMP_COMP_DISABLE           (1 << 0)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define _PIPE_WM_LINETIME_A            0x45270
@@ -9163,27 +9562,27 @@ enum skl_power_gate {
 #define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
 #define   PIPE_WM_LINETIME_MASK                        (0x1ff)
 #define   PIPE_WM_LINETIME_TIME(x)             ((x))
-#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK   (0x1ff<<16)
-#define   PIPE_WM_LINETIME_IPS_LINETIME(x)     ((x)<<16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME_MASK   (0x1ff << 16)
+#define   PIPE_WM_LINETIME_IPS_LINETIME(x)     ((x) << 16)
 
 /* SFUSE_STRAP */
 #define SFUSE_STRAP                    _MMIO(0xc2014)
-#define  SFUSE_STRAP_FUSE_LOCK         (1<<13)
-#define  SFUSE_STRAP_RAW_FREQUENCY     (1<<8)
-#define  SFUSE_STRAP_DISPLAY_DISABLED  (1<<7)
-#define  SFUSE_STRAP_CRT_DISABLED      (1<<6)
-#define  SFUSE_STRAP_DDIF_DETECTED     (1<<3)
-#define  SFUSE_STRAP_DDIB_DETECTED     (1<<2)
-#define  SFUSE_STRAP_DDIC_DETECTED     (1<<1)
-#define  SFUSE_STRAP_DDID_DETECTED     (1<<0)
+#define  SFUSE_STRAP_FUSE_LOCK         (1 << 13)
+#define  SFUSE_STRAP_RAW_FREQUENCY     (1 << 8)
+#define  SFUSE_STRAP_DISPLAY_DISABLED  (1 << 7)
+#define  SFUSE_STRAP_CRT_DISABLED      (1 << 6)
+#define  SFUSE_STRAP_DDIF_DETECTED     (1 << 3)
+#define  SFUSE_STRAP_DDIB_DETECTED     (1 << 2)
+#define  SFUSE_STRAP_DDIC_DETECTED     (1 << 1)
+#define  SFUSE_STRAP_DDID_DETECTED     (1 << 0)
 
 #define WM_MISC                                _MMIO(0x45260)
 #define  WM_MISC_DATA_PARTITION_5_6    (1 << 0)
 
 #define WM_DBG                         _MMIO(0x45280)
-#define  WM_DBG_DISALLOW_MULTIPLE_LP   (1<<0)
-#define  WM_DBG_DISALLOW_MAXFIFO       (1<<1)
-#define  WM_DBG_DISALLOW_SPRITE                (1<<2)
+#define  WM_DBG_DISALLOW_MULTIPLE_LP   (1 << 0)
+#define  WM_DBG_DISALLOW_MAXFIFO       (1 << 1)
+#define  WM_DBG_DISALLOW_SPRITE                (1 << 2)
 
 /* pipe CSC */
 #define _PIPE_A_CSC_COEFF_RY_GY        0x49010
@@ -9309,6 +9708,22 @@ enum skl_power_gate {
 #define MIPIO_TXESC_CLK_DIV2                   _MMIO(0x160008)
 #define  GLK_TX_ESC_CLK_DIV2_MASK                      0x3FF
 
+#define _ICL_DSI_ESC_CLK_DIV0          0x6b090
+#define _ICL_DSI_ESC_CLK_DIV1          0x6b890
+#define ICL_DSI_ESC_CLK_DIV(port)      _MMIO_PORT((port),      \
+                                                       _ICL_DSI_ESC_CLK_DIV0, \
+                                                       _ICL_DSI_ESC_CLK_DIV1)
+#define _ICL_DPHY_ESC_CLK_DIV0         0x162190
+#define _ICL_DPHY_ESC_CLK_DIV1         0x6C190
+#define ICL_DPHY_ESC_CLK_DIV(port)     _MMIO_PORT((port),      \
+                                               _ICL_DPHY_ESC_CLK_DIV0, \
+                                               _ICL_DPHY_ESC_CLK_DIV1)
+#define  ICL_BYTE_CLK_PER_ESC_CLK_MASK         (0x1f << 16)
+#define  ICL_BYTE_CLK_PER_ESC_CLK_SHIFT        16
+#define  ICL_ESC_CLK_DIV_MASK                  0x1ff
+#define  ICL_ESC_CLK_DIV_SHIFT                 0
+#define DSI_MAX_ESC_CLK                        20000           /* in KHz */
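+
+/*
+ * Example (sketch, assumed derivation): the escape clock divider is chosen
+ * so the escape clock does not exceed DSI_MAX_ESC_CLK; "afe_clk_khz" is an
+ * assumed local holding the AFE clock in kHz.
+ *
+ *	esc_clk_div = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+ *	I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
+ *		   esc_clk_div & ICL_ESC_CLK_DIV_MASK);
+ */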
+
 /* Gen4+ Timestamp and Pipe Frame time stamp registers */
 #define GEN4_TIMESTAMP         _MMIO(0x2358)
 #define ILK_TIMESTAMP_HI       _MMIO(0x70070)
@@ -9346,7 +9761,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
                                        BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
 #define  BXT_MIPI_TX_ESCLK_DIVIDER(port, val)  \
-               ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+               (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
 /* RX upper control divider to select actual RX clock output from 8x */
 #define  BXT_MIPI1_RX_ESCLK_UPPER_SHIFT                21
 #define  BXT_MIPI2_RX_ESCLK_UPPER_SHIFT                5
@@ -9359,7 +9774,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
                                        BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
 #define  BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val)    \
-               ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+               (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
 /* 8/3X divider to select the actual 8/3X clock output from 8x */
 #define  BXT_MIPI1_8X_BY3_SHIFT                19
 #define  BXT_MIPI2_8X_BY3_SHIFT                3
@@ -9372,7 +9787,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
                                                BXT_MIPI2_8X_BY3_DIVIDER_MASK)
 #define  BXT_MIPI_8X_BY3_DIVIDER(port, val)    \
-                       ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+                       (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
 /* RX lower control divider to select actual RX clock output from 8x */
 #define  BXT_MIPI1_RX_ESCLK_LOWER_SHIFT                16
 #define  BXT_MIPI2_RX_ESCLK_LOWER_SHIFT                0
@@ -9385,7 +9800,7 @@ enum skl_power_gate {
                        _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
                                        BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
 #define  BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val)    \
-               ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+               (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
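+
+/*
+ * Example (sketch): the TX and RX escape-clock divider fields above are
+ * ORed together into the BXT MIPI clock control register (defined elsewhere
+ * in this file); tx_div, rx_div_upper and rx_div_lower are assumed locals.
+ *
+ *	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) |
+ *		 BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) |
+ *		 BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
+ *	tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div) |
+ *	       BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper) |
+ *	       BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
+ */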
 
 #define RX_DIVIDER_BIT_1_2                     0x3
 #define RX_DIVIDER_BIT_3_4                     0xC
@@ -9443,6 +9858,14 @@ enum skl_power_gate {
 #define _BXT_MIPIC_PORT_CTRL                           0x6B8C0
 #define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
 
+/* ICL DSI MODE control */
+#define _ICL_DSI_IO_MODECTL_0                          0x6B094
+#define _ICL_DSI_IO_MODECTL_1                          0x6B894
+#define ICL_DSI_IO_MODECTL(port)       _MMIO_PORT(port,        \
+                                                   _ICL_DSI_IO_MODECTL_0, \
+                                                   _ICL_DSI_IO_MODECTL_1)
+#define  COMBO_PHY_MODE_DSI                            (1 << 0)
+
 #define BXT_P_DSI_REGULATOR_CFG                        _MMIO(0x160020)
 #define  STAP_SELECT                                   (1 << 0)
 
@@ -9922,4 +10345,310 @@ enum skl_power_gate {
                                                 _ICL_PHY_MISC_B)
 #define  ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN      (1 << 23)
 
+/* Icelake Display Stream Compression Registers */
+#define DSCA_PICTURE_PARAMETER_SET_0           0x6B200
+#define DSCC_PICTURE_PARAMETER_SET_0           0x6BA00
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB   0x78270
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB   0x78370
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC   0x78470
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC   0x78570
+#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
+#define  DSC_VBR_ENABLE                        (1 << 19)
+#define  DSC_422_ENABLE                        (1 << 18)
+#define  DSC_COLOR_SPACE_CONVERSION    (1 << 17)
+#define  DSC_BLOCK_PREDICTION          (1 << 16)
+#define  DSC_LINE_BUF_DEPTH_SHIFT      12
+#define  DSC_BPC_SHIFT                 8
+#define  DSC_VER_MIN_SHIFT             4
+#define  DSC_VER_MAJ                   (0x1 << 0)
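+
+/*
+ * Sketch of packing PPS0 from DSC parameters; "vdsc_cfg" is an assumed
+ * config struct, not something defined by this patch.
+ *
+ *	pps0 = DSC_VER_MAJ |
+ *	       (vdsc_cfg->dsc_version_minor << DSC_VER_MIN_SHIFT) |
+ *	       (vdsc_cfg->bits_per_component << DSC_BPC_SHIFT) |
+ *	       (vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT);
+ *	if (vdsc_cfg->block_pred_enable)
+ *		pps0 |= DSC_BLOCK_PREDICTION;
+ */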
+
+#define DSCA_PICTURE_PARAMETER_SET_1           0x6B204
+#define DSCC_PICTURE_PARAMETER_SET_1           0x6BA04
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB   0x78274
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB   0x78374
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC   0x78474
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC   0x78574
+#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
+#define  DSC_BPP(bpp)                          ((bpp) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_2           0x6B208
+#define DSCC_PICTURE_PARAMETER_SET_2           0x6BA08
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB   0x78278
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB   0x78378
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC   0x78478
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC   0x78578
+#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
+#define  DSC_PIC_WIDTH(pic_width)      ((pic_width) << 16)
+#define  DSC_PIC_HEIGHT(pic_height)    ((pic_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_3           0x6B20C
+#define DSCC_PICTURE_PARAMETER_SET_3           0x6BA0C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB   0x7827C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB   0x7837C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC   0x7847C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC   0x7857C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
+#define  DSC_SLICE_WIDTH(slice_width)   ((slice_width) << 16)
+#define  DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_4           0x6B210
+#define DSCC_PICTURE_PARAMETER_SET_4           0x6BA10
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB   0x78280
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB   0x78380
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC   0x78480
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC   0x78580
+#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
+#define  DSC_INITIAL_DEC_DELAY(dec_delay)       ((dec_delay) << 16)
+#define  DSC_INITIAL_XMIT_DELAY(xmit_delay)     ((xmit_delay) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_5           0x6B214
+#define DSCC_PICTURE_PARAMETER_SET_5           0x6BA14
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB   0x78284
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB   0x78384
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC   0x78484
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC   0x78584
+#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
+#define  DSC_SCALE_DEC_INT(scale_dec)          ((scale_dec) << 16)
+#define  DSC_SCALE_INC_INT(scale_inc)          ((scale_inc) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_6           0x6B218
+#define DSCC_PICTURE_PARAMETER_SET_6           0x6BA18
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB   0x78288
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB   0x78388
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC   0x78488
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC   0x78588
+#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
+#define  DSC_FLATNESS_MAX_QP(max_qp)           ((max_qp) << 24)
+#define  DSC_FLATNESS_MIN_QP(min_qp)           ((min_qp) << 16)
+#define  DSC_FIRST_LINE_BPG_OFFSET(offset)     ((offset) << 8)
+#define  DSC_INITIAL_SCALE_VALUE(value)                ((value) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_7           0x6B21C
+#define DSCC_PICTURE_PARAMETER_SET_7           0x6BA1C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB   0x7828C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB   0x7838C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC   0x7848C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC   0x7858C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
+                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
+                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
+#define  DSC_NFL_BPG_OFFSET(bpg_offset)                ((bpg_offset) << 16)
+#define  DSC_SLICE_BPG_OFFSET(bpg_offset)      ((bpg_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_8           0x6B220
+#define DSCC_PICTURE_PARAMETER_SET_8           0x6BA20
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB   0x78290
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB   0x78390
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC   0x78490
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC   0x78590
+#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
+#define  DSC_INITIAL_OFFSET(initial_offset)            ((initial_offset) << 16)
+#define  DSC_FINAL_OFFSET(final_offset)                        ((final_offset) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_9           0x6B224
+#define DSCC_PICTURE_PARAMETER_SET_9           0x6BA24
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB   0x78294
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB   0x78394
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC   0x78494
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC   0x78594
+#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
+#define  DSC_RC_EDGE_FACTOR(rc_edge_fact)      ((rc_edge_fact) << 16)
+#define  DSC_RC_MODEL_SIZE(rc_model_size)      ((rc_model_size) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_10          0x6B228
+#define DSCC_PICTURE_PARAMETER_SET_10          0x6BA28
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB  0x78298
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB  0x78398
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC  0x78498
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC  0x78598
+#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
+#define  DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low)         ((rc_tgt_off_low) << 20)
+#define  DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high)       ((rc_tgt_off_high) << 16)
+#define  DSC_RC_QUANT_INC_LIMIT1(lim)                  ((lim) << 8)
+#define  DSC_RC_QUANT_INC_LIMIT0(lim)                  ((lim) << 0)
+
+#define DSCA_PICTURE_PARAMETER_SET_11          0x6B22C
+#define DSCC_PICTURE_PARAMETER_SET_11          0x6BA2C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB  0x7829C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB  0x7839C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC  0x7849C
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC  0x7859C
+#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_12          0x6B260
+#define DSCC_PICTURE_PARAMETER_SET_12          0x6BA60
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB  0x782A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB  0x783A0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC  0x784A0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC  0x785A0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_13          0x6B264
+#define DSCC_PICTURE_PARAMETER_SET_13          0x6BA64
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB  0x782A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB  0x783A4
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC  0x784A4
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC  0x785A4
+#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_14          0x6B268
+#define DSCC_PICTURE_PARAMETER_SET_14          0x6BA68
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB  0x782A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB  0x783A8
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC  0x784A8
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC  0x785A8
+#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_15          0x6B26C
+#define DSCC_PICTURE_PARAMETER_SET_15          0x6BA6C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB  0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB  0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC  0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC  0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16          0x6B270
+#define DSCC_PICTURE_PARAMETER_SET_16          0x6BA70
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB  0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB  0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC  0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC  0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe)        _MMIO_PIPE((pipe) - PIPE_B, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define  DSC_SLICE_PER_LINE(slice_per_line)            ((slice_per_line) << 16)
+#define  DSC_SLICE_CHUNK_SIZE(slice_chunk_size)               ((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0                   _MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW               _MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0                   _MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW               _MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB           (0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB       (0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB           (0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB       (0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC           (0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC       (0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC           (0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC       (0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe)         _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC0_RC_BUF_THRESH_0_PB, \
+                                               _ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe)     _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+                                               _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe)         _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC1_RC_BUF_THRESH_0_PB, \
+                                               _ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe)     _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+                                               _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1                   _MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW               _MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1                   _MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW               _MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB           (0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB       (0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB           (0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB       (0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC           (0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC       (0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC           (0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC       (0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe)         _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC0_RC_BUF_THRESH_1_PB, \
+                                               _ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe)     _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+                                               _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe)         _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC1_RC_BUF_THRESH_1_PB, \
+                                               _ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe)     _MMIO_PIPE((pipe) - PIPE_B, \
+                                               _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+                                               _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
+
 #endif /* _I915_REG_H_ */
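The register block above repeats two conventions. Per-pipe DSC engine instances are resolved with _MMIO_PIPE((pipe) - PIPE_B, ..._PB, ..._PC), so PIPE_B selects the 0x78xxx "PB" address and PIPE_C the "PC" address, while the DSCA/DSCC registers at fixed 0x6Bxxx addresses are not pipe-indexed. The _UDW ("upper dword") registers at offset +4 pair with their base register to carry a value wider than 32 bits. A minimal sketch of how a routine might use both conventions; dsc_write_rc_buf_thresh() is a hypothetical helper for illustration only, while I915_WRITE, lower_32_bits() and upper_32_bits() are existing kernel/i915 idioms:

/*
 * Illustrative only, not part of this patch: program one 64-bit
 * rate-control buffer threshold for the first DSC engine of a pipe.
 */
static void dsc_write_rc_buf_thresh(struct drm_i915_private *dev_priv,
                                    enum pipe pipe, u64 thresh)
{
        /* (pipe) - PIPE_B picks the _PB or _PC instance */
        I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe), lower_32_bits(thresh));
        I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), upper_32_bits(thresh));
}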
index 8928894dd9c77d1b1aa4d28c5a0ab06ec4b83a25..5c2c93cbab12f8ebff29507a00953a24a9a877c6 100644 (file)
@@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
        /* Carefully retire all requests without writing to the rings */
        ret = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_INTERRUPTIBLE |
-                                    I915_WAIT_LOCKED);
+                                    I915_WAIT_LOCKED,
+                                    MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;
 
@@ -320,6 +321,7 @@ static void advance_ring(struct i915_request *request)
                 * is just about to be. Either works, if we miss the last two
                 * noops - they are safe to be replayed on a reset.
                 */
+               GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
                tail = READ_ONCE(request->tail);
                list_del(&ring->active_link);
        } else {
@@ -383,8 +385,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
         * the subsequent request.
         */
        if (engine->last_retired_context)
-               intel_context_unpin(engine->last_retired_context, engine);
-       engine->last_retired_context = rq->ctx;
+               intel_context_unpin(engine->last_retired_context);
+       engine->last_retired_context = rq->hw_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +457,8 @@ static void i915_request_retire(struct i915_request *request)
        i915_request_remove_from_client(request);
 
        /* Retirement decays the ban score as it is a sign of ctx progress */
-       atomic_dec_if_positive(&request->ctx->ban_score);
-       intel_context_unpin(request->ctx, request->engine);
+       atomic_dec_if_positive(&request->gem_context->ban_score);
+       intel_context_unpin(request->hw_context);
 
        __retire_engine_upto(request->engine, request);
 
@@ -502,7 +504,7 @@ static void move_to_timeline(struct i915_request *request,
        GEM_BUG_ON(request->timeline == &request->engine->timeline);
        lockdep_assert_held(&request->engine->timeline.lock);
 
-       spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
+       spin_lock(&request->timeline->lock);
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);
 }
@@ -657,7 +659,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
        struct drm_i915_private *i915 = engine->i915;
        struct i915_request *rq;
-       struct intel_ring *ring;
+       struct intel_context *ce;
        int ret;
 
        lockdep_assert_held(&i915->drm.struct_mutex);
@@ -681,22 +683,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
-       ring = intel_context_pin(ctx, engine);
-       if (IS_ERR(ring))
-               return ERR_CAST(ring);
-       GEM_BUG_ON(!ring);
+       ce = intel_context_pin(ctx, engine);
+       if (IS_ERR(ce))
+               return ERR_CAST(ce);
 
        ret = reserve_gt(i915);
        if (ret)
                goto err_unpin;
 
-       ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+       ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
        if (ret)
                goto err_unreserve;
 
        /* Move our oldest request to the slab-cache (if not in use!) */
-       rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
-       if (!list_is_last(&rq->ring_link, &ring->request_list) &&
+       rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
+       if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
            i915_request_completed(rq))
                i915_request_retire(rq);
 
@@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
                /* Ratelimit ourselves to prevent oom from malicious clients */
                ret = i915_gem_wait_for_idle(i915,
                                             I915_WAIT_LOCKED |
-                                            I915_WAIT_INTERRUPTIBLE);
+                                            I915_WAIT_INTERRUPTIBLE,
+                                            MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        goto err_unreserve;
 
@@ -760,9 +762,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        INIT_LIST_HEAD(&rq->active_list);
        rq->i915 = i915;
        rq->engine = engine;
-       rq->ctx = ctx;
-       rq->ring = ring;
-       rq->timeline = ring->timeline;
+       rq->gem_context = ctx;
+       rq->hw_context = ce;
+       rq->ring = ce->ring;
+       rq->timeline = ce->ring->timeline;
        GEM_BUG_ON(rq->timeline == &engine->timeline);
 
        spin_lock_init(&rq->lock);
@@ -814,14 +817,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
                goto err_unwind;
 
        /* Keep a second pin for the dual retirement along engine and ring */
-       __intel_context_pin(rq->ctx, engine);
+       __intel_context_pin(ce);
+
+       rq->infix = rq->ring->emit; /* end of header; start of user payload */
 
        /* Check that we didn't interrupt ourselves with a new request */
        GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
        return rq;
 
 err_unwind:
-       rq->ring->emit = rq->head;
+       ce->ring->emit = rq->head;
 
        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&rq->active_list));
@@ -832,7 +837,7 @@ err_unwind:
 err_unreserve:
        unreserve_gt(i915);
 err_unpin:
-       intel_context_unpin(ctx, engine);
+       intel_context_unpin(ce);
        return ERR_PTR(ret);
 }
 
@@ -1010,19 +1015,39 @@ i915_request_await_object(struct i915_request *to,
        return ret;
 }
 
+void i915_request_skip(struct i915_request *rq, int error)
+{
+       void *vaddr = rq->ring->vaddr;
+       u32 head;
+
+       GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+       dma_fence_set_error(&rq->fence, error);
+
+       /*
+        * As this request likely depends on state from the lost
+        * context, clear out all the user operations leaving the
+        * breadcrumb at the end (so we get the fence notifications).
+        */
+       head = rq->infix;
+       if (rq->postfix < head) {
+               memset(vaddr + head, 0, rq->ring->size - head);
+               head = 0;
+       }
+       memset(vaddr + head, 0, rq->postfix - head);
+}
+
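i915_request_skip() above treats the ring as a circular buffer: when the user payload wraps past the end of the ring (rq->postfix < rq->infix), the stretch from infix to the end of the ring is cleared first and the clear then restarts from offset 0, leaving only the breadcrumb intact. The same wrap handling in isolation, as a sketch over a plain byte buffer (names hypothetical):

/*
 * Illustrative sketch: zero the circular span [start, end) in a
 * buffer of the given size, handling wrap past the buffer end.
 */
static void clear_wrapped_span(void *buf, unsigned int size,
                               unsigned int start, unsigned int end)
{
        if (end < start) {
                memset(buf + start, 0, size - start); /* tail of buffer */
                start = 0;                            /* wrap to origin */
        }
        memset(buf + start, 0, end - start);
}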
 /*
  * NB: This function is not allowed to fail. Doing so would mean the
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_request_add(struct i915_request *request, bool flush_caches)
+void i915_request_add(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
-       struct intel_ring *ring = request->ring;
        struct i915_timeline *timeline = request->timeline;
+       struct intel_ring *ring = request->ring;
        struct i915_request *prev;
        u32 *cs;
-       int err;
 
        GEM_TRACE("%s fence %llx:%d\n",
                  engine->name, request->fence.context, request->fence.seqno);
@@ -1043,20 +1068,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
         * know that it is time to use that space up.
         */
        request->reserved_space = 0;
-
-       /*
-        * Emit any outstanding flushes - execbuf can fail to emit the flush
-        * after having emitted the batchbuffer command. Hence we need to fix
-        * things up similar to emitting the lazy request. The difference here
-        * is that the flush _must_ happen before the next request, no matter
-        * what.
-        */
-       if (flush_caches) {
-               err = engine->emit_flush(request, EMIT_FLUSH);
-
-               /* Not allowed to fail! */
-               WARN(err, "engine->emit_flush() failed: %d!\n", err);
-       }
+       engine->emit_flush(request, EMIT_FLUSH);
 
        /*
         * Record the position of the start of the breadcrumb so that
@@ -1095,8 +1107,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
        i915_gem_active_set(&timeline->last_request, request);
 
        list_add_tail(&request->ring_link, &ring->request_list);
-       if (list_is_first(&request->ring_link, &ring->request_list))
+       if (list_is_first(&request->ring_link, &ring->request_list)) {
+               GEM_TRACE("marking %s as active\n", ring->timeline->name);
                list_add(&ring->active_link, &request->i915->gt.active_rings);
+       }
        request->emitted_jiffies = jiffies;
 
        /*
@@ -1113,7 +1127,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
        local_bh_disable();
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
        if (engine->schedule)
-               engine->schedule(request, &request->ctx->sched);
+               engine->schedule(request, &request->gem_context->sched);
        rcu_read_unlock();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
@@ -1205,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
         * takes to sleep on a request, on the order of a microsecond.
         */
 
-       irq = atomic_read(&engine->irq_count);
+       irq = READ_ONCE(engine->breadcrumbs.irq_count);
        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
@@ -1217,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq,
                 * assume we won't see one in the near future but require
                 * the engine->seqno_barrier() to fixup coherency.
                 */
-               if (atomic_read(&engine->irq_count) != irq)
+               if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
                        break;
 
                if (signal_pending_state(state, current))
@@ -1294,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq,
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(errq, &reset);
 
-       intel_wait_init(&wait, rq);
+       intel_wait_init(&wait);
 
 restart:
        do {
index eddbd4245cb3afa92253d2c023f9ffb29b73f6d9..e1c9365dfefb1ef9ddf80183ecea07185b6d7ed4 100644 (file)
@@ -93,8 +93,9 @@ struct i915_request {
         * i915_request_free() will then decrement the refcount on the
         * context.
         */
-       struct i915_gem_context *ctx;
+       struct i915_gem_context *gem_context;
        struct intel_engine_cs *engine;
+       struct intel_context *hw_context;
        struct intel_ring *ring;
        struct i915_timeline *timeline;
        struct intel_signal_node signaling;
@@ -133,6 +134,9 @@ struct i915_request {
        /** Position in the ring of the start of the request */
        u32 head;
 
+       /** Position in the ring of the start of the user packets */
+       u32 infix;
+
        /**
         * Position in the ring of the start of the postfix.
         * This is required to calculate the maximum available ring space
@@ -249,13 +253,13 @@ int i915_request_await_object(struct i915_request *to,
 int i915_request_await_dma_fence(struct i915_request *rq,
                                 struct dma_fence *fence);
 
-void __i915_request_add(struct i915_request *rq, bool flush_caches);
-#define i915_request_add(rq) \
-       __i915_request_add(rq, false)
+void i915_request_add(struct i915_request *rq);
 
 void __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
+void i915_request_skip(struct i915_request *request, int error);
+
 void __i915_request_unsubmit(struct i915_request *request);
 void i915_request_unsubmit(struct i915_request *request);
 
@@ -266,6 +270,7 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE        BIT(0)
 #define I915_WAIT_LOCKED       BIT(1) /* struct_mutex held, handle GPU reset */
 #define I915_WAIT_ALL          BIT(2) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
@@ -375,6 +380,7 @@ static inline void
 init_request_active(struct i915_gem_active *active,
                    i915_gem_retire_fn retire)
 {
+       RCU_INIT_POINTER(active->request, NULL);
        INIT_LIST_HEAD(&active->link);
        active->retire = retire ?: i915_gem_retire_noop;
 }
index 9766e806dce636aa2f14311d5a21d8e11658bdc1..a73472dd12fd926da14d45e35af9a4ac70b97a1e 100644 (file)
@@ -99,6 +99,6 @@ __printf(2, 3)
 bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
 
 #define igt_timeout(t, fmt, ...) \
-       __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+       __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 
 #endif /* !__I915_SELFTEST_H__ */
index dc2a4632faa7da7c253633f152a6e4220d56e34e..a2c2c3ab5fb0ceea22c2c1426b1c307b5d1aa460 100644 (file)
@@ -37,6 +37,8 @@ struct i915_timeline {
        u32 seqno;
 
        spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
 
        /**
         * List of breadcrumbs associated with GPU requests currently
index 8cc3a256f29d3e0ae4cb2afebd04cac76fbd5784..b50c6b829715e220c9f3edede3dfa0e83497a804 100644 (file)
@@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 
            TP_STRUCT__entry(
                             __field(u32, dev)
-                            __field(u32, sync_from)
-                            __field(u32, sync_to)
+                            __field(u32, from_class)
+                            __field(u32, from_instance)
+                            __field(u32, to_class)
+                            __field(u32, to_instance)
                             __field(u32, seqno)
                             ),
 
            TP_fast_assign(
                           __entry->dev = from->i915->drm.primary->index;
-                          __entry->sync_from = from->engine->id;
-                          __entry->sync_to = to->engine->id;
+                          __entry->from_class = from->engine->uabi_class;
+                          __entry->from_instance = from->engine->instance;
+                          __entry->to_class = to->engine->uabi_class;
+                          __entry->to_instance = to->engine->instance;
                           __entry->seqno = from->global_seqno;
                           ),
 
-           TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+           TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
                      __entry->dev,
-                     __entry->sync_from, __entry->sync_to,
+                     __entry->from_class, __entry->from_instance,
+                     __entry->to_class, __entry->to_instance,
                      __entry->seqno)
 );
 
@@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, flags)
                             ),
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->flags = flags;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->flags)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->flags)
 );
 
 DECLARE_EVENT_CLASS(i915_request,
@@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global)
                             ),
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global = rq->global_seqno;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->global)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->global)
 );
 
 DEFINE_EVENT(i915_request, i915_request_add,
@@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global_seqno)
                             __field(u32, port)
@@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in,
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global_seqno = rq->global_seqno;
@@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in,
                           __entry->port = port;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->prio, __entry->global_seqno,
-                     __entry->port)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->prio, __entry->global_seqno, __entry->port)
 );
 
 TRACE_EVENT(i915_request_out,
@@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global_seqno)
                             __field(u32, completed)
@@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out,
 
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global_seqno = rq->global_seqno;
                           __entry->completed = i915_request_completed(rq);
                           ),
 
-                   TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u",
-                             __entry->dev, __entry->hw_id, __entry->ring,
-                             __entry->ctx, __entry->seqno,
+                   TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+                             __entry->dev, __entry->class, __entry->instance,
+                             __entry->hw_id, __entry->ctx, __entry->seqno,
                              __entry->global_seqno, __entry->completed)
 );
 
@@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify,
 
            TP_STRUCT__entry(
                             __field(u32, dev)
-                            __field(u32, ring)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(bool, waiters)
                             ),
 
            TP_fast_assign(
                           __entry->dev = engine->i915->drm.primary->index;
-                          __entry->ring = engine->id;
+                          __entry->class = engine->uabi_class;
+                          __entry->instance = engine->instance;
                           __entry->seqno = intel_engine_get_seqno(engine);
                           __entry->waiters = waiters;
                           ),
 
-           TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
-                     __entry->dev, __entry->ring, __entry->seqno,
-                     __entry->waiters)
+           TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->seqno, __entry->waiters)
 );
 
 DEFINE_EVENT(i915_request, i915_request_retire,
@@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, hw_id)
-                            __field(u32, ring)
-                            __field(u32, ctx)
+                            __field(u64, ctx)
+                            __field(u16, class)
+                            __field(u16, instance)
                             __field(u32, seqno)
                             __field(u32, global)
                             __field(unsigned int, flags)
@@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin,
             */
            TP_fast_assign(
                           __entry->dev = rq->i915->drm.primary->index;
-                          __entry->hw_id = rq->ctx->hw_id;
-                          __entry->ring = rq->engine->id;
+                          __entry->hw_id = rq->gem_context->hw_id;
+                          __entry->class = rq->engine->uabi_class;
+                          __entry->instance = rq->engine->instance;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
                           __entry->global = rq->global_seqno;
                           __entry->flags = flags;
                           ),
 
-           TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
-                     __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
-                     __entry->seqno, __entry->global,
-                     !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
+           TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+                     __entry->dev, __entry->class, __entry->instance,
+                     __entry->hw_id, __entry->ctx, __entry->seqno,
+                     __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+                     __entry->flags)
 );
 
 DEFINE_EVENT(i915_request, i915_request_wait_end,
@@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
                        __entry->dev = ctx->i915->drm.primary->index;
                        __entry->ctx = ctx;
                        __entry->hw_id = ctx->hw_id;
-                       __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+                       __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
        ),
 
        TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
@@ -953,36 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free,
        TP_ARGS(ctx)
 );
 
-/**
- * DOC: switch_mm tracepoint
- *
- * This tracepoint allows tracking of the mm switch, which is an important point
- * in the lifetime of the vm in the legacy submission path. This tracepoint is
- * called only if full ppgtt is enabled.
- */
-TRACE_EVENT(switch_mm,
-       TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
-
-       TP_ARGS(engine, to),
-
-       TP_STRUCT__entry(
-                       __field(u32, ring)
-                       __field(struct i915_gem_context *, to)
-                       __field(struct i915_address_space *, vm)
-                       __field(u32, dev)
-       ),
-
-       TP_fast_assign(
-                       __entry->ring = engine->id;
-                       __entry->to = to;
-                       __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-                       __entry->dev = engine->i915->drm.primary->index;
-       ),
-
-       TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
-                 __entry->dev, __entry->ring, __entry->to, __entry->vm)
-);
-
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
index 5fe9f3f3946728aa7985ab6f251f685bc97cfe6b..869cf4a3b6de75fee593c0f66c953cc1035434a6 100644 (file)
@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
                         node->start + node->size,
                         node->size / 1024);
 
-       ggtt->base.reserved -= node->size;
+       ggtt->vm.reserved -= node->size;
        drm_mm_remove_node(node);
 }
 
@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
 
        DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
                 start, end, size / 1024);
-       ret = i915_gem_gtt_reserve(&ggtt->base, node,
+       ret = i915_gem_gtt_reserve(&ggtt->vm, node,
                                   size, start, I915_COLOR_UNEVICTABLE,
                                   0);
        if (!ret)
-               ggtt->base.reserved += size;
+               ggtt->vm.reserved += size;
 
        return ret;
 }
@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
 int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       unsigned long ggtt_end = ggtt->base.total;
+       unsigned long ggtt_end = ggtt->vm.total;
 
        unsigned long mappable_base, mappable_size, mappable_end;
        unsigned long unmappable_base, unmappable_size, unmappable_end;
index bb8338450dc11021b003155d59b3a4bbbe24b4f9..551acc3900464bb7d4aee586ba7792155631bef5 100644 (file)
@@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
        return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
 }
 
+static inline bool
+intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+       return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
 int intel_vgt_balloon(struct drm_i915_private *dev_priv);
 void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
 
index 9324d476e0a7c356b39cb02374e904a2b0a95262..11d834f942205f37c10c1a95a345b8908cb3411f 100644 (file)
@@ -21,7 +21,7 @@
  * IN THE SOFTWARE.
  *
  */
+
 #include "i915_vma.h"
 
 #include "i915_drv.h"
 
 #include <drm/drm_gem.h>
 
+#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
+
+#include <linux/stackdepot.h>
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+       unsigned long entries[12];
+       struct stack_trace trace = {
+               .entries = entries,
+               .max_entries = ARRAY_SIZE(entries),
+       };
+       char buf[512];
+
+       if (!vma->node.stack) {
+               DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
+                                vma->node.start, vma->node.size, reason);
+               return;
+       }
+
+       depot_fetch_stack(vma->node.stack, &trace);
+       snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+       DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
+                        vma->node.start, vma->node.size, reason, buf);
+}
+
+#else
+
+static void vma_print_allocator(struct i915_vma *vma, const char *reason)
+{
+}
+
+#endif
+
+struct i915_vma_active {
+       struct i915_gem_active base;
+       struct i915_vma *vma;
+       struct rb_node node;
+       u64 timeline;
+};
+
 static void
-i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
+__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
 {
-       const unsigned int idx = rq->engine->id;
-       struct i915_vma *vma =
-               container_of(active, struct i915_vma, last_read[idx]);
        struct drm_i915_gem_object *obj = vma->obj;
 
-       GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
-
-       i915_vma_clear_active(vma, idx);
-       if (i915_vma_is_active(vma))
+       GEM_BUG_ON(!i915_vma_is_active(vma));
+       if (--vma->active_count)
                return;
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq)
        }
 }
 
+static void
+i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+       struct i915_vma_active *active =
+               container_of(base, typeof(*active), base);
+
+       __i915_vma_retire(active->vma, rq);
+}
+
+static void
+i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
+{
+       __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
+}
+
 static struct i915_vma *
 vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
@@ -82,19 +132,20 @@ vma_create(struct drm_i915_gem_object *obj,
 {
        struct i915_vma *vma;
        struct rb_node *rb, **p;
-       int i;
 
        /* The aliasing_ppgtt should never be used directly! */
-       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
 
        vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);
 
-       for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
-               init_request_active(&vma->last_read[i], i915_vma_retire);
+       vma->active = RB_ROOT;
+
+       init_request_active(&vma->last_active, i915_vma_last_retire);
        init_request_active(&vma->last_fence, NULL);
        vma->vm = vm;
+       vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->resv;
        vma->size = obj->base.size;
@@ -109,7 +160,7 @@ vma_create(struct drm_i915_gem_object *obj,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
-                       GEM_BUG_ON(vma->size >= obj->base.size);
+                       GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
@@ -280,7 +331,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
        GEM_BUG_ON(!vma->pages);
 
        trace_i915_vma_bind(vma, bind_flags);
-       ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+       ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
 
@@ -345,7 +396,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
 
 void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
        GEM_BUG_ON(vma->iomap == NULL);
 
@@ -365,6 +416,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
                return;
 
        obj = vma->obj;
+       GEM_BUG_ON(!obj);
 
        i915_vma_unpin(vma);
        i915_vma_close(vma);
@@ -459,6 +511,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
        return true;
 }
 
+static void assert_bind_count(const struct drm_i915_gem_object *obj)
+{
+       /*
+        * Combine the assertion that the object is bound and that we have
+        * pinned its pages. But we should never have bound the object
+        * more than we have pinned its pages. (For complete accuracy, we
+        * assume that no one else is pinning the pages, but as a rough assertion
+        * that we will not run into problems later, this will do!)
+        */
+       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 /**
  * i915_vma_insert - finds a slot for the vma in its address space
  * @vma: the vma
@@ -477,7 +541,7 @@ static int
 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
        struct drm_i915_private *dev_priv = vma->vm->i915;
-       struct drm_i915_gem_object *obj = vma->obj;
+       unsigned int cache_level;
        u64 start, end;
        int ret;
 
@@ -512,20 +576,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
         * attempt to find space.
         */
        if (size > end) {
-               DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-                         size, obj->base.size,
-                         flags & PIN_MAPPABLE ? "mappable" : "total",
+               DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
+                         size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }
 
-       ret = i915_gem_object_pin_pages(obj);
-       if (ret)
-               return ret;
+       if (vma->obj) {
+               ret = i915_gem_object_pin_pages(vma->obj);
+               if (ret)
+                       return ret;
+
+               cache_level = vma->obj->cache_level;
+       } else {
+               cache_level = 0;
+       }
 
        GEM_BUG_ON(vma->pages);
 
-       ret = vma->vm->set_pages(vma);
+       ret = vma->ops->set_pages(vma);
        if (ret)
                goto err_unpin;
 
@@ -538,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                }
 
                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
-                                          size, offset, obj->cache_level,
+                                          size, offset, cache_level,
                                           flags);
                if (ret)
                        goto err_clear;
@@ -577,7 +646,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                }
 
                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
-                                         size, alignment, obj->cache_level,
+                                         size, alignment, cache_level,
                                          start, end, flags);
                if (ret)
                        goto err_clear;
@@ -586,23 +655,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
+       GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
 
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
-       spin_lock(&dev_priv->mm.obj_lock);
-       list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
-       obj->bind_count++;
-       spin_unlock(&dev_priv->mm.obj_lock);
+       if (vma->obj) {
+               struct drm_i915_gem_object *obj = vma->obj;
 
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+               spin_lock(&dev_priv->mm.obj_lock);
+               list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+               obj->bind_count++;
+               spin_unlock(&dev_priv->mm.obj_lock);
+
+               assert_bind_count(obj);
+       }
 
        return 0;
 
 err_clear:
-       vma->vm->clear_pages(vma);
+       vma->ops->clear_pages(vma);
 err_unpin:
-       i915_gem_object_unpin_pages(obj);
+       if (vma->obj)
+               i915_gem_object_unpin_pages(vma->obj);
        return ret;
 }
 
@@ -610,30 +684,35 @@ static void
 i915_vma_remove(struct i915_vma *vma)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
-       struct drm_i915_gem_object *obj = vma->obj;
 
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-       vma->vm->clear_pages(vma);
+       vma->ops->clear_pages(vma);
 
        drm_mm_remove_node(&vma->node);
        list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-       /* Since the unbound list is global, only move to that list if
+       /*
+        * Since the unbound list is global, only move to that list if
         * no more VMAs exist.
         */
-       spin_lock(&i915->mm.obj_lock);
-       if (--obj->bind_count == 0)
-               list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
-       spin_unlock(&i915->mm.obj_lock);
-
-       /* And finally now the object is completely decoupled from this vma,
-        * we can drop its hold on the backing storage and allow it to be
-        * reaped by the shrinker.
-        */
-       i915_gem_object_unpin_pages(obj);
-       GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+       if (vma->obj) {
+               struct drm_i915_gem_object *obj = vma->obj;
+
+               spin_lock(&i915->mm.obj_lock);
+               if (--obj->bind_count == 0)
+                       list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+               spin_unlock(&i915->mm.obj_lock);
+
+               /*
+                * And finally now the object is completely decoupled from this
+                * vma, we can drop its hold on the backing storage and allow
+                * it to be reaped by the shrinker.
+                */
+               i915_gem_object_unpin_pages(obj);
+               assert_bind_count(obj);
+       }
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
@@ -658,7 +737,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-       ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
+       ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
        if (ret)
                goto err_remove;
 
@@ -715,23 +794,28 @@ void i915_vma_reopen(struct i915_vma *vma)
 
 static void __i915_vma_destroy(struct i915_vma *vma)
 {
-       int i;
+       struct drm_i915_private *i915 = vma->vm->i915;
+       struct i915_vma_active *iter, *n;
 
        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(vma->fence);
 
-       for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
-               GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
        GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
 
        list_del(&vma->obj_link);
        list_del(&vma->vm_link);
-       rb_erase(&vma->obj_node, &vma->obj->vma_tree);
+       if (vma->obj)
+               rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
        if (!i915_vma_is_ggtt(vma))
                i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-       kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+       rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
+               GEM_BUG_ON(i915_gem_active_isset(&iter->base));
+               kfree(iter);
+       }
+
+       kmem_cache_free(i915->vmas, vma);
 }
 
 void i915_vma_destroy(struct i915_vma *vma)
@@ -795,23 +879,173 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
                list_del(&vma->obj->userfault_link);
 }
 
-int i915_vma_unbind(struct i915_vma *vma)
+static void export_fence(struct i915_vma *vma,
+                        struct i915_request *rq,
+                        unsigned int flags)
+{
+       struct reservation_object *resv = vma->resv;
+
+       /*
+        * Ignore errors from failing to allocate the new fence, we can't
+        * handle an error right now. Worst case should be missed
+        * synchronisation leading to rendering corruption.
+        */
+       reservation_object_lock(resv, NULL);
+       if (flags & EXEC_OBJECT_WRITE)
+               reservation_object_add_excl_fence(resv, &rq->fence);
+       else if (reservation_object_reserve_shared(resv) == 0)
+               reservation_object_add_shared_fence(resv, &rq->fence);
+       reservation_object_unlock(resv);
+}
+
+static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
+{
+       struct i915_vma_active *active;
+       struct rb_node **p, *parent;
+       struct i915_request *old;
+
+       /*
+        * We track the most recently used timeline to skip a rbtree search
+        * for the common case, under typical loads we never need the rbtree
+        * at all. We can reuse the last_active slot if it is empty, that is
+        * after the previous activity has been retired, or if the active
+        * matches the current timeline.
+        *
+        * Note that we allow the timeline to be active simultaneously in
+        * the rbtree and the last_active cache. We do this to avoid having
+        * to search and replace the rbtree element for a new timeline, with
+        * the cost being that we must be aware that the vma may be retired
+        * twice for the same timeline (as the older rbtree element will be
+        * retired before the new request added to last_active).
+        */
+       old = i915_gem_active_raw(&vma->last_active,
+                                 &vma->vm->i915->drm.struct_mutex);
+       if (!old || old->fence.context == idx)
+               goto out;
+
+       /* Move the currently active fence into the rbtree */
+       idx = old->fence.context;
+
+       parent = NULL;
+       p = &vma->active.rb_node;
+       while (*p) {
+               parent = *p;
+
+               active = rb_entry(parent, struct i915_vma_active, node);
+               if (active->timeline == idx)
+                       goto replace;
+
+               if (active->timeline < idx)
+                       p = &parent->rb_right;
+               else
+                       p = &parent->rb_left;
+       }
+
+       active = kmalloc(sizeof(*active), GFP_KERNEL);
+
+       /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
+       if (unlikely(!i915_gem_active_raw(&vma->last_active,
+                                         &vma->vm->i915->drm.struct_mutex))) {
+               kfree(active);
+               goto out;
+       }
+
+       if (unlikely(!active))
+               return ERR_PTR(-ENOMEM);
+
+       init_request_active(&active->base, i915_vma_retire);
+       active->vma = vma;
+       active->timeline = idx;
+
+       rb_link_node(&active->node, parent, p);
+       rb_insert_color(&active->node, &vma->active);
+
+replace:
+       /*
+        * Overwrite the previous active slot in the rbtree with last_active,
+        * leaving last_active zeroed. If the previous slot is still active,
+        * we must be careful as we now only expect to receive one retire
+        * callback not two, and so must undo the active counting for the
+        * overwritten slot.
+        */
+       if (i915_gem_active_isset(&active->base)) {
+               /* Retire ourselves from the old rq->active_list */
+               __list_del_entry(&active->base.link);
+               vma->active_count--;
+               GEM_BUG_ON(!vma->active_count);
+       }
+       GEM_BUG_ON(list_empty(&vma->last_active.link));
+       list_replace_init(&vma->last_active.link, &active->base.link);
+       active->base.request = fetch_and_zero(&vma->last_active.request);
+
+out:
+       return &vma->last_active;
+}
+
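active_instance() above keeps a most-recently-used slot (last_active) in front of an rbtree keyed by timeline (fence context), so the common case of repeated activity on a single timeline never walks the tree; only when a request arrives on a different timeline is the cached entry demoted into the rbtree. A generic sketch of that lookup shape, with the request plumbing and the careful last_active handoff stripped out (types and names hypothetical; needs <linux/rbtree.h> and <linux/slab.h>):

struct cached_node {
        struct rb_node node;
        u64 key;
};

struct cached_tree {
        struct rb_root root;
        struct cached_node *mru; /* most recently used key */
};

static struct cached_node *lookup(struct cached_tree *t, u64 key)
{
        struct rb_node **p = &t->root.rb_node, *parent = NULL;
        struct cached_node *n;

        if (t->mru && t->mru->key == key) /* fast path, no tree walk */
                return t->mru;

        while (*p) {
                parent = *p;
                n = rb_entry(parent, struct cached_node, node);
                if (n->key == key)
                        goto out;
                p = n->key < key ? &parent->rb_right : &parent->rb_left;
        }

        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return NULL;
        n->key = key;
        rb_link_node(&n->node, parent, p);
        rb_insert_color(&n->node, &t->root);
out:
        t->mru = n;
        return n;
}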
+int i915_vma_move_to_active(struct i915_vma *vma,
+                           struct i915_request *rq,
+                           unsigned int flags)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       unsigned long active;
+       struct i915_gem_active *active;
+
+       lockdep_assert_held(&rq->i915->drm.struct_mutex);
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+       active = active_instance(vma, rq->fence.context);
+       if (IS_ERR(active))
+               return PTR_ERR(active);
+
+       /*
+        * Add a reference if we're newly entering the active list.
+        * The order in which we add operations to the retirement queue is
+        * vital here: mark_active adds to the start of the callback list,
+        * such that subsequent callbacks are called first. Therefore we
+        * add the active reference first and queue for it to be dropped
+        * *last*.
+        */
+       if (!i915_gem_active_isset(active) && !vma->active_count++) {
+               list_move_tail(&vma->vm_link, &vma->vm->active_list);
+               obj->active_count++;
+       }
+       i915_gem_active_set(active, rq);
+       GEM_BUG_ON(!i915_vma_is_active(vma));
+       GEM_BUG_ON(!obj->active_count);
+
+       obj->write_domain = 0;
+       if (flags & EXEC_OBJECT_WRITE) {
+               obj->write_domain = I915_GEM_DOMAIN_RENDER;
+
+               if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
+                       i915_gem_active_set(&obj->frontbuffer_write, rq);
+
+               obj->read_domains = 0;
+       }
+       obj->read_domains |= I915_GEM_GPU_DOMAINS;
+
+       if (flags & EXEC_OBJECT_NEEDS_FENCE)
+               i915_gem_active_set(&vma->last_fence, rq);
+
+       export_fence(vma, rq, flags);
+       return 0;
+}
+
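Note that i915_vma_move_to_active() is declared __must_check in the header change below: moving a vma onto the active lists can now fail with -ENOMEM (via active_instance() above). A hedged sketch of the expected call shape at a submission site, with the surrounding execbuf plumbing elided:

        /* Illustrative only: callers must handle the new error path. */
        err = i915_vma_move_to_active(vma, rq, flags);
        if (unlikely(err))
                return err; /* rq does not yet track this vma */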
+int i915_vma_unbind(struct i915_vma *vma)
+{
        int ret;
 
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-       /* First wait upon any activity as retiring the request may
+       /*
+        * First wait upon any activity as retiring the request may
         * have side-effects such as unpinning or even unbinding this vma.
         */
        might_sleep();
-       active = i915_vma_get_active(vma);
-       if (active) {
-               int idx;
+       if (i915_vma_is_active(vma)) {
+               struct i915_vma_active *active, *n;
 
-               /* When a closed VMA is retired, it is unbound - eek.
+               /*
+                * When a closed VMA is retired, it is unbound - eek.
                 * In order to prevent it from being recursively closed,
                 * take a pin on the vma so that the second unbind is
                 * aborted.
@@ -825,33 +1059,36 @@ int i915_vma_unbind(struct i915_vma *vma)
                 */
                __i915_vma_pin(vma);
 
-               for_each_active(active, idx) {
-                       ret = i915_gem_active_retire(&vma->last_read[idx],
-                                                    &vma->vm->i915->drm.struct_mutex);
-                       if (ret)
-                               break;
-               }
+               ret = i915_gem_active_retire(&vma->last_active,
+                                            &vma->vm->i915->drm.struct_mutex);
+               if (ret)
+                       goto unpin;
 
-               if (!ret) {
-                       ret = i915_gem_active_retire(&vma->last_fence,
+               rbtree_postorder_for_each_entry_safe(active, n,
+                                                    &vma->active, node) {
+                       ret = i915_gem_active_retire(&active->base,
                                                     &vma->vm->i915->drm.struct_mutex);
+                       if (ret)
+                               goto unpin;
                }
 
+               ret = i915_gem_active_retire(&vma->last_fence,
+                                            &vma->vm->i915->drm.struct_mutex);
+unpin:
                __i915_vma_unpin(vma);
                if (ret)
                        return ret;
        }
        GEM_BUG_ON(i915_vma_is_active(vma));
 
-       if (i915_vma_is_pinned(vma))
+       if (i915_vma_is_pinned(vma)) {
+               vma_print_allocator(vma, "is pinned");
                return -EBUSY;
+       }
 
        if (!drm_mm_node_allocated(&vma->node))
                return 0;
 
-       GEM_BUG_ON(obj->bind_count == 0);
-       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
        if (i915_vma_is_map_and_fenceable(vma)) {
                /*
                 * Check that we have flushed all writes through the GGTT
@@ -878,7 +1115,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        if (likely(!vma->vm->closed)) {
                trace_i915_vma_unbind(vma);
-               vma->vm->unbind_vma(vma);
+               vma->ops->unbind_vma(vma);
        }
        vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
index fc4294cfaa91314bbfccaa75aa2692b9194c737b..f06d663771070a66cb3df27f81d7f58298bb3708 100644 (file)
@@ -26,6 +26,7 @@
 #define __I915_VMA_H__
 
 #include <linux/io-mapping.h>
+#include <linux/rbtree.h>
 
 #include <drm/drm_mm.h>
 
@@ -49,10 +50,12 @@ struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
+       const struct i915_vma_ops *ops;
        struct drm_i915_fence_reg *fence;
        struct reservation_object *resv; /** Alias of obj->resv */
        struct sg_table *pages;
        void __iomem *iomap;
+       void *private; /* owned by creator */
        u64 size;
        u64 display_alignment;
        struct i915_page_sizes page_sizes;
@@ -92,8 +95,9 @@ struct i915_vma {
 #define I915_VMA_USERFAULT     BIT(I915_VMA_USERFAULT_BIT)
 #define I915_VMA_GGTT_WRITE    BIT(12)
 
-       unsigned int active;
-       struct i915_gem_active last_read[I915_NUM_ENGINES];
+       unsigned int active_count;
+       struct rb_root active;
+       struct i915_gem_active last_active;
        struct i915_gem_active last_fence;
 
        /**
@@ -136,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma);
 
+static inline bool i915_vma_is_active(struct i915_vma *vma)
+{
+       return vma->active_count;
+}
+
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+                                        struct i915_request *rq,
+                                        unsigned int flags);
+
 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
 {
        return vma->flags & I915_VMA_GGTT;
@@ -185,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
        return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
 }
 
-static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
-{
-       return vma->active;
-}
-
-static inline bool i915_vma_is_active(const struct i915_vma *vma)
-{
-       return i915_vma_get_active(vma);
-}
-
-static inline void i915_vma_set_active(struct i915_vma *vma,
-                                      unsigned int engine)
-{
-       vma->active |= BIT(engine);
-}
-
-static inline void i915_vma_clear_active(struct i915_vma *vma,
-                                        unsigned int engine)
-{
-       vma->active &= ~BIT(engine);
-}
-
-static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
-                                             unsigned int engine)
-{
-       return vma->active & BIT(engine);
-}
-
 static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
 {
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
@@ -339,6 +324,12 @@ static inline void i915_vma_unpin(struct i915_vma *vma)
        __i915_vma_unpin(vma);
 }
 
+static inline bool i915_vma_is_bound(const struct i915_vma *vma,
+                                    unsigned int where)
+{
+       return vma->flags & where;
+}
+
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
  * @vma: VMA to iomap
@@ -407,7 +398,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma)
 static inline void
 i915_vma_unpin_fence(struct i915_vma *vma)
 {
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+       /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */
        if (vma->fence)
                __i915_vma_unpin_fence(vma);
 }
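
The rework above replaces the fixed last_read[I915_NUM_ENGINES] array with an rb_root of per-timeline activity nodes plus a last_active fast slot, and i915_vma_unbind() now retires them with a postorder walk. A minimal sketch of that walk follows; the node layout and retire-by-free body are invented for illustration, while rbtree_postorder_for_each_entry_safe() and RB_ROOT are real kernel API:

    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Invented stand-in for the per-timeline activity nodes. */
    struct active_node {
            struct rb_node node;
            u64 timeline;
    };

    /* Retire (here: free) every node. Postorder makes the _safe walk
     * valid: the next node is fetched before the current one is
     * visited, so each node may be freed before its parent. */
    static void retire_all(struct rb_root *root)
    {
            struct active_node *it, *n;

            rbtree_postorder_for_each_entry_safe(it, n, root, node)
                    kfree(it);
            *root = RB_ROOT;
    }

This is why the unbind path above can retire entries in place without rebalancing: the postorder iterator never revisits a node after its subtree has been consumed.
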
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
new file mode 100644 (file)
index 0000000..13830e4
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *   Madhav Chauhan <madhav.chauhan@intel.com>
+ *   Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include "intel_dsi.h"
+
+static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+       u32 afe_clk_khz; /* 8X Clock */
+       u32 esc_clk_div_m;
+
+       afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp,
+                                       intel_dsi->lane_count);
+
+       esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
+                          esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+               POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+       }
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
+                          esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+               POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+       }
+}
+
+static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       u32 tmp;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+               tmp |= COMBO_PHY_MODE_DSI;
+               I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+       }
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               intel_display_power_get(dev_priv, port == PORT_A ?
+                                       POWER_DOMAIN_PORT_DDI_A_IO :
+                                       POWER_DOMAIN_PORT_DDI_B_IO);
+       }
+}
+
+static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       enum port port;
+       u32 tmp;
+       u32 lane_mask;
+
+       switch (intel_dsi->lane_count) {
+       case 1:
+               lane_mask = PWR_DOWN_LN_3_1_0;
+               break;
+       case 2:
+               lane_mask = PWR_DOWN_LN_3_1;
+               break;
+       case 3:
+               lane_mask = PWR_DOWN_LN_3;
+               break;
+       case 4:
+       default:
+               lane_mask = PWR_UP_ALL_LANES;
+               break;
+       }
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               tmp = I915_READ(ICL_PORT_CL_DW10(port));
+               tmp &= ~PWR_DOWN_LN_MASK;
+               I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask);
+       }
+}
+
+static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+{
+       /* step 4a: power up all lanes of the DDI used by DSI */
+       gen11_dsi_power_up_lanes(encoder);
+}
+
+static void __attribute__((unused))
+gen11_dsi_pre_enable(struct intel_encoder *encoder,
+                    const struct intel_crtc_state *pipe_config,
+                    const struct drm_connector_state *conn_state)
+{
+       /* step2: enable IO power */
+       gen11_dsi_enable_io_power(encoder);
+
+       /* step3: enable DSI PLL */
+       gen11_dsi_program_esc_clk_div(encoder);
+
+       /* step4: enable DSI port and DPHY */
+       gen11_dsi_enable_port_and_phy(encoder);
+}
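
gen11_dsi_program_esc_clk_div() derives the escape-clock divider from the AFE (8X) clock: afe_clk_khz = pclk * bpp / lane_count rounded to nearest, then divided up by the maximum escape clock. A standalone check of the arithmetic, assuming illustrative numbers (the 148500 kHz pixel clock, 24 bpp, 4 lanes, and the 20000 kHz value used for DSI_MAX_ESC_CLK are assumptions, not taken from the patch):

    #include <stdio.h>

    /* Simplified unsigned variants of the kernel rounding helpers. */
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))
    #define DIV_ROUND_UP(x, d)      (((x) + (d) - 1) / (d))
    #define DSI_MAX_ESC_CLK         20000 /* kHz, assumed value */

    int main(void)
    {
            unsigned int pclk = 148500; /* kHz; illustrative 1080p clock */
            unsigned int bpp = 24;      /* RGB888 */
            unsigned int lanes = 4;
            unsigned int afe_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lanes);
            unsigned int esc_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);

            printf("afe=%u kHz M=%u\n", afe_clk_khz, esc_div_m); /* afe=891000 kHz M=45 */
            return 0;
    }
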
index d1abf4bb7c819ca34426aed485cdf4142f2294eb..6ba478e57b9bc51f9cebab5f6e00ad442ea8c13a 100644 (file)
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
 #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
 
-static struct intel_dsm_priv {
-       acpi_handle dhandle;
-} intel_dsm_priv;
-
 static const guid_t intel_dsm_guid =
        GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
                  0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
@@ -72,12 +68,12 @@ static char *intel_dsm_mux_type(u8 type)
        }
 }
 
-static void intel_dsm_platform_mux_info(void)
+static void intel_dsm_platform_mux_info(acpi_handle dhandle)
 {
        int i;
        union acpi_object *pkg, *connector_count;
 
-       pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
+       pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid,
                        INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
                        NULL, ACPI_TYPE_PACKAGE);
        if (!pkg) {
@@ -107,41 +103,40 @@ static void intel_dsm_platform_mux_info(void)
        ACPI_FREE(pkg);
 }
 
-static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev)
 {
        acpi_handle dhandle;
 
        dhandle = ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
-               return false;
+               return NULL;
 
        if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
                            1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
                DRM_DEBUG_KMS("no _DSM method for intel device\n");
-               return false;
+               return NULL;
        }
 
-       intel_dsm_priv.dhandle = dhandle;
-       intel_dsm_platform_mux_info();
+       intel_dsm_platform_mux_info(dhandle);
 
-       return true;
+       return dhandle;
 }
 
 static bool intel_dsm_detect(void)
 {
+       acpi_handle dhandle = NULL;
        char acpi_method_name[255] = { 0 };
        struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
        struct pci_dev *pdev = NULL;
-       bool has_dsm = false;
        int vga_count = 0;
 
        while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
                vga_count++;
-               has_dsm |= intel_dsm_pci_probe(pdev);
+               dhandle = intel_dsm_pci_probe(pdev) ?: dhandle;
        }
 
-       if (vga_count == 2 && has_dsm) {
-               acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+       if (vga_count == 2 && dhandle) {
+               acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer);
                DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
                                 acpi_method_name);
                return true;
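
The probe loop above leans on the GNU "a ?: b" extension, which evaluates a once and yields it when non-NULL, falling back to b otherwise, so the handle from the last successful probe wins. A compilable illustration with a made-up probe():

    #include <stddef.h>

    /* Made-up probe: only device 2 has a handle. */
    static void *probe(int i)
    {
            static int token;

            return i == 2 ? (void *)&token : NULL;
    }

    /* GNU "a ?: b": if probe() returns NULL, keep the old handle;
     * otherwise take the new one. */
    static void *scan(int n)
    {
            void *handle = NULL;
            int i;

            for (i = 0; i < n; i++)
                    handle = probe(i) ?: handle;
            return handle;
    }
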
index 40285d1b91b7fb8968bcfea69b9353ab8b2e782f..b04952bacf77c01896ffdd910eed3d692e538d10 100644 (file)
@@ -59,7 +59,8 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
        else if (property == dev_priv->broadcast_rgb_property)
                *val = intel_conn_state->broadcast_rgb;
        else {
-               DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+               DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+                                property->base.id, property->name);
                return -EINVAL;
        }
 
@@ -95,7 +96,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
                return 0;
        }
 
-       DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name);
+       DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
+                        property->base.id, property->name);
        return -EINVAL;
 }
 
@@ -124,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
        if (new_conn_state->force_audio != old_conn_state->force_audio ||
            new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
            new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
+           new_conn_state->base.content_type != old_conn_state->base.content_type ||
            new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
                crtc_state->mode_changed = true;
 
index 6d068786eb41367283494a4ec9213600045442fe..dcba645cabb87db8fbd7a0eb85e710e1b48691c5 100644 (file)
@@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
                &crtc_state->base.adjusted_mode;
        int ret;
 
-       /*
-        * Both crtc and plane->crtc could be NULL if we're updating a
-        * property while the plane is disabled.  We don't actually have
-        * anything driver-specific we need to test in that case, so
-        * just return success.
-        */
        if (!intel_state->base.crtc && !old_plane_state->base.crtc)
                return 0;
 
@@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        const struct drm_crtc_state *old_crtc_state;
        struct drm_crtc_state *new_crtc_state;
 
-       /*
-        * Both crtc and plane->crtc could be NULL if we're updating a
-        * property while the plane is disabled.  We don't actually have
-        * anything driver-specific we need to test in that case, so
-        * just return success.
-        */
        if (!crtc)
                return 0;
 
@@ -277,7 +265,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane,
                                struct drm_property *property,
                                uint64_t *val)
 {
-       DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+       DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+                     property->base.id, property->name);
        return -EINVAL;
 }
 
@@ -299,6 +288,7 @@ intel_plane_atomic_set_property(struct drm_plane *plane,
                                struct drm_property *property,
                                uint64_t val)
 {
-       DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name);
+       DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
+                     property->base.id, property->name);
        return -EINVAL;
 }
index 3ea566f99450e37f10317712b651b8104bda2a28..bb94172ffc07402461bbfdecc9906387ef8bbfd1 100644 (file)
@@ -59,6 +59,7 @@
  */
 
 /* DP N/M table */
+#define LC_810M        810000
 #define LC_540M        540000
 #define LC_270M        270000
 #define LC_162M        162000
@@ -99,6 +100,15 @@ static const struct dp_aud_n_m dp_aud_n_m[] = {
        { 128000, LC_540M, 4096, 33750 },
        { 176400, LC_540M, 3136, 18750 },
        { 192000, LC_540M, 2048, 11250 },
+       { 32000, LC_810M, 1024, 50625 },
+       { 44100, LC_810M, 784, 28125 },
+       { 48000, LC_810M, 512, 16875 },
+       { 64000, LC_810M, 2048, 50625 },
+       { 88200, LC_810M, 1568, 28125 },
+       { 96000, LC_810M, 1024, 16875 },
+       { 128000, LC_810M, 4096, 50625 },
+       { 176400, LC_810M, 3136, 28125 },
+       { 192000, LC_810M, 2048, 16875 },
 };
 
 static const struct dp_aud_n_m *
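
The new LC_810M rows follow the DisplayPort audio relation Maud/Naud = 512 * fs / f_LS_Clk, here with an 810 MHz link symbol clock: for fs = 32 kHz, 512 * 32000 / 810000000 reduces to 1024/50625, matching the first added entry. A quick standalone reduction (values copied from the table; the relation itself is from the DP spec):

    #include <stdio.h>

    static unsigned long gcd(unsigned long a, unsigned long b)
    {
            while (b) {
                    unsigned long t = a % b;

                    a = b;
                    b = t;
            }
            return a;
    }

    int main(void)
    {
            unsigned long m = 512UL * 32000;   /* 512 * fs */
            unsigned long n = 810000000UL;     /* 810 MHz link symbol clock */
            unsigned long g = gcd(m, n);

            printf("M=%lu N=%lu\n", m / g, n / g); /* M=1024 N=50625 */
            return 0;
    }
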
@@ -198,13 +208,13 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
 }
 
 static bool intel_eld_uptodate(struct drm_connector *connector,
-                              i915_reg_t reg_eldv, uint32_t bits_eldv,
-                              i915_reg_t reg_elda, uint32_t bits_elda,
+                              i915_reg_t reg_eldv, u32 bits_eldv,
+                              i915_reg_t reg_elda, u32 bits_elda,
                               i915_reg_t reg_edid)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       uint8_t *eld = connector->eld;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 tmp;
        int i;
 
        tmp = I915_READ(reg_eldv);
@@ -218,7 +228,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
        I915_WRITE(reg_elda, tmp);
 
        for (i = 0; i < drm_eld_size(eld) / 4; i++)
-               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+               if (I915_READ(reg_edid) != *((const u32 *)eld + i))
                        return false;
 
        return true;
@@ -229,7 +239,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
                                    const struct drm_connector_state *old_conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       uint32_t eldv, tmp;
+       u32 eldv, tmp;
 
        DRM_DEBUG_KMS("Disable audio codec\n");
 
@@ -251,12 +261,12 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_connector *connector = conn_state->connector;
-       uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 eldv;
+       u32 tmp;
        int len, i;
 
-       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
 
        tmp = I915_READ(G4X_AUD_VID_DID);
        if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
@@ -278,7 +288,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
        len = min(drm_eld_size(eld) / 4, len);
        DRM_DEBUG_DRIVER("ELD size %d\n", len);
        for (i = 0; i < len; i++)
-               I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+               I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
 
        tmp = I915_READ(G4X_AUD_CNTL_ST);
        tmp |= eldv;
@@ -393,7 +403,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
-       uint32_t tmp;
+       u32 tmp;
 
        DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
 
@@ -426,8 +436,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_connector *connector = conn_state->connector;
        enum pipe pipe = crtc->pipe;
-       const uint8_t *eld = connector->eld;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 tmp;
        int len, i;
 
        DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
@@ -456,7 +466,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
        /* Up to 84 bytes of hw ELD buffer */
        len = min(drm_eld_size(eld), 84);
        for (i = 0; i < len / 4; i++)
-               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
 
        /* ELD valid */
        tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@ -477,7 +487,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
        enum port port = encoder->port;
-       uint32_t tmp, eldv;
+       u32 tmp, eldv;
        i915_reg_t aud_config, aud_cntrl_st2;
 
        DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
@@ -524,8 +534,8 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
        struct drm_connector *connector = conn_state->connector;
        enum pipe pipe = crtc->pipe;
        enum port port = encoder->port;
-       uint8_t *eld = connector->eld;
-       uint32_t tmp, eldv;
+       const u8 *eld = connector->eld;
+       u32 tmp, eldv;
        int len, i;
        i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
 
@@ -575,7 +585,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
        /* Up to 84 bytes of hw ELD buffer */
        len = min(drm_eld_size(eld), 84);
        for (i = 0; i < len / 4; i++)
-               I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+               I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
 
        /* ELD valid */
        tmp = I915_READ(aud_cntrl_st2);
index 54270bdde1005615d67b37a29bf34b87a89b3382..1faa494e2bc91a245861135ab5de015a7a6810ff 100644 (file)
@@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
        if (!lvds_lfp_data_ptrs)
                return;
 
-       dev_priv->vbt.lvds_vbt = 1;
-
        panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
                                               lvds_lfp_data_ptrs,
                                               panel_type);
@@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv,
        if (!driver)
                return;
 
-       if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
-               dev_priv->vbt.edp.support = 1;
+       if (INTEL_GEN(dev_priv) >= 5) {
+               /*
+                * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS
+                * to mean "eDP". The VBT spec doesn't agree with that
+                * interpretation, but real world VBTs seem to.
+                */
+               if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS)
+                       dev_priv->vbt.int_lvds_support = 0;
+       } else {
+               /*
+                * FIXME it's not clear which BDB version has the LVDS config
+                * bits defined. Revision history in the VBT spec says:
+                * "0.92 | Add two definitions for VBT value of LVDS Active
+                *  Config (00b and 11b values defined) | 06/13/2005"
+                * but does not specify the BDB version.
+                *
+                * So far version 134 (on i945gm) is the oldest VBT observed
+                * in the wild with the bits correctly populated. Version
+                * 108 (on i85x) does not have the bits correctly populated.
+                */
+               if (bdb->version >= 134 &&
+                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS &&
+                   driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS)
+                       dev_priv->vbt.int_lvds_support = 0;
+       }
 
        DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
        /*
@@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        int panel_type = dev_priv->vbt.panel_type;
 
        edp = find_section(bdb, BDB_EDP);
-       if (!edp) {
-               if (dev_priv->vbt.edp.support)
-                       DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
+       if (!edp)
                return;
-       }
 
        switch ((edp->color_depth >> (panel_type * 2)) & 3) {
        case EDP_18BPP:
@@ -634,7 +652,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        }
 
        if (bdb->version >= 173) {
-               uint8_t vswing;
+               u8 vswing;
 
                /* Don't read from VBT if module parameter has valid value*/
                if (i915_modparams.edp_vswing) {
@@ -688,8 +706,54 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
                break;
        }
 
-       dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time;
-       dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
+       /*
+        * New psr options 0=500us, 1=100us, 2=2500us, 3=0us
+        * Old decimal value is wake up time in multiples of 100 us.
+        */
+       if (bdb->version >= 205 &&
+           (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) ||
+            INTEL_GEN(dev_priv) >= 10)) {
+               switch (psr_table->tp1_wakeup_time) {
+               case 0:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 500;
+                       break;
+               case 1:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 100;
+                       break;
+               case 3:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+                                       psr_table->tp1_wakeup_time);
+                       /* fallthrough */
+               case 2:
+                       dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
+                       break;
+               }
+
+               switch (psr_table->tp2_tp3_wakeup_time) {
+               case 0:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500;
+                       break;
+               case 1:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
+                       break;
+               case 3:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
+                       break;
+               default:
+                       DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+                                       psr_table->tp2_tp3_wakeup_time);
+                       /* fallthrough */
+               case 2:
+                       dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
+                       break;
+               }
+       } else {
+               dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
+               dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
+       }
 }
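
Both switches in parse_psr() above use the same ordering trick: default: sits immediately before case 2:, so an out-of-range VBT value logs a warning and then falls through into the 2500 us branch. The idiom in isolation (wakeup_us() and its values mirror the hunk, but the helper itself is invented):

    #include <stdio.h>

    /* Invented helper mirroring the tp1 wakeup-time mapping above. */
    static int wakeup_us(int v)
    {
            switch (v) {
            case 0: return 500;
            case 1: return 100;
            case 3: return 0;
            default:
                    fprintf(stderr, "value %d out of range, using max\n", v);
                    /* fall through */
            case 2: return 2500;
            }
    }

    int main(void)
    {
            printf("%d %d\n", wakeup_us(1), wakeup_us(7)); /* 100 2500 */
            return 0;
    }
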
 
 static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -902,7 +966,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
         * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
         * byte.
         */
-       size_of_sequence = *((const uint32_t *)(data + index));
+       size_of_sequence = *((const u32 *)(data + index));
        index += 4;
 
        seq_end = index + size_of_sequence;
@@ -1197,18 +1261,37 @@ static const u8 cnp_ddc_pin_map[] = {
        [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
 };
 
+static const u8 icp_ddc_pin_map[] = {
+       [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+       [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+       [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+       [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+       [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+       [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+};
+
 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
-       if (HAS_PCH_CNP(dev_priv)) {
-               if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
-                       return cnp_ddc_pin_map[vbt_pin];
-               } else {
-                       DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
-                       return 0;
-               }
+       const u8 *ddc_pin_map;
+       int n_entries;
+
+       if (HAS_PCH_ICP(dev_priv)) {
+               ddc_pin_map = icp_ddc_pin_map;
+               n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+       } else if (HAS_PCH_CNP(dev_priv)) {
+               ddc_pin_map = cnp_ddc_pin_map;
+               n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+       } else {
+               /* Assuming direct map */
+               return vbt_pin;
        }
 
-       return vbt_pin;
+       if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+               return ddc_pin_map[vbt_pin];
+
+       DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+                     vbt_pin);
+       return 0;
 }
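
map_ddc_pin() is now the common table-with-sentinel pattern: an out-of-range index or a zero entry means "no valid mapping", and the caller treats the returned pin 0 as "ignore". A generic, compilable sketch of the same pattern (the pin numbers are made up):

    #include <stdio.h>

    /* Made-up map: indices 0 and 3 have no valid mapping. */
    static const unsigned char pin_map[] = { 0, 5, 6, 0, 7 };

    static unsigned char remap_pin(unsigned int vbt_pin)
    {
            if (vbt_pin < sizeof(pin_map) && pin_map[vbt_pin] != 0)
                    return pin_map[vbt_pin];
            return 0; /* sentinel: caller ignores pin 0 */
    }

    int main(void)
    {
            printf("%u %u %u\n", remap_pin(1), remap_pin(3), remap_pin(9)); /* 5 0 0 */
            return 0;
    }
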
 
 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
@@ -1504,7 +1587,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 
        /* LFP panel data */
        dev_priv->vbt.lvds_dither = 1;
-       dev_priv->vbt.lvds_vbt = 0;
 
        /* SDVO panel data */
        dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
@@ -1513,6 +1595,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        dev_priv->vbt.int_tv_support = 1;
        dev_priv->vbt.int_crt_support = 1;
 
+       /* driver features */
+       dev_priv->vbt.int_lvds_support = 1;
+
        /* Default to using SSC */
        dev_priv->vbt.lvds_use_ssc = 1;
        /*
@@ -1636,7 +1721,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
        const struct bdb_header *bdb;
        u8 __iomem *bios = NULL;
 
-       if (HAS_PCH_NOP(dev_priv)) {
+       if (INTEL_INFO(dev_priv)->num_pipes == 0) {
                DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
                return;
        }
index 18e643df523e5ff074d4629da20670e77fea660b..1db6ba7d926ee3b27b0c00e9bf82f956173e8b59 100644 (file)
@@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
        struct intel_engine_cs *engine =
                from_timer(engine, t, breadcrumbs.hangcheck);
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned int irq_count;
 
        if (!b->irq_armed)
                return;
 
-       if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
-               b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+       irq_count = READ_ONCE(b->irq_count);
+       if (b->hangcheck_interrupts != irq_count) {
+               b->hangcheck_interrupts = irq_count;
                mod_timer(&b->hangcheck, wait_timeout());
                return;
        }
@@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b)
        if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
                return false;
 
-       /* Only start with the heavy weight fake irq timer if we have not
+       /*
+        * Only start with the heavy weight fake irq timer if we have not
         * seen any interrupts since enabling it the first time. If the
         * interrupts are still arriving, it means we made a mistake in our
         * engine->seqno_barrier(), a timing error that should be transient
         * and unlikely to reoccur.
         */
-       return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+       return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
 }
 
 static void enable_fake_irq(struct intel_breadcrumbs *b)
@@ -846,8 +849,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       unsigned long flags;
 
-       spin_lock_irq(&b->irq_lock);
+       spin_lock_irqsave(&b->irq_lock, flags);
 
        /*
         * Leave the fake_irq timer enabled (if it is running), but clear the
@@ -871,7 +875,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
         */
        clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 
-       spin_unlock_irq(&b->irq_lock);
+       spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
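
The switch to spin_lock_irqsave() above makes intel_engine_reset_breadcrumbs() callable from contexts where interrupts may already be disabled (presumably the reset path): the irqsave/irqrestore pair restores the caller's IRQ state, whereas spin_unlock_irq() would unconditionally re-enable interrupts. The general pattern, in isolation:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Safe from any context: the caller's IRQ state is saved in
     * "flags" and restored on unlock, instead of being forced on
     * the way spin_unlock_irq() forces it. */
    static void demo_touch(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            /* ... critical section ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }
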
index 704ddb4d3ca7ef6f40c565eeb07a5cbe8c427a97..29075c763428055ddb3625a80b59643e694f3d76 100644 (file)
@@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
                break;
        default:
                DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+               /* fall through */
        case GC_DISPLAY_CLOCK_133_MHZ_PNV:
                cdclk_state->cdclk = 133333;
                break;
@@ -991,6 +992,16 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
        u32 freq_select, cdclk_ctl;
        int ret;
 
+       /*
+        * Based on WA#1183, CDCLK rates 308 and 617 MHz are
+        * unsupported on SKL. In theory this should never happen since only
+        * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not
+        * supported on SKL either, see the above WA. WARN whenever trying to
+        * use the corresponding VCO freq as that always leads to using the
+        * minimum 308MHz CDCLK.
+        */
+       WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+
        mutex_lock(&dev_priv->pcu_lock);
        ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                SKL_CDCLK_PREPARE_FOR_CHANGE,
@@ -1787,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
        switch (ref) {
        default:
                MISSING_CASE(ref);
+               /* fall through */
        case 24000:
                ranges = ranges_24;
                break;
@@ -1814,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
        switch (cdclk) {
        default:
                MISSING_CASE(cdclk);
+               /* fall through */
        case 307200:
        case 556800:
        case 652800:
@@ -1861,11 +1874,36 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
                              skl_cdclk_decimal(cdclk));
 
        mutex_lock(&dev_priv->pcu_lock);
-       /* TODO: add proper DVFS support. */
-       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2);
+       sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+                               cdclk_state->voltage_level);
        mutex_unlock(&dev_priv->pcu_lock);
 
        intel_update_cdclk(dev_priv);
+
+       /*
+        * Can't read out the voltage level :(
+        * Let's just assume everything is as expected.
+        */
+       dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+}
+
+static u8 icl_calc_voltage_level(int cdclk)
+{
+       switch (cdclk) {
+       case 50000:
+       case 307200:
+       case 312000:
+               return 0;
+       case 556800:
+       case 552000:
+               return 1;
+       default:
+               MISSING_CASE(cdclk);
+               /* fall through */
+       case 652800:
+       case 648000:
+               return 2;
+       }
 }
 
 static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1879,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
        switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) {
        default:
                MISSING_CASE(val);
+               /* fall through */
        case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
                cdclk_state->ref = 24000;
                break;
@@ -1899,7 +1938,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
                 */
                cdclk_state->vco = 0;
                cdclk_state->cdclk = cdclk_state->bypass;
-               return;
+               goto out;
        }
 
        cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
@@ -1908,6 +1947,14 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv,
        WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0);
 
        cdclk_state->cdclk = cdclk_state->vco / 2;
+
+out:
+       /*
+        * Can't read this out :( Let's assume it's
+        * at least what the CDCLK frequency requires.
+        */
+       cdclk_state->voltage_level =
+               icl_calc_voltage_level(cdclk_state->cdclk);
 }
 
 /**
@@ -1950,6 +1997,8 @@ sanitize:
        sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref);
        sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
                                                     sanitized_state.cdclk);
+       sanitized_state.voltage_level =
+                               icl_calc_voltage_level(sanitized_state.cdclk);
 
        icl_set_cdclk(dev_priv, &sanitized_state);
 }
@@ -1967,6 +2016,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
 
        cdclk_state.cdclk = cdclk_state.bypass;
        cdclk_state.vco = 0;
+       cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
 
        icl_set_cdclk(dev_priv, &cdclk_state);
 }
@@ -2470,6 +2520,9 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
 
        intel_state->cdclk.logical.vco = vco;
        intel_state->cdclk.logical.cdclk = cdclk;
+       intel_state->cdclk.logical.voltage_level =
+               max(icl_calc_voltage_level(cdclk),
+                   cnl_compute_min_voltage_level(intel_state));
 
        if (!intel_state->active_crtcs) {
                cdclk = icl_calc_cdclk(0, ref);
@@ -2477,6 +2530,8 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
 
                intel_state->cdclk.actual.vco = vco;
                intel_state->cdclk.actual.cdclk = cdclk;
+               intel_state->cdclk.actual.voltage_level =
+                       icl_calc_voltage_level(cdclk);
        } else {
                intel_state->cdclk.actual = intel_state->cdclk.logical;
        }
index de0e22322c76ed649c2f36266e65247ed9d02c28..0c6bf82bb059a87e1b6ce96e1412bb0aa60f92f3 100644 (file)
@@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
        return intel_encoder_to_crt(intel_attached_encoder(connector));
 }
 
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+                           i915_reg_t adpa_reg, enum pipe *pipe)
+{
+       u32 val;
+
+       val = I915_READ(adpa_reg);
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (HAS_PCH_CPT(dev_priv))
+               *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT;
+       else
+               *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT;
+
+       return val & ADPA_DAC_ENABLE;
+}
+
 static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
-
-       tmp = I915_READ(crt->adpa_reg);
-
-       if (!(tmp & ADPA_DAC_ENABLE))
-               goto out;
-
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
+       ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
 
-       ret = true;
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
@@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
        if (HAS_PCH_LPT(dev_priv))
                ; /* Those bits don't exist here */
        else if (HAS_PCH_CPT(dev_priv))
-               adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
-       else if (crtc->pipe == 0)
-               adpa |= ADPA_PIPE_A_SELECT;
+               adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe);
        else
-               adpa |= ADPA_PIPE_B_SELECT;
+               adpa |= ADPA_PIPE_SEL(crtc->pipe);
 
        if (!HAS_PCH_SPLIT(dev_priv))
                I915_WRITE(BCLRPAT(crtc->pipe), 0);
@@ -232,6 +232,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        pch_post_disable_crt(encoder, old_crtc_state, old_conn_state);
 
        lpt_disable_pch_transcoder(dev_priv);
@@ -268,6 +270,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
        dev_priv->display.fdi_link_train(crtc, crtc_state);
+
+       intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void hsw_enable_crt(struct intel_encoder *encoder,
@@ -304,6 +308,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
        int max_dotclk = dev_priv->max_dotclk_freq;
        int max_clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock < 25000)
                return MODE_CLOCK_LOW;
 
@@ -330,6 +337,10 @@ intel_crt_mode_valid(struct drm_connector *connector,
            (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
                return MODE_CLOCK_HIGH;
 
+       /* HSW/BDW FDI limited to 4k */
+       if (mode->hdisplay > 4096)
+               return MODE_H_ILLEGAL;
+
        return MODE_OK;
 }
 
@@ -337,6 +348,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
@@ -344,6 +361,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
                                   struct intel_crtc_state *pipe_config,
                                   struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = true;
 
        return true;
@@ -354,6 +377,16 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
                                   struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       /* HSW/BDW FDI limited to 4k */
+       if (adjusted_mode->crtc_hdisplay > 4096 ||
+           adjusted_mode->crtc_hblank_start > 4096)
+               return false;
 
        pipe_config->has_pch_encoder = true;
 
@@ -493,7 +526,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
         * to get a reliable result.
         */
 
-       if (IS_G4X(dev_priv) && !IS_GM45(dev_priv))
+       if (IS_G45(dev_priv))
                tries = 2;
        else
                tries = 1;
index f4a8598a2d392d607e8d17338f1ff251bc7c627b..39d66f8493faea5162fd88315c7e709e4d51350d 100644 (file)
@@ -915,7 +915,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
        level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
-       if (IS_CANNONLAKE(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               if (port == PORT_A || port == PORT_B)
+                       icl_get_combo_buf_trans(dev_priv, port,
+                                               INTEL_OUTPUT_HDMI, &n_entries);
+               else
+                       n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
+               default_entry = n_entries - 1;
+       } else if (IS_CANNONLAKE(dev_priv)) {
                cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
                default_entry = n_entries - 1;
        } else if (IS_GEN9_LP(dev_priv)) {
@@ -1055,14 +1062,31 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
 static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
                                       const struct intel_shared_dpll *pll)
 {
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       int clock = crtc->config->port_clock;
        const enum intel_dpll_id id = pll->info->id;
 
        switch (id) {
        default:
                MISSING_CASE(id);
+               /* fall through */
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
                return DDI_CLK_SEL_NONE;
+       case DPLL_ID_ICL_TBTPLL:
+               switch (clock) {
+               case 162000:
+                       return DDI_CLK_SEL_TBT_162;
+               case 270000:
+                       return DDI_CLK_SEL_TBT_270;
+               case 540000:
+                       return DDI_CLK_SEL_TBT_540;
+               case 810000:
+                       return DDI_CLK_SEL_TBT_810;
+               default:
+                       MISSING_CASE(clock);
+                       break;
+               }
        case DPLL_ID_ICL_MGPLL1:
        case DPLL_ID_ICL_MGPLL2:
        case DPLL_ID_ICL_MGPLL3:
@@ -1243,35 +1267,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
        return ret;
 }
 
-/* Finds the only possible encoder associated with the given CRTC. */
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct intel_encoder *ret = NULL;
-       struct drm_atomic_state *state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int num_encoders = 0;
-       int i;
-
-       state = crtc_state->base.state;
-
-       for_each_new_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               ret = to_intel_encoder(connector_state->best_encoder);
-               num_encoders++;
-       }
-
-       WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
-            pipe_name(crtc->pipe));
-
-       BUG_ON(ret == NULL);
-       return ret;
-}
-
 #define LC_FREQ 2700
 
 static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
@@ -1374,8 +1369,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
        uint32_t cfgcr0, cfgcr1;
        uint32_t p0, p1, p2, dco_freq, ref_clock;
 
-       cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
-       cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+       if (INTEL_GEN(dev_priv) >= 11) {
+               cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+               cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+       } else {
+               cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
+               cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
+       }
 
        p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
        p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
@@ -1451,6 +1451,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
        pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
 
+static void icl_ddi_clock_get(struct intel_encoder *encoder,
+                             struct intel_crtc_state *pipe_config)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum port port = encoder->port;
+       int link_clock = 0;
+       uint32_t pll_id;
+
+       pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+       if (port == PORT_A || port == PORT_B) {
+               if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+                       link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+               else
+                       link_clock = icl_calc_dp_combo_pll_link(dev_priv,
+                                                               pll_id);
+       } else {
+               /* FIXME - Add for MG PLL */
+               WARN(1, "MG PLL clock_get code not implemented yet\n");
+       }
+
+       pipe_config->port_clock = link_clock;
+       ddi_dotclock_get(pipe_config);
+}
+
 static void cnl_ddi_clock_get(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config)
 {
@@ -1644,6 +1668,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
                bxt_ddi_clock_get(encoder, pipe_config);
        else if (IS_CANNONLAKE(dev_priv))
                cnl_ddi_clock_get(encoder, pipe_config);
+       else if (IS_ICELAKE(dev_priv))
+               icl_ddi_clock_get(encoder, pipe_config);
 }
 
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1782,15 +1808,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
        I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
        uint32_t val = I915_READ(reg);
 
        val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
        val |= TRANS_DDI_PORT_NONE;
        I915_WRITE(reg, val);
+
+       if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+               /* Quirk time at 100ms for reliable operation */
+               msleep(100);
+       }
 }
 
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
@@ -1958,15 +1993,50 @@ out:
        return ret;
 }
 
-static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder)
+static inline enum intel_display_power_domain
+intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+{
+       /* CNL HW requires corresponding AUX IOs to be powered up for PSR with
+        * DC states enabled at the same time, while for driver initiated AUX
+        * transfers we need the same AUX IOs to be powered but with DC states
+        * disabled. Accordingly use the AUX power domain here which leaves DC
+        * states enabled.
+        * However, for non-A AUX ports the corresponding non-EDP transcoders
+        * would have already enabled power well 2 and DC_OFF. This means we can
+        * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
+        * specific AUX_IO reference without powering up any extra wells.
+        * Note that PSR is enabled only on Port A even though this function
+        * returns the correct domain for other ports too.
+        */
+       return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+                                             intel_dp->aux_power_domain;
+}
+
+static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
+                                      struct intel_crtc_state *crtc_state)
 {
-       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
-       enum pipe pipe;
+       struct intel_digital_port *dig_port;
+       u64 domains;
 
-       if (intel_ddi_get_hw_state(encoder, &pipe))
-               return BIT_ULL(dig_port->ddi_io_power_domain);
+       /*
+        * TODO: Add support for MST encoders. Atm, the following should never
+        * happen since fake-MST encoders don't set their get_power_domains()
+        * hook.
+        */
+       if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+               return 0;
 
-       return 0;
+       dig_port = enc_to_dig_port(&encoder->base);
+       domains = BIT_ULL(dig_port->ddi_io_power_domain);
+
+       /* AUX power is only needed for (e)DP mode, not for HDMI. */
+       if (intel_crtc_has_dp_encoder(crtc_state)) {
+               struct intel_dp *intel_dp = &dig_port->dp;
+
+               domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
+       }
+
+       return domains;
 }
 
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2115,6 +2185,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
                DP_TRAIN_VOLTAGE_SWING_MASK;
 }
 
+/*
+ * We assume that the full set of pre-emphasis values can be
+ * used on all DDI platforms. Should that change we need to
+ * rethink this code.
+ */
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing)
+{
+       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+               return DP_TRAIN_PRE_EMPH_LEVEL_3;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+               return DP_TRAIN_PRE_EMPH_LEVEL_2;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+               return DP_TRAIN_PRE_EMPH_LEVEL_1;
+       case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
+       default:
+               return DP_TRAIN_PRE_EMPH_LEVEL_0;
+       }
+}
+
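
intel_ddi_dp_pre_emphasis_max() encodes a simple trade-off: the higher the voltage-swing level, the lower the maximum pre-emphasis, with the two always summing to 3. An equivalent closed form (helper name invented; same mapping as the switch above):

    /* Invented helper; identical mapping to the switch above:
     * swing 0 -> pre-emphasis 3, 1 -> 2, 2 -> 1, 3 (and up) -> 0. */
    static inline unsigned int max_pre_emphasis(unsigned int vswing_level)
    {
            return vswing_level >= 3 ? 0 : 3 - vswing_level;
    }
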
 static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
                                   int level, enum intel_output_type type)
 {
@@ -2586,6 +2676,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
 
        WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
 
+       intel_display_power_get(dev_priv,
+                               intel_ddi_main_link_aux_domain(intel_dp));
+
        intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
                                 crtc_state->lane_count, is_mst);
 
@@ -2610,6 +2703,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        intel_dp_start_link_train(intel_dp);
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
+
+       intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2640,6 +2735,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
        if (IS_GEN9_BC(dev_priv))
                skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
 
+       intel_ddi_enable_pipe_clock(crtc_state);
+
        intel_dig_port->set_infoframes(&encoder->base,
                                       crtc_state->has_infoframe,
                                       crtc_state, conn_state);
@@ -2709,6 +2806,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /*
         * Power down sink before disabling the port, otherwise we end
         * up getting interrupts from the sink on detecting link loss.
@@ -2724,6 +2823,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
        intel_ddi_clk_disable(encoder);
+
+       intel_display_power_put(dev_priv,
+                               intel_ddi_main_link_aux_domain(intel_dp));
 }
 
 static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -2734,11 +2836,13 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
 
-       intel_disable_ddi_buf(encoder);
-
        dig_port->set_infoframes(&encoder->base, false,
                                 old_crtc_state, old_conn_state);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
+       intel_disable_ddi_buf(encoder);
+
        intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
 
        intel_ddi_clk_disable(encoder);
@@ -3025,6 +3129,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 {
        if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 2;
+       else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+               crtc_state->min_voltage_level = 1;
 }
 
 void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3533,7 +3639,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                        goto err;
 
                intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-               dev_priv->hotplug.irq_port[port] = intel_dig_port;
        }
 
        /* In theory we don't need the encoder->type check, but leave it just in
index 0fd13df424cf19719958b5de465cebec849895c3..0ef0c6448d53a835fbdf5319a8010c64d613bd0f 100644 (file)
@@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
 void intel_driver_caps_print(const struct intel_driver_caps *caps,
                             struct drm_printer *p)
 {
+       drm_printf(p, "Has logical contexts? %s\n",
+                  yesno(caps->has_logical_contexts));
        drm_printf(p, "scheduler: %x\n", caps->scheduler);
 }
 
index 933e31669557e74311ab6f7ff685921517c38f0a..633f9fbf72eab7787102d094f8f442799da4c401 100644 (file)
@@ -186,6 +186,7 @@ struct intel_device_info {
 
 struct intel_driver_caps {
        unsigned int scheduler;
+       bool has_logical_contexts:1;
 };
 
 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
index dee3a8e659f1d6c9dbe2040abd6e2ba42020070a..ed3fa1c8a98342d549ec8bf5b027b3b783affa08 100644 (file)
@@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc)
         * We can ditch the adjusted_mode.crtc_clock check as soon
         * as Haswell has gained clock readout/fastboot support.
         *
-        * We can ditch the crtc->primary->fb check as soon as we can
+        * We can ditch the crtc->primary->state->fb check as soon as we can
         * properly reconstruct framebuffers.
         *
         * FIXME: The intel_crtc->active here should be switched to
@@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
        i915_reg_t pp_reg;
        u32 val;
-       enum pipe panel_pipe = PIPE_A;
+       enum pipe panel_pipe = INVALID_PIPE;
        bool locked = true;
 
        if (WARN_ON(HAS_DDI(dev_priv)))
@@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
                pp_reg = PP_CONTROL(0);
                port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
 
-               if (port_sel == PANEL_PORT_SELECT_LVDS &&
-                   I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
-                       panel_pipe = PIPE_B;
-               /* XXX: else fix for eDP */
+               switch (port_sel) {
+               case PANEL_PORT_SELECT_LVDS:
+                       intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPA:
+                       intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPC:
+                       intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+                       break;
+               case PANEL_PORT_SELECT_DPD:
+                       intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+                       break;
+               default:
+                       MISSING_CASE(port_sel);
+                       break;
+               }
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                /* presumably write lock depends on pipe, not port select */
                pp_reg = PP_CONTROL(pipe);
                panel_pipe = pipe;
        } else {
+               u32 port_sel;
+
                pp_reg = PP_CONTROL(0);
-               if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
-                       panel_pipe = PIPE_B;
+               port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+               WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+               intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
        }
 
        val = I915_READ(pp_reg);
@@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 
 static void assert_plane(struct intel_plane *plane, bool state)
 {
-       bool cur_state = plane->get_hw_state(plane);
+       enum pipe pipe;
+       bool cur_state;
+
+       cur_state = plane->get_hw_state(plane, &pipe);
 
        I915_STATE_WARN(cur_state != state,
                        "%s assertion failure (expected %s, current %s)\n",
@@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
             pipe_name(pipe));
 }
 
-static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
-                           enum pipe pipe, u32 port_sel, u32 val)
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+                                  enum pipe pipe, enum port port,
+                                  i915_reg_t dp_reg)
 {
-       if ((val & DP_PORT_EN) == 0)
-               return false;
+       enum pipe port_pipe;
+       bool state;
 
-       if (HAS_PCH_CPT(dev_priv)) {
-               u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
-               if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
-                       return false;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
-                       return false;
-       } else {
-               if ((val & DP_PIPE_MASK) != (pipe << 30))
-                       return false;
-       }
-       return true;
-}
+       state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
 
-static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, u32 val)
-{
-       if ((val & SDVO_ENABLE) == 0)
-               return false;
+       I915_STATE_WARN(state && port_pipe == pipe,
+                       "PCH DP %c enabled on transcoder %c, should be disabled\n",
+                       port_name(port), pipe_name(pipe));
 
-       if (HAS_PCH_CPT(dev_priv)) {
-               if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
-                       return false;
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
-                       return false;
-       } else {
-               if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
-                       return false;
-       }
-       return true;
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+                       "IBX PCH DP %c still using transcoder B\n",
+                       port_name(port));
 }
 
-static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, u32 val)
-{
-       if ((val & LVDS_PORT_EN) == 0)
-               return false;
-
-       if (HAS_PCH_CPT(dev_priv)) {
-               if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
-                       return false;
-       } else {
-               if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
-                       return false;
-       }
-       return true;
-}
-
-static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
-                             enum pipe pipe, u32 val)
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe, enum port port,
+                                    i915_reg_t hdmi_reg)
 {
-       if ((val & ADPA_DAC_ENABLE) == 0)
-               return false;
-       if (HAS_PCH_CPT(dev_priv)) {
-               if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
-                       return false;
-       } else {
-               if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
-                       return false;
-       }
-       return true;
-}
+       enum pipe port_pipe;
+       bool state;
 
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
-                                  enum pipe pipe, i915_reg_t reg,
-                                  u32 port_sel)
-{
-       u32 val = I915_READ(reg);
-       I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
-            "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
-            i915_mmio_reg_offset(reg), pipe_name(pipe));
+       state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
 
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
-            && (val & DP_PIPEB_SELECT),
-            "IBX PCH dp port still using transcoder B\n");
-}
+       I915_STATE_WARN(state && port_pipe == pipe,
+                       "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
+                       port_name(port), pipe_name(pipe));
 
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
-                                    enum pipe pipe, i915_reg_t reg)
-{
-       u32 val = I915_READ(reg);
-       I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
-            "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
-            i915_mmio_reg_offset(reg), pipe_name(pipe));
-
-       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
-            && (val & SDVO_PIPE_B_SELECT),
-            "IBX PCH hdmi port still using transcoder B\n");
+       I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
+                       "IBX PCH HDMI %c still using transcoder B\n",
+                       port_name(port));
 }
 
 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
                                      enum pipe pipe)
 {
-       u32 val;
+       enum pipe port_pipe;
 
-       assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
-       assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
-       assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
+       assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
 
-       val = I915_READ(PCH_ADPA);
-       I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
-            "PCH VGA enabled on transcoder %c, should be disabled\n",
-            pipe_name(pipe));
+       I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
+                       port_pipe == pipe,
+                       "PCH VGA enabled on transcoder %c, should be disabled\n",
+                       pipe_name(pipe));
 
-       val = I915_READ(PCH_LVDS);
-       I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
-            "PCH LVDS enabled on transcoder %c, should be disabled\n",
-            pipe_name(pipe));
+       I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
+                       port_pipe == pipe,
+                       "PCH LVDS enabled on transcoder %c, should be disabled\n",
+                       pipe_name(pipe));
 
-       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
-       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
-       assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
+       assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
 }
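
The refactor above collapses the per-port *_pipe_enabled() predicates into probe helpers that return the enable state and hand back the attached pipe through an out-parameter, so every assert compares pipes the same way. A minimal user-space sketch of that shape, assuming a made-up register layout (none of these names or bit positions are the driver's):

#include <stdbool.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, INVALID_PIPE = -1 };

/* pretend register: bit 31 = port enable, bit 30 = pipe select */
static bool port_enabled(unsigned int reg_val, enum pipe *pipe)
{
        *pipe = (reg_val & (1u << 30)) ? PIPE_B : PIPE_A;
        return reg_val & (1u << 31);
}

static void assert_port_disabled_on_pipe(unsigned int reg_val, enum pipe pipe)
{
        enum pipe port_pipe;
        bool state = port_enabled(reg_val, &port_pipe);

        if (state && port_pipe == pipe)
                fprintf(stderr, "port enabled on pipe %c, should be disabled\n",
                        'A' + pipe);
}

int main(void)
{
        assert_port_disabled_on_pipe((1u << 31) | (1u << 30), PIPE_B); /* warns */
        assert_port_disabled_on_pipe(0, PIPE_B);                       /* silent */
        return 0;
}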
 
 static void _vlv_enable_pll(struct intel_crtc *crtc,
@@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0;
        unsigned int max_size = 0;
        int i, num_planes = fb->format->num_planes;
@@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
-               if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
+               if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
@@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                max_size = max(max_size, offset + size);
        }
 
-       if (max_size * tile_size > intel_fb->obj->base.size) {
+       if (max_size * tile_size > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
-                             max_size * tile_size, intel_fb->obj->base.size);
+                             max_size * tile_size, obj->base.size);
                return -EINVAL;
        }
 
@@ -2796,10 +2756,10 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
 
        /* FIXME pre-g4x don't work like this */
        if (visible) {
-               crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
+               crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
                crtc_state->active_planes |= BIT(plane->id);
        } else {
-               crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
+               crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
                crtc_state->active_planes &= ~BIT(plane->id);
        }
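
drm_plane_mask() is the DRM helper equivalent of BIT(drm_plane_index()); the switch is cosmetic but keeps mask construction in one place. The equivalence in a few lines, with BIT and the index function modeled locally rather than taken from DRM:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (UINT32_C(1) << (n))

static unsigned int plane_index(unsigned int idx) { return idx; }
static uint32_t plane_mask(unsigned int idx) { return BIT(plane_index(idx)); }

int main(void)
{
        uint32_t mask = 0;

        mask |= plane_mask(2);                 /* set */
        assert(mask == BIT(plane_index(2)));   /* identical bit */
        mask &= ~plane_mask(2);                /* clear */
        printf("mask now 0x%x\n", mask);
        return 0;
}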
 
@@ -2922,9 +2882,8 @@ valid_fb:
        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;
 
-       drm_framebuffer_get(fb);
-       primary->fb = primary->state->fb = fb;
-       primary->crtc = primary->state->crtc = &intel_crtc->base;
+       plane_state->fb = fb;
+       plane_state->crtc = &intel_crtc->base;
 
        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
@@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane,
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-static bool i9xx_plane_get_hw_state(struct intel_plane *plane)
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+                                   enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       enum pipe pipe = plane->pipe;
        bool ret;
+       u32 val;
 
        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-4 which don't have any
         * display power wells.
         */
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE;
+       val = I915_READ(DSPCNTR(i9xx_plane));
+
+       ret = val & DISPLAY_PLANE_ENABLE;
+
+       if (INTEL_GEN(dev_priv) >= 5)
+               *pipe = plane->pipe;
+       else
+               *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+                       DISPPLANE_SEL_PIPE_SHIFT;
 
        intel_display_power_put(dev_priv, power_domain);
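
On pre-ILK parts the plane's pipe binding lives in pipe-select bits of the plane control register, which is why the readout above now extracts the pipe with a mask and shift instead of trusting plane->pipe. The extraction itself, stand-alone (the field position below is a placeholder, not the real DISPPLANE_SEL_PIPE_* values):

#include <stdint.h>
#include <stdio.h>

/* placeholder field: two pipe-select bits at bit 24 */
#define SEL_PIPE_SHIFT  24
#define SEL_PIPE_MASK   (0x3u << SEL_PIPE_SHIFT)
#define PLANE_ENABLE    (1u << 31)

int main(void)
{
        uint32_t dspcntr = PLANE_ENABLE | (1u << SEL_PIPE_SHIFT); /* pipe B */
        unsigned int pipe = (dspcntr & SEL_PIPE_MASK) >> SEL_PIPE_SHIFT;

        printf("enabled=%d pipe=%c\n", !!(dspcntr & PLANE_ENABLE), 'A' + pipe);
        return 0;
}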
 
@@ -3689,7 +3657,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
        plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
        plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
 
-       if (intel_format_is_yuv(fb->format->format)) {
+       if (fb->format->is_yuv) {
                if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
                        plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
                else
@@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
        }
 }
 
-/* Return which DP Port should be selected for Transcoder DP control */
-static enum port
-intel_trans_dp_port_sel(struct intel_crtc *crtc)
+/*
+ * Finds the encoder associated with the given CRTC. This can only be
+ * used when we know that the CRTC isn't feeding multiple encoders!
+ */
+static struct intel_encoder *
+intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+                          const struct intel_crtc_state *crtc_state)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_encoder *encoder;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_connector_state *connector_state;
+       const struct drm_connector *connector;
+       struct intel_encoder *encoder = NULL;
+       int num_encoders = 0;
+       int i;
 
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
-               if (encoder->type == INTEL_OUTPUT_DP ||
-                   encoder->type == INTEL_OUTPUT_EDP)
-                       return encoder->port;
+       for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
+               if (connector_state->crtc != &crtc->base)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+               num_encoders++;
        }
 
-       return -1;
+       WARN(num_encoders != 1, "%d encoders for pipe %c\n",
+            num_encoders, pipe_name(crtc->pipe));
+
+       return encoder;
 }
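
intel_get_crtc_new_encoder() derives the encoder from the atomic state rather than the legacy encoder walk: every connector state bound to the CRTC names its best_encoder, and exactly one is expected. A toy model of that lookup, with stand-in types in place of the DRM structs:

#include <stdio.h>

struct encoder { const char *name; };
struct connector_state { int crtc_id; struct encoder *best_encoder; };

static struct encoder *crtc_new_encoder(struct connector_state *states,
                                        int n, int crtc_id)
{
        struct encoder *enc = NULL;
        int num_encoders = 0, i;

        for (i = 0; i < n; i++) {
                if (states[i].crtc_id != crtc_id)
                        continue;
                enc = states[i].best_encoder;
                num_encoders++;
        }

        if (num_encoders != 1)
                fprintf(stderr, "%d encoders for crtc %d\n",
                        num_encoders, crtc_id);
        return enc;
}

int main(void)
{
        struct encoder ddi_b = { "DDI B" };
        struct connector_state states[] = {
                { .crtc_id = 0, .best_encoder = &ddi_b },
                { .crtc_id = 1, .best_encoder = NULL },
        };

        printf("%s\n", crtc_new_encoder(states, 2, 0)->name);
        return 0;
}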
 
 /*
@@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc)
  *   - DP transcoding bits
  *   - transcoder
  */
-static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
+static void ironlake_pch_enable(const struct intel_atomic_state *state,
+                               const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
@@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
                        &crtc_state->base.adjusted_mode;
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
+               enum port port;
+
                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
@@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
 
-               switch (intel_trans_dp_port_sel(crtc)) {
-               case PORT_B:
-                       temp |= TRANS_DP_PORT_SEL_B;
-                       break;
-               case PORT_C:
-                       temp |= TRANS_DP_PORT_SEL_C;
-                       break;
-               case PORT_D:
-                       temp |= TRANS_DP_PORT_SEL_D;
-                       break;
-               default:
-                       BUG();
-               }
+               port = intel_get_crtc_new_encoder(state, crtc_state)->port;
+               WARN_ON(port < PORT_B || port > PORT_D);
+               temp |= TRANS_DP_PORT_SEL(port);
 
                I915_WRITE(reg, temp);
        }
@@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
        ironlake_enable_pch_transcoder(dev_priv, pipe);
 }
 
-static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
+static void lpt_pch_enable(const struct intel_atomic_state *state,
+                          const struct intel_crtc_state *crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
        }
 }
 
+/*
+ * The hardware phase 0.0 refers to the center of the pixel.
+ * We want to start from the top/left edge which is phase
+ * -0.5. That matches how the hardware calculates the scaling
+ * factors (from top-left of the first pixel to bottom-right
+ * of the last pixel, as opposed to the pixel centers).
+ *
+ * For 4:2:0 subsampled chroma planes we obviously have to
+ * adjust that so that the chroma sample position lands in
+ * the right spot.
+ *
+ * Note that for packed YCbCr 4:2:2 formats there is no way to
+ * control chroma siting. The hardware simply replicates the
+ * chroma samples for both of the luma samples, and thus we don't
+ * actually get the expected MPEG2 chroma siting convention :(
+ * The same behaviour is observed on pre-SKL platforms as well.
+ */
+u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+{
+       int phase = -0x8000;
+       u16 trip = 0;
+
+       if (chroma_cosited)
+               phase += (sub - 1) * 0x8000 / sub;
+
+       if (phase < 0)
+               phase = 0x10000 + phase;
+       else
+               trip = PS_PHASE_TRIP;
+
+       return ((phase >> 2) & PS_PHASE_MASK) | trip;
+}
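
To make the comment concrete: the phase starts at -0.5 in signed .16 fixed point (-0x8000), and cosited chroma pulls it back toward the pixel center by (sub - 1) / (2 * sub), i.e. +0.25 for 2x subsampling. A stand-alone replica of the calculation with two worked inputs (PS_PHASE_MASK and PS_PHASE_TRIP below are illustrative placeholders, not the real register definitions):

#include <stdint.h>
#include <stdio.h>

#define PS_PHASE_MASK 0x7fff   /* placeholder */
#define PS_PHASE_TRIP 0x8000   /* placeholder */

static uint16_t calc_phase(int sub, int chroma_cosited)
{
        int phase = -0x8000;             /* -0.5 in .16 fixed point */
        uint16_t trip = 0;

        if (chroma_cosited)
                phase += (sub - 1) * 0x8000 / sub;

        if (phase < 0)
                phase = 0x10000 + phase; /* two's-complement wrap */
        else
                trip = PS_PHASE_TRIP;

        return ((phase >> 2) & PS_PHASE_MASK) | trip;
}

int main(void)
{
        /* luma / RGB: -0.5 -> wraps to 0x8000 -> register value 0x2000 */
        printf("y/rgb:  0x%04x\n", calc_phase(1, 0));
        /* 4:2:0 cosited chroma: -0.5 + 0.25 = -0.25 -> 0xc000 -> 0x3000 */
        printf("chroma: 0x%04x\n", calc_phase(2, 1));
        return 0;
}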
+
 static int
 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
@@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
                &crtc->config->scaler_state;
 
        if (crtc->config->pch_pfit.enabled) {
+               u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;
 
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;
 
+               uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+
                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+               I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
+                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+               I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
+                             PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
@@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
         *
         * Spurious PCH underruns also occur during PCH enabling.
         */
-       if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-       if (intel_crtc->config->has_pch_encoder)
-               intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);
@@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
        intel_enable_pipe(pipe_config);
 
        if (intel_crtc->config->has_pch_encoder)
-               ironlake_pch_enable(pipe_config);
+               ironlake_pch_enable(old_intel_state, pipe_config);
 
        assert_vblank_disabled(crtc);
        drm_crtc_vblank_on(crtc);
@@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
        if (HAS_PCH_CPT(dev_priv))
                cpt_verify_modeset(dev, intel_crtc->pipe);
 
-       /* Must wait for vblank to avoid spurious PCH FIFO underruns */
-       if (intel_crtc->config->has_pch_encoder)
+       /*
+        * Must wait for vblank to avoid spurious PCH FIFO underruns.
+        * And a second vblank wait is needed at least on ILK with
+        * some interlaced HDMI modes. Let's always do the double wait,
+        * in case there are more corner cases we don't know about.
+        */
+       if (intel_crtc->config->has_pch_encoder) {
+               intel_wait_for_vblank(dev_priv, pipe);
                intel_wait_for_vblank(dev_priv, pipe);
+       }
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 }
@@ -5611,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        bool psl_clkgate_wa;
+       u32 pipe_chicken;
 
        if (WARN_ON(intel_crtc->active))
                return;
@@ -5623,6 +5645,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
        if (INTEL_GEN(dev_priv) >= 11)
                icl_map_plls_to_ports(crtc, pipe_config, old_state);
 
+       intel_encoders_pre_enable(crtc, pipe_config, old_state);
+
        if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
@@ -5651,11 +5675,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 
        intel_crtc->active = true;
 
-       intel_encoders_pre_enable(crtc, pipe_config, old_state);
-
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_enable_pipe_clock(pipe_config);
-
        /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
        psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
                         intel_crtc->config->pch_pfit.enabled;
@@ -5673,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
         */
        intel_color_load_luts(&pipe_config->base);
 
+       /*
+        * Display WA #1153: enable hardware to bypass the alpha math
+        * and rounding for per-pixel values 00 and 0xff
+        */
+       if (INTEL_GEN(dev_priv) >= 11) {
+               pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
+               if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
+                       I915_WRITE_FW(PIPE_CHICKEN(pipe),
+                                     pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
+       }
+
        intel_ddi_set_pipe_settings(pipe_config);
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(pipe_config);
@@ -5688,7 +5718,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                intel_enable_pipe(pipe_config);
 
        if (intel_crtc->config->has_pch_encoder)
-               lpt_pch_enable(pipe_config);
+               lpt_pch_enable(old_intel_state, pipe_config);
 
        if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
                intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -5741,10 +5771,8 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
         * pipe is already disabled, but FDI RX/TX is still enabled.
         * Happens at least with VGA+HDMI cloning. Suppress them.
         */
-       if (intel_crtc->config->has_pch_encoder) {
-               intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-               intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
-       }
+       intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+       intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
        intel_encoders_disable(crtc, old_crtc_state, old_state);
 
@@ -5794,7 +5822,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        struct drm_crtc *crtc = old_crtc_state->base.crtc;
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+       enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
 
        intel_encoders_disable(crtc, old_crtc_state, old_state);
 
@@ -5805,20 +5833,17 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
        if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(old_crtc_state);
 
-       if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
-               intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
+       if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
+               intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
 
        if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+               intel_ddi_disable_transcoder_func(old_crtc_state);
 
        if (INTEL_GEN(dev_priv) >= 9)
                skylake_scaler_disable(intel_crtc);
        else
                ironlake_pfit_disable(intel_crtc, false);
 
-       if (!transcoder_is_dsi(cpu_transcoder))
-               intel_ddi_disable_pipe_clock(intel_crtc->config);
-
        intel_encoders_post_disable(crtc, old_crtc_state, old_state);
 
        if (INTEL_GEN(dev_priv) >= 11)
@@ -5849,6 +5874,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
        I915_WRITE(BCLRPAT(crtc->pipe), 0);
 }
 
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (IS_ICELAKE(dev_priv))
+               return port >= PORT_C && port <= PORT_F;
+
+       return false;
+}
+
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
+{
+       if (!intel_port_is_tc(dev_priv, port))
+               return PORT_TC_NONE;
+
+       return port - PORT_C;
+}
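
On Icelake, ports C..F are the Type-C capable ones and map linearly onto TC ports 0..3; everything else reports PORT_TC_NONE. A quick stand-alone check of that mapping, assuming consecutive enum values as the `port - PORT_C` arithmetic above implies:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };
enum tc_port { PORT_TC_NONE = -1, PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4 };

static enum tc_port port_to_tc(enum port port)
{
        if (port < PORT_C || port > PORT_F) /* ICL: only C..F are TC */
                return PORT_TC_NONE;
        return (enum tc_port)(port - PORT_C);
}

int main(void)
{
        enum port p;

        for (p = PORT_A; p <= PORT_F; p++)
                printf("port %c -> tc %d\n", 'A' + p, port_to_tc(p));
        return 0;
}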
+
 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
 {
        switch (port) {
@@ -7675,16 +7716,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       enum pipe pipe = crtc->pipe;
+       enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
 
-       if (!plane->get_hw_state(plane))
+       if (!plane->get_hw_state(plane, &pipe))
                return;
 
+       WARN_ON(pipe != crtc->pipe);
+
        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -8705,16 +8748,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum plane_id plane_id = plane->id;
-       enum pipe pipe = crtc->pipe;
+       enum pipe pipe;
        u32 val, base, offset, stride_mult, tiling, alpha;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
 
-       if (!plane->get_hw_state(plane))
+       if (!plane->get_hw_state(plane, &pipe))
                return;
 
+       WARN_ON(pipe != crtc->pipe);
+
        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
@@ -9142,9 +9187,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
 {
+       struct intel_atomic_state *state =
+               to_intel_atomic_state(crtc_state->base.state);
+
        if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
                struct intel_encoder *encoder =
-                       intel_ddi_get_crtc_new_encoder(crtc_state);
+                       intel_get_crtc_new_encoder(state, crtc_state);
 
                if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
@@ -9172,6 +9220,44 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
        pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
 }
 
+static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
+                               enum port port,
+                               struct intel_crtc_state *pipe_config)
+{
+       enum intel_dpll_id id;
+       u32 temp;
+
+       /* TODO: TBT pll not implemented. */
+       switch (port) {
+       case PORT_A:
+       case PORT_B:
+               temp = I915_READ(DPCLKA_CFGCR0_ICL) &
+                      DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+               id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
+
+               if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+                       return;
+               break;
+       case PORT_C:
+               id = DPLL_ID_ICL_MGPLL1;
+               break;
+       case PORT_D:
+               id = DPLL_ID_ICL_MGPLL2;
+               break;
+       case PORT_E:
+               id = DPLL_ID_ICL_MGPLL3;
+               break;
+       case PORT_F:
+               id = DPLL_ID_ICL_MGPLL4;
+               break;
+       default:
+               MISSING_CASE(port);
+               return;
+       }
+
+       pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
                                enum port port,
                                struct intel_crtc_state *pipe_config)
@@ -9273,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
                default:
                        WARN(1, "unknown pipe linked to edp transcoder\n");
+                       /* fall through */
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_edp_pipe = PIPE_A;
@@ -9328,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                 * registers/MIPI[BXT]. We can break out here early, since we
                 * need the same DSI PLL to be enabled for both DSI ports.
                 */
-               if (!intel_dsi_pll_is_enabled(dev_priv))
+               if (!bxt_dsi_pll_is_enabled(dev_priv))
                        break;
 
                /* XXX: this works for video mode only */
@@ -9359,7 +9446,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
        port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
-       if (IS_CANNONLAKE(dev_priv))
+       if (IS_ICELAKE(dev_priv))
+               icelake_get_ddi_pll(dev_priv, port, pipe_config);
+       else if (IS_CANNONLAKE(dev_priv))
                cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
        else if (IS_GEN9_BC(dev_priv))
                skylake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9692,7 +9781,8 @@ static void i845_disable_cursor(struct intel_plane *plane,
        i845_update_cursor(plane, NULL, NULL);
 }
 
-static bool i845_cursor_get_hw_state(struct intel_plane *plane)
+static bool i845_cursor_get_hw_state(struct intel_plane *plane,
+                                    enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
@@ -9704,6 +9794,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane)
 
        ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
 
+       *pipe = PIPE_A;
+
        intel_display_power_put(dev_priv, power_domain);
 
        return ret;
@@ -9715,25 +9807,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       u32 cntl;
+       u32 cntl = 0;
 
-       cntl = MCURSOR_GAMMA_ENABLE;
+       if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+               cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
 
-       if (HAS_DDI(dev_priv))
-               cntl |= CURSOR_PIPE_CSC_ENABLE;
+       if (INTEL_GEN(dev_priv) <= 10) {
+               cntl |= MCURSOR_GAMMA_ENABLE;
+
+               if (HAS_DDI(dev_priv))
+                       cntl |= MCURSOR_PIPE_CSC_ENABLE;
+       }
 
        if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
 
        switch (plane_state->base.crtc_w) {
        case 64:
-               cntl |= CURSOR_MODE_64_ARGB_AX;
+               cntl |= MCURSOR_MODE_64_ARGB_AX;
                break;
        case 128:
-               cntl |= CURSOR_MODE_128_ARGB_AX;
+               cntl |= MCURSOR_MODE_128_ARGB_AX;
                break;
        case 256:
-               cntl |= CURSOR_MODE_256_ARGB_AX;
+               cntl |= MCURSOR_MODE_256_ARGB_AX;
                break;
        default:
                MISSING_CASE(plane_state->base.crtc_w);
@@ -9741,7 +9838,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
        }
 
        if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
-               cntl |= CURSOR_ROTATE_180;
+               cntl |= MCURSOR_ROTATE_180;
 
        return cntl;
 }
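
The cursor control value is built the same way on every path: size selects one of three fixed ARGB modes, anything else trips MISSING_CASE. Reduced to a sketch (the mode bit values here are placeholders, not the MCURSOR_MODE_* definitions):

#include <stdio.h>

/* placeholder values for the MCURSOR_MODE_*_ARGB_AX fields */
enum { MODE_64_ARGB = 0x27, MODE_128_ARGB = 0x22, MODE_256_ARGB = 0x23 };

static int cursor_mode(int width)
{
        switch (width) {
        case 64:  return MODE_64_ARGB;
        case 128: return MODE_128_ARGB;
        case 256: return MODE_256_ARGB;
        default:
                fprintf(stderr, "missing case: width %d\n", width);
                return -1;
        }
}

int main(void)
{
        printf("64x64 cursor -> mode 0x%x\n", cursor_mode(64));
        return 0;
}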
@@ -9903,23 +10000,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
        i9xx_update_cursor(plane, NULL, NULL);
 }
 
-static bool i9xx_cursor_get_hw_state(struct intel_plane *plane)
+static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
+                                    enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
-       enum pipe pipe = plane->pipe;
        bool ret;
+       u32 val;
 
        /*
         * Not 100% correct for planes that can move between pipes,
         * but that's only the case for gen2-3 which don't have any
         * display power wells.
         */
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+       val = I915_READ(CURCNTR(plane->pipe));
+
+       ret = val & MCURSOR_MODE;
+
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+               *pipe = plane->pipe;
+       else
+               *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
+                       MCURSOR_PIPE_SELECT_SHIFT;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -10631,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                if (connector->base.state->crtc)
-                       drm_connector_unreference(&connector->base);
+                       drm_connector_put(&connector->base);
 
                if (connector->base.encoder) {
                        connector->base.state->best_encoder =
@@ -10639,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
                        connector->base.state->crtc =
                                connector->base.encoder->crtc;
 
-                       drm_connector_reference(&connector->base);
+                       drm_connector_get(&connector->base);
                } else {
                        connector->base.state->best_encoder = NULL;
                        connector->base.state->crtc = NULL;
@@ -10918,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
                case INTEL_OUTPUT_DDI:
                        if (WARN_ON(!HAS_DDI(to_i915(dev))))
                                break;
+                       /* else: fall through */
                case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
@@ -11791,7 +11898,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
                         struct drm_crtc_state *new_state)
 {
        struct intel_dpll_hw_state dpll_hw_state;
-       unsigned crtc_mask;
+       unsigned int crtc_mask;
        bool active;
 
        memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
@@ -11818,7 +11925,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
                return;
        }
 
-       crtc_mask = 1 << drm_crtc_index(crtc);
+       crtc_mask = drm_crtc_mask(crtc);
 
        if (new_state->active)
                I915_STATE_WARN(!(pll->active_mask & crtc_mask),
@@ -11853,7 +11960,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
 
        if (old_state->shared_dpll &&
            old_state->shared_dpll != new_state->shared_dpll) {
-               unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+               unsigned int crtc_mask = drm_crtc_mask(crtc);
                struct intel_shared_dpll *pll = old_state->shared_dpll;
 
                I915_STATE_WARN(pll->active_mask & crtc_mask,
@@ -12449,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
        finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
 }
 
+static void intel_atomic_cleanup_work(struct work_struct *work)
+{
+       struct drm_atomic_state *state =
+               container_of(work, struct drm_atomic_state, commit_work);
+       struct drm_i915_private *i915 = to_i915(state->dev);
+
+       drm_atomic_helper_cleanup_planes(&i915->drm, state);
+       drm_atomic_helper_commit_cleanup_done(state);
+       drm_atomic_state_put(state);
+
+       intel_atomic_helper_free_state(i915);
+}
+
 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
@@ -12609,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
        }
 
-       drm_atomic_helper_cleanup_planes(dev, state);
-
-       drm_atomic_helper_commit_cleanup_done(state);
-
-       drm_atomic_state_put(state);
-
-       intel_atomic_helper_free_state(dev_priv);
+       /*
+        * Defer the cleanup of the old state to a separate worker so as
+        * not to impede the current task (userspace, for blocking
+        * modesets) that is executed inline. For out-of-line asynchronous
+        * modesets/flips, deferring to a new worker seems overkill, but
+        * we would place a schedule point (cond_resched()) here anyway to
+        * keep latencies down.
+        */
+       INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
+       schedule_work(&state->commit_work);
 }
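
The cleanup path is the work-queue idiom in miniature: package the expensive teardown into a work item and hand it to another thread so the committing task returns sooner. A rough pthread analogy of the same shape (not the kernel workqueue API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct commit_state { int id; /* stands in for drm_atomic_state */ };

static void *cleanup_work(void *arg)
{
        struct commit_state *state = arg;

        /* expensive teardown happens off the committing thread */
        printf("cleaning up commit %d\n", state->id);
        free(state);
        return NULL;
}

int main(void)
{
        struct commit_state *state = malloc(sizeof(*state));
        pthread_t worker;

        state->id = 1;
        /* commit tail: queue the cleanup instead of doing it inline */
        pthread_create(&worker, NULL, cleanup_work, state);

        /* the committing side continues without waiting on teardown */
        printf("commit done, cleanup deferred\n");

        pthread_join(worker, NULL);
        return 0;
}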
 
 static void intel_atomic_commit_work(struct work_struct *work)
@@ -12981,6 +13104,19 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
        }
 
+       /*
+        * We declare pageflips to be interactive and so merit a small bias
+        * towards upclocking to deliver the frame on time. By only changing
+        * the RPS thresholds to sample more regularly and aim for higher
+        * clocks, we can hopefully deliver low power workloads (like kodi)
+        * that are not quite steady state, without resorting to forcing
+        * maximum clocks following a vblank miss (see do_rps_boost()).
+        */
+       if (!intel_state->rps_interactive) {
+               intel_rps_mark_interactive(dev_priv, true);
+               intel_state->rps_interactive = true;
+       }
+
        return 0;
 }
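
The rps_interactive flag on the atomic state makes the mark/unmark calls idempotent per commit: prepare sets it at most once, cleanup clears it, and the driver-side counter underneath nets out to zero. A sketch of that pairing, with the counter and names as stand-ins:

#include <stdbool.h>
#include <stdio.h>

static int interactive_count; /* stand-in for the driver-wide counter */

static void mark_interactive(bool on)
{
        interactive_count += on ? 1 : -1;
}

struct atomic_state { bool rps_interactive; };

static void prepare_fb(struct atomic_state *state)
{
        if (!state->rps_interactive) {      /* only once per commit */
                mark_interactive(true);
                state->rps_interactive = true;
        }
}

static void cleanup_fb(struct atomic_state *state)
{
        if (state->rps_interactive) {
                mark_interactive(false);
                state->rps_interactive = false;
        }
}

int main(void)
{
        struct atomic_state state = { 0 };

        prepare_fb(&state);
        prepare_fb(&state);  /* second plane: no double count */
        cleanup_fb(&state);
        printf("interactive_count = %d\n", interactive_count); /* 0 */
        return 0;
}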
 
@@ -12997,8 +13133,15 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
+       struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(old_state->state);
        struct drm_i915_private *dev_priv = to_i915(plane->dev);
 
+       if (intel_state->rps_interactive) {
+               intel_rps_mark_interactive(dev_priv, false);
+               intel_state->rps_interactive = false;
+       }
+
        /* Should only be called after a successful intel_prepare_plane_fb()! */
        mutex_lock(&dev_priv->drm.struct_mutex);
        intel_plane_unpin_fb(to_intel_plane_state(old_state));
@@ -13181,8 +13324,17 @@ void intel_plane_destroy(struct drm_plane *plane)
        kfree(to_intel_plane(plane));
 }
 
-static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
+static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
@@ -13195,8 +13347,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool i965_mod_supported(uint32_t format, uint64_t modifier)
+static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
@@ -13211,8 +13372,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+                                          u32 format, u64 modifier)
 {
+       struct intel_plane *plane = to_intel_plane(_plane);
+
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               break;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               if (!plane->has_ccs)
+                       return false;
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
@@ -13244,38 +13423,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane,
-                                                    uint32_t format,
-                                                    uint64_t modifier)
+static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
+                                             u32 format, u64 modifier)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
-       if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
-               return false;
-
-       if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
-           modifier != DRM_FORMAT_MOD_LINEAR)
-               return false;
-
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_mod_supported(format, modifier);
-       else if (INTEL_GEN(dev_priv) >= 4)
-               return i965_mod_supported(format, modifier);
-       else
-               return i8xx_mod_supported(format, modifier);
+       return modifier == DRM_FORMAT_MOD_LINEAR &&
+               format == DRM_FORMAT_ARGB8888;
 }
 
-static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane,
-                                                   uint32_t format,
-                                                   uint64_t modifier)
-{
-       if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
-               return false;
+static struct drm_plane_funcs skl_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = skl_plane_format_mod_supported,
+};
 
-       return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888;
-}
+static struct drm_plane_funcs i965_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = i965_plane_format_mod_supported,
+};
 
-static struct drm_plane_funcs intel_plane_funcs = {
+static struct drm_plane_funcs i8xx_plane_funcs = {
        .update_plane = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .destroy = intel_plane_destroy,
@@ -13283,7 +13460,7 @@ static struct drm_plane_funcs intel_plane_funcs = {
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = intel_primary_plane_format_mod_supported,
+       .format_mod_supported = i8xx_plane_format_mod_supported,
 };
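
Splitting intel_plane_funcs into per-platform tables lets each generation advertise its own .format_mod_supported while sharing the rest of the callbacks. The selection pattern, reduced to a stand-alone sketch (the generation cut-off and format checks are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct plane_funcs {
        const char *name;
        bool (*format_mod_supported)(unsigned int format);
};

/* illustrative checks; the real ones switch on fourcc + modifier */
static bool skl_fmt_ok(unsigned int format)  { return format <= 0xff; }
static bool i965_fmt_ok(unsigned int format) { return format <= 0x0f; }

static const struct plane_funcs skl_funcs  = { "skl",  skl_fmt_ok  };
static const struct plane_funcs i965_funcs = { "i965", i965_fmt_ok };

static const struct plane_funcs *pick_funcs(int gen)
{
        return gen >= 9 ? &skl_funcs : &i965_funcs; /* simplified */
}

int main(void)
{
        const struct plane_funcs *funcs = pick_funcs(9);

        printf("%s: format 0x20 %ssupported\n", funcs->name,
               funcs->format_mod_supported(0x20) ? "" : "not ");
        return 0;
}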
 
 static int
@@ -13408,7 +13585,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
        .atomic_set_property = intel_plane_atomic_set_property,
        .atomic_duplicate_state = intel_plane_duplicate_state,
        .atomic_destroy_state = intel_plane_destroy_state,
-       .format_mod_supported = intel_cursor_plane_format_mod_supported,
+       .format_mod_supported = intel_cursor_format_mod_supported,
 };
 
 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
@@ -13466,6 +13643,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
        struct intel_plane *primary = NULL;
        struct intel_plane_state *state = NULL;
+       const struct drm_plane_funcs *plane_funcs;
        const uint32_t *intel_primary_formats;
        unsigned int supported_rotations;
        unsigned int num_formats;
@@ -13521,6 +13699,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
        primary->check_plane = intel_check_primary_plane;
 
        if (INTEL_GEN(dev_priv) >= 9) {
+               primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+                                                    PLANE_PRIMARY);
+
                if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
                        intel_primary_formats = skl_pri_planar_formats;
                        num_formats = ARRAY_SIZE(skl_pri_planar_formats);
@@ -13529,7 +13710,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                        num_formats = ARRAY_SIZE(skl_primary_formats);
                }
 
-               if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY))
+               if (primary->has_ccs)
                        modifiers = skl_format_modifiers_ccs;
                else
                        modifiers = skl_format_modifiers_noccs;
@@ -13537,6 +13718,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->update_plane = skl_update_plane;
                primary->disable_plane = skl_disable_plane;
                primary->get_hw_state = skl_plane_get_hw_state;
+
+               plane_funcs = &skl_plane_funcs;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13545,6 +13728,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
+
+               plane_funcs = &i965_plane_funcs;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
@@ -13553,25 +13738,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
                primary->update_plane = i9xx_update_plane;
                primary->disable_plane = i9xx_disable_plane;
                primary->get_hw_state = i9xx_plane_get_hw_state;
+
+               plane_funcs = &i8xx_plane_funcs;
        }
 
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, &intel_plane_funcs,
+                                              0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "plane 1%c", pipe_name(pipe));
        else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, &intel_plane_funcs,
+                                              0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
                                               "primary %c", pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
-                                              0, &intel_plane_funcs,
+                                              0, plane_funcs,
                                               intel_primary_formats, num_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_PRIMARY,
@@ -13951,7 +14138,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
        if (intel_crt_present(dev_priv))
                intel_crt_init(dev_priv);
 
-       if (IS_GEN9_LP(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D);
+               intel_ddi_init(dev_priv, PORT_E);
+               intel_ddi_init(dev_priv, PORT_F);
+       } else if (IS_GEN9_LP(dev_priv)) {
                /*
                 * FIXME: Broxton doesn't support port detection via the
                 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
@@ -13961,7 +14155,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
 
-               intel_dsi_init(dev_priv);
+               vlv_dsi_init(dev_priv);
        } else if (HAS_DDI(dev_priv)) {
                int found;
 
@@ -14067,7 +14261,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                                intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
                }
 
-               intel_dsi_init(dev_priv);
+               vlv_dsi_init(dev_priv);
        } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
                bool found = false;
 
@@ -14124,14 +14318,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
        drm_framebuffer_cleanup(fb);
 
-       i915_gem_object_lock(intel_fb->obj);
-       WARN_ON(!intel_fb->obj->framebuffer_references--);
-       i915_gem_object_unlock(intel_fb->obj);
+       i915_gem_object_lock(obj);
+       WARN_ON(!obj->framebuffer_references--);
+       i915_gem_object_unlock(obj);
 
-       i915_gem_object_put(intel_fb->obj);
+       i915_gem_object_put(obj);
 
        kfree(intel_fb);
 }
@@ -14140,8 +14335,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
                                                struct drm_file *file,
                                                unsigned int *handle)
 {
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
        if (obj->userptr.mm) {
                DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
@@ -14349,11 +14543,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                }
                break;
        case DRM_FORMAT_NV12:
-               if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS ||
-                   mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) {
-                       DRM_DEBUG_KMS("RC not to be enabled with NV12\n");
-                       goto err;
-               }
                if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
                    IS_BROXTON(dev_priv)) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
@@ -14411,9 +14600,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
                                      i, fb->pitches[i], stride_alignment);
                        goto err;
                }
-       }
 
-       intel_fb->obj = obj;
+               fb->obj[i] = &obj->base;
+       }
 
        ret = intel_fill_fb_info(dev_priv, fb);
        if (ret)
@@ -14469,12 +14658,26 @@ static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int hdisplay_max, htotal_max;
+       int vdisplay_max, vtotal_max;
+
+       /*
+        * Can't reject DBLSCAN here because Xorg ddxen can add piles
+        * of DBLSCAN modes to the output's mode list when they detect
+        * the scaling mode property on the connector. And they don't
+        * ask the kernel to validate those modes in any way until
+        * modeset time at which point the client gets a protocol error.
+        * So in order to not upset those clients we silently ignore the
+        * DBLSCAN flag on such connectors. For other connectors we will
+        * reject modes with the DBLSCAN flag in encoder->compute_config().
+        * And we always reject DBLSCAN modes in connector->mode_valid()
+        * as we never want such modes on the connector's mode list.
+        */
+
        if (mode->vscan > 1)
                return MODE_NO_VSCAN;
 
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
-
        if (mode->flags & DRM_MODE_FLAG_HSKEW)
                return MODE_H_ILLEGAL;
 
@@ -14488,6 +14691,36 @@ intel_mode_valid(struct drm_device *dev,
                           DRM_MODE_FLAG_CLKDIV2))
                return MODE_BAD;
 
+       if (INTEL_GEN(dev_priv) >= 9 ||
+           IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+               hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
+               vdisplay_max = 4096;
+               htotal_max = 8192;
+               vtotal_max = 8192;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               hdisplay_max = 4096;
+               vdisplay_max = 4096;
+               htotal_max = 8192;
+               vtotal_max = 8192;
+       } else {
+               hdisplay_max = 2048;
+               vdisplay_max = 2048;
+               htotal_max = 4096;
+               vtotal_max = 4096;
+       }
+
+       if (mode->hdisplay > hdisplay_max ||
+           mode->hsync_start > htotal_max ||
+           mode->hsync_end > htotal_max ||
+           mode->htotal > htotal_max)
+               return MODE_H_ILLEGAL;
+
+       if (mode->vdisplay > vdisplay_max ||
+           mode->vsync_start > vtotal_max ||
+           mode->vsync_end > vtotal_max ||
+           mode->vtotal > vtotal_max)
+               return MODE_V_ILLEGAL;
+
        return MODE_OK;
 }
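
The bounds check above encodes three generations of display timing limits. As a rough illustration, a standalone C sketch of the same table-driven validation follows; the struct names, the simplified generation handling (HSW/BDW share the gen9+ limits in the real driver), and the sample timings are assumptions for illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct timings {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

struct limits {
	int hdisplay_max, vdisplay_max, htotal_max, vtotal_max;
};

static struct limits limits_for_gen(int gen)
{
	if (gen >= 9)	/* the driver also takes this branch for HSW/BDW */
		return (struct limits){ 8192, 4096, 8192, 8192 };
	if (gen >= 3)
		return (struct limits){ 4096, 4096, 8192, 8192 };
	return (struct limits){ 2048, 2048, 4096, 4096 };
}

static bool timings_ok(const struct timings *t, int gen)
{
	struct limits l = limits_for_gen(gen);

	if (t->hdisplay > l.hdisplay_max || t->htotal > l.htotal_max ||
	    t->hsync_start > l.htotal_max || t->hsync_end > l.htotal_max)
		return false;

	return t->vdisplay <= l.vdisplay_max && t->vtotal <= l.vtotal_max &&
	       t->vsync_start <= l.vtotal_max && t->vsync_end <= l.vtotal_max;
}

int main(void)
{
	/* plausible 3840x2160 timings, for demonstration only */
	struct timings uhd = { 3840, 3888, 3920, 4000, 2160, 2163, 2168, 2222 };

	printf("gen9: %s\n", timings_ok(&uhd, 9) ? "OK" : "rejected");
	printf("gen2: %s\n", timings_ok(&uhd, 2) ? "OK" : "rejected");
	return 0;
}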
 
@@ -14636,6 +14869,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
        DRM_INFO("Applying T12 delay quirk\n");
 }
 
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+       DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -14722,6 +14967,13 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Toshiba Satellite P50-C-18C */
        { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+       /* GeminiLake NUC */
+       { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+       /* ASRock ITX */
+       { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+       { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
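
For readers unfamiliar with the quirk machinery: intel_init_quirks() walks the table above and compares each entry against the device's PCI IDs. A minimal standalone sketch of that matching, with an 0xffff wildcard standing in for the driver's PCI_ANY_ID and hypothetical names throughout:

#include <stdio.h>

#define ANY_ID 0xffff

struct quirk {
	int device, subsystem_vendor, subsystem_device;
	void (*hook)(void *dev);
};

static void apply_quirks(const struct quirk *table, int n,
			 int device, int svendor, int sdevice, void *dev)
{
	for (int i = 0; i < n; i++) {
		const struct quirk *q = &table[i];

		if (q->device == device &&
		    (q->subsystem_vendor == svendor ||
		     q->subsystem_vendor == ANY_ID) &&
		    (q->subsystem_device == sdevice ||
		     q->subsystem_device == ANY_ID))
			q->hook(dev);
	}
}

static void dummy_hook(void *dev)
{
	(void)dev;
	puts("quirk applied");
}

int main(void)
{
	/* IDs taken from the GeminiLake NUC entry added above */
	const struct quirk table[] = {
		{ 0x3185, 0x8086, 0x2072, dummy_hook },
	};

	apply_quirks(table, 1, 0x3185, 0x8086, 0x2072, NULL);
	return 0;
}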
@@ -14926,6 +15178,7 @@ int intel_modeset_init(struct drm_device *dev)
                }
        }
 
+       /* maximum framebuffer dimensions */
        if (IS_GEN2(dev_priv)) {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
@@ -14941,11 +15194,11 @@ int intel_modeset_init(struct drm_device *dev)
                dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
                dev->mode_config.cursor_height = 1023;
        } else if (IS_GEN2(dev_priv)) {
-               dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
-               dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
+               dev->mode_config.cursor_width = 64;
+               dev->mode_config.cursor_height = 64;
        } else {
-               dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
-               dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
+               dev->mode_config.cursor_width = 256;
+               dev->mode_config.cursor_height = 256;
        }
 
        dev->mode_config.fb_base = ggtt->gmadr.start;
@@ -15095,8 +15348,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
        WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
        WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
-       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE);
-       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
+       WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
 
        I915_WRITE(PIPECONF(pipe), 0);
        POSTING_READ(PIPECONF(pipe));
@@ -15110,12 +15363,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
 static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
                                   struct intel_plane *plane)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
-       u32 val = I915_READ(DSPCNTR(i9xx_plane));
+       enum pipe pipe;
 
-       return (val & DISPLAY_PLANE_ENABLE) == 0 ||
-               (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe);
+       if (!plane->get_hw_state(plane, &pipe))
+               return true;
+
+       return pipe == crtc->pipe;
 }
 
 static void
@@ -15274,6 +15527,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                connector->base.dpms = DRM_MODE_DPMS_OFF;
                connector->base.encoder = NULL;
        }
+
+       /* notify opregion of the sanitized encoder state */
+       intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
 }
 
 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15314,7 +15570,10 @@ static void readout_plane_state(struct intel_crtc *crtc)
        for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
                struct intel_plane_state *plane_state =
                        to_intel_plane_state(plane->base.state);
-               bool visible = plane->get_hw_state(plane);
+               enum pipe pipe;
+               bool visible;
+
+               visible = plane->get_hw_state(plane, &pipe);
 
                intel_set_plane_visible(crtc_state, plane_state, visible);
        }
@@ -15413,9 +15672,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                                 * rely on the connector_mask being accurate.
                                 */
                                encoder->base.crtc->state->connector_mask |=
-                                       1 << drm_connector_index(&connector->base);
+                                       drm_connector_mask(&connector->base);
                                encoder->base.crtc->state->encoder_mask |=
-                                       1 << drm_encoder_index(&encoder->base);
+                                       drm_encoder_mask(&encoder->base);
                        }
 
                } else {
@@ -15481,11 +15740,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                u64 get_domains;
                enum intel_display_power_domain domain;
+               struct intel_crtc_state *crtc_state;
 
                if (!encoder->get_power_domains)
                        continue;
 
-               get_domains = encoder->get_power_domains(encoder);
+               /*
+                * MST-primary and inactive encoders don't have a crtc state
+        * and neither of these requires any power domain references.
+                */
+               if (!encoder->base.crtc)
+                       continue;
+
+               crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
+               get_domains = encoder->get_power_domains(encoder, crtc_state);
                for_each_power_domain(domain, get_domains)
                        intel_display_power_get(dev_priv, domain);
        }
@@ -15661,6 +15929,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
+       flush_workqueue(dev_priv->modeset_wq);
+
        flush_work(&dev_priv->atomic_helper.free_work);
        WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
 
@@ -15704,8 +15974,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder)
 {
        connector->encoder = encoder;
-       drm_mode_connector_attach_encoder(&connector->base,
-                                         &encoder->base);
+       drm_connector_attach_encoder(&connector->base, &encoder->base);
 }
 
 /*
index 2ef31617614ab22972d9ce48acaf89724fdb6b09..9292001cdd14defd45e8ad75fe34d7b4bed1e52d 100644 (file)
@@ -126,6 +126,17 @@ enum port {
 
 #define port_name(p) ((p) + 'A')
 
+enum tc_port {
+       PORT_TC_NONE = -1,
+
+       PORT_TC1 = 0,
+       PORT_TC2,
+       PORT_TC3,
+       PORT_TC4,
+
+       I915_MAX_TC_PORTS
+};
+
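
A hypothetical illustration of how the new enum might be consumed (not part of this patch): on ICL the Type-C capable ports C..F would map onto PORT_TC1..PORT_TC4, with PORT_TC_NONE for combo-PHY-only ports.

static enum tc_port port_to_tc_sketch(enum port port)
{
	if (port < PORT_C || port > PORT_F)
		return PORT_TC_NONE;	/* combo-PHY-only port */

	return (enum tc_port)(PORT_TC1 + (port - PORT_C));
}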
 enum dpio_channel {
        DPIO_CH0,
        DPIO_CH1
@@ -144,7 +155,7 @@ enum aux_ch {
        AUX_CH_B,
        AUX_CH_C,
        AUX_CH_D,
-       _AUX_CH_E, /* does not exist */
+       AUX_CH_E, /* ICL+ */
        AUX_CH_F,
 };
 
@@ -185,8 +196,13 @@ enum intel_display_power_domain {
        POWER_DOMAIN_AUX_B,
        POWER_DOMAIN_AUX_C,
        POWER_DOMAIN_AUX_D,
+       POWER_DOMAIN_AUX_E,
        POWER_DOMAIN_AUX_F,
        POWER_DOMAIN_AUX_IO_A,
+       POWER_DOMAIN_AUX_TBT1,
+       POWER_DOMAIN_AUX_TBT2,
+       POWER_DOMAIN_AUX_TBT3,
+       POWER_DOMAIN_AUX_TBT4,
        POWER_DOMAIN_GMBUS,
        POWER_DOMAIN_MODESET,
        POWER_DOMAIN_GT_IRQ,
@@ -249,7 +265,7 @@ struct intel_link_m_n {
                            &(dev)->mode_config.plane_list,             \
                            base.head)                                  \
                for_each_if((plane_mask) &                              \
-                           BIT(drm_plane_index(&intel_plane->base)))
+                           drm_plane_mask(&intel_plane->base)))
 
 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)     \
        list_for_each_entry(intel_plane,                                \
@@ -266,13 +282,17 @@ struct intel_link_m_n {
        list_for_each_entry(intel_crtc,                                 \
                            &(dev)->mode_config.crtc_list,              \
                            base.head)                                  \
-               for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+               for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base))
 
 #define for_each_intel_encoder(dev, intel_encoder)             \
        list_for_each_entry(intel_encoder,                      \
                            &(dev)->mode_config.encoder_list,   \
                            base.head)
 
+#define for_each_intel_dp(dev, intel_encoder)                  \
+       for_each_intel_encoder(dev, intel_encoder)              \
+               for_each_if(intel_encoder_is_dp(intel_encoder))
+
 #define for_each_intel_connector_iter(intel_connector, iter) \
        while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
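
The drm_plane_mask()/drm_crtc_mask() conversions above swap an open-coded BIT(drm_*_index()) for the core helper, which (sketched from the drm core; details may differ) is roughly:

static inline u32 drm_plane_mask_sketch(const struct drm_plane *plane)
{
	return 1 << drm_plane_index(plane);
}

The new for_each_intel_dp() macro is used like any other encoder walk; the converted loops in intel_dp.c further down show the pattern.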
 
index 8320f0e8e3bef8587b94a908bf87f3eecdf63fc5..cd0f649b57a5b75dff70265637a3a4b0ead4373b 100644 (file)
@@ -56,7 +56,7 @@ struct dp_link_dpll {
        struct dpll dpll;
 };
 
-static const struct dp_link_dpll gen4_dpll[] = {
+static const struct dp_link_dpll g4x_dpll[] = {
        { 162000,
                { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
        { 270000,
@@ -256,6 +256,17 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
        return 810000;
 }
 
+static int icl_max_source_rate(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum port port = dig_port->base.port;
+
+       if (port == PORT_B)
+               return 540000;
+
+       return 810000;
+}
+
 static void
 intel_dp_set_source_rates(struct intel_dp *intel_dp)
 {
@@ -285,10 +296,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        /* This should only be done once */
        WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
 
-       if (IS_CANNONLAKE(dev_priv)) {
+       if (INTEL_GEN(dev_priv) >= 10) {
                source_rates = cnl_rates;
                size = ARRAY_SIZE(cnl_rates);
-               max_rate = cnl_max_source_rate(intel_dp);
+               if (INTEL_GEN(dev_priv) == 10)
+                       max_rate = cnl_max_source_rate(intel_dp);
+               else
+                       max_rate = icl_max_source_rate(intel_dp);
        } else if (IS_GEN9_LP(dev_priv)) {
                source_rates = bxt_rates;
                size = ARRAY_SIZE(bxt_rates);
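
CNL and ICL now share one rate table and differ only in the maximum applied to it. A standalone sketch of clamping a sorted rate table (the values follow the standard DP rate set; the helper name is made up):

#include <stdio.h>

/* Count the leading entries at or below max_rate; assumes rates[]
 * is sorted ascending, as the driver's tables are. */
static int clamp_rates(const int *rates, int n, int max_rate)
{
	int count = 0;

	while (count < n && rates[count] <= max_rate)
		count++;
	return count;
}

int main(void)
{
	const int rates[] = { 162000, 216000, 270000, 324000,
			      432000, 540000, 648000, 810000 };

	/* ICL port B tops out at 540000 per the hunk above */
	printf("usable rates: %d\n", clamp_rates(rates, 8, 540000));
	return 0;
}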
@@ -420,6 +434,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -513,7 +530,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        uint32_t DP;
 
        if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
-                "skipping pipe %c power seqeuncer kick due to port %c being active\n",
+                "skipping pipe %c power sequencer kick due to port %c being active\n",
                 pipe_name(pipe), port_name(intel_dig_port->base.port)))
                return;
 
@@ -529,9 +546,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        DP |= DP_LINK_TRAIN_PAT_1;
 
        if (IS_CHERRYVIEW(dev_priv))
-               DP |= DP_PIPE_SELECT_CHV(pipe);
-       else if (pipe == PIPE_B)
-               DP |= DP_PIPEB_SELECT;
+               DP |= DP_PIPE_SEL_CHV(pipe);
+       else
+               DP |= DP_PIPE_SEL(pipe);
 
        pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
 
@@ -554,7 +571,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
        /*
         * Similar magic as in intel_dp_enable_port().
         * We _must_ do this port enable + disable trick
-        * to make this power seqeuencer lock onto the port.
+        * to make this power sequencer lock onto the port.
         * Otherwise even VDD force bit won't work.
         */
        I915_WRITE(intel_dp->output_reg, DP);
@@ -583,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
         * We don't have power sequencer currently.
         * Pick one that's not used by other ports.
         */
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_dp *intel_dp;
-
-               if (encoder->type != INTEL_OUTPUT_DP &&
-                   encoder->type != INTEL_OUTPUT_EDP)
-                       continue;
-
-               intel_dp = enc_to_intel_dp(&encoder->base);
+       for_each_intel_dp(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
                if (encoder->type == INTEL_OUTPUT_EDP) {
                        WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
@@ -782,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
         * should use them always.
         */
 
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_dp *intel_dp;
-
-               if (encoder->type != INTEL_OUTPUT_DP &&
-                   encoder->type != INTEL_OUTPUT_EDP &&
-                   encoder->type != INTEL_OUTPUT_DDI)
-                       continue;
-
-               intel_dp = enc_to_intel_dp(&encoder->base);
-
-               /* Skip pure DVI/HDMI DDI encoders */
-               if (!i915_mmio_reg_valid(intel_dp->output_reg))
-                       continue;
+       for_each_intel_dp(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
                WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
 
@@ -936,7 +936,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
+intel_dp_aux_wait_done(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -944,14 +944,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
        bool done;
 
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
-       if (has_aux_irq)
-               done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
-                                         msecs_to_jiffies_timeout(10));
-       else
-               done = wait_for(C, 10) == 0;
+       done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+                                 msecs_to_jiffies_timeout(10));
        if (!done)
-               DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
-                         has_aux_irq);
+               DRM_ERROR("dp aux hw did not signal timeout!\n");
 #undef C
 
        return status;
@@ -1016,7 +1012,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
 }
 
 static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
-                                    bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t aux_clock_divider)
 {
@@ -1037,7 +1032,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
 
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
-              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_INTERRUPT |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               timeout |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
               (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
               (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);

 }
 
 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
-                                     bool has_aux_irq,
                                      int send_bytes,
                                      uint32_t unused)
 {
        return DP_AUX_CH_CTL_SEND_BUSY |
               DP_AUX_CH_CTL_DONE |
-              (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+              DP_AUX_CH_CTL_INTERRUPT |
               DP_AUX_CH_CTL_TIME_OUT_ERROR |
               DP_AUX_CH_CTL_TIME_OUT_MAX |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
@@ -1076,7 +1070,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
        int i, ret, recv_bytes;
        uint32_t status;
        int try, clock = 0;
-       bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
        bool vdd;
 
        ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
@@ -1131,7 +1124,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
 
        while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
                u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
-                                                         has_aux_irq,
                                                          send_bytes,
                                                          aux_clock_divider);
 
@@ -1148,7 +1140,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
                        /* Send the command and wait for it to complete */
                        I915_WRITE(ch_ctl, send_ctl);
 
-                       status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+                       status = intel_dp_aux_wait_done(intel_dp);
 
                        /* Clear done status and any errors */
                        I915_WRITE(ch_ctl,
@@ -1347,6 +1339,9 @@ static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
        case DP_AUX_D:
                aux_ch = AUX_CH_D;
                break;
+       case DP_AUX_E:
+               aux_ch = AUX_CH_E;
+               break;
        case DP_AUX_F:
                aux_ch = AUX_CH_F;
                break;
@@ -1374,6 +1369,8 @@ intel_aux_power_domain(struct intel_dp *intel_dp)
                return POWER_DOMAIN_AUX_C;
        case AUX_CH_D:
                return POWER_DOMAIN_AUX_D;
+       case AUX_CH_E:
+               return POWER_DOMAIN_AUX_E;
        case AUX_CH_F:
                return POWER_DOMAIN_AUX_F;
        default:
@@ -1460,6 +1457,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
+       case AUX_CH_E:
        case AUX_CH_F:
                return DP_AUX_CH_CTL(aux_ch);
        default:
@@ -1478,6 +1476,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
        case AUX_CH_B:
        case AUX_CH_C:
        case AUX_CH_D:
+       case AUX_CH_E:
        case AUX_CH_F:
                return DP_AUX_CH_DATA(aux_ch, index);
        default:
@@ -1541,6 +1540,13 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
        return max_rate >= 540000;
 }
 
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+{
+       int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
+
+       return max_rate >= 810000;
+}
+
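
For context, these thresholds are per-lane link rates in kHz (HBR2 = 540000, HBR3 = 810000). A back-of-the-envelope sketch, not driver code, of the usable payload bandwidth under DP's 8b/10b channel coding:

#include <stdio.h>

/* 8b/10b coding carries 8 payload bits per 10 transmitted bits */
static long dp_data_rate_khz(long link_rate_khz, int lanes)
{
	return link_rate_khz * lanes * 8 / 10;
}

int main(void)
{
	printf("HBR2 x4: %ld kHz\n", dp_data_rate_khz(540000, 4)); /* 1728000 */
	printf("HBR3 x4: %ld kHz\n", dp_data_rate_khz(810000, 4)); /* 2592000 */
	return 0;
}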
 static void
 intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_state *pipe_config)
@@ -1550,8 +1556,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
        int i, count = 0;
 
        if (IS_G4X(dev_priv)) {
-               divisor = gen4_dpll;
-               count = ARRAY_SIZE(gen4_dpll);
+               divisor = g4x_dpll;
+               count = ARRAY_SIZE(g4x_dpll);
        } else if (HAS_PCH_SPLIT(dev_priv)) {
                divisor = pch_dpll;
                count = ARRAY_SIZE(pch_dpll);
@@ -1862,7 +1868,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       if (HAS_GMCH_DISPLAY(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return false;
 
@@ -1964,7 +1973,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
 
        /* Split out the IBX/CPU vs CPT settings */
 
-       if (IS_GEN7(dev_priv) && port == PORT_A) {
+       if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1974,7 +1983,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
                if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;
 
-               intel_dp->DP |= crtc->pipe << 29;
+               intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
        } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
                u32 trans_dp;
 
@@ -2000,9 +2009,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
                        intel_dp->DP |= DP_ENHANCED_FRAMING;
 
                if (IS_CHERRYVIEW(dev_priv))
-                       intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
-               else if (crtc->pipe == PIPE_B)
-                       intel_dp->DP |= DP_PIPEB_SELECT;
+                       intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
+               else
+                       intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
        }
 }
 
@@ -2624,52 +2633,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
                              mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
 }
 
+static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
+                                enum port port, enum pipe *pipe)
+{
+       enum pipe p;
+
+       for_each_pipe(dev_priv, p) {
+               u32 val = I915_READ(TRANS_DP_CTL(p));
+
+               if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
+                       *pipe = p;
+                       return true;
+               }
+       }
+
+       DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+
+       /* must initialize pipe to something for the asserts */
+       *pipe = PIPE_A;
+
+       return false;
+}
+
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+                          i915_reg_t dp_reg, enum port port,
+                          enum pipe *pipe)
+{
+       bool ret;
+       u32 val;
+
+       val = I915_READ(dp_reg);
+
+       ret = val & DP_PORT_EN;
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
+               *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
+       else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
+               ret &= cpt_dp_port_selected(dev_priv, port, pipe);
+       else if (IS_CHERRYVIEW(dev_priv))
+               *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
+       else
+               *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
+
+       return ret;
+}
+
 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       enum port port = encoder->port;
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
+       ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+                                   encoder->port, pipe);
 
-       tmp = I915_READ(intel_dp->output_reg);
-
-       if (!(tmp & DP_PORT_EN))
-               goto out;
-
-       if (IS_GEN7(dev_priv) && port == PORT_A) {
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
-               enum pipe p;
-
-               for_each_pipe(dev_priv, p) {
-                       u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
-                       if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
-                               *pipe = p;
-                               ret = true;
-
-                               goto out;
-                       }
-               }
-
-               DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
-                             i915_mmio_reg_offset(intel_dp->output_reg));
-       } else if (IS_CHERRYVIEW(dev_priv)) {
-               *pipe = DP_PORT_TO_PIPE_CHV(tmp);
-       } else {
-               *pipe = PORT_TO_PIPE(tmp);
-       }
-
-       ret = true;
-
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
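
intel_dp_port_enabled() folds the per-platform pipe-select variants into one helper. A simplified standalone decode with made-up shift/mask values (the real DP_PIPE_SEL_* definitions live in i915_reg.h and differ between platforms):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PORT_EN        (1u << 31)
#define SKETCH_PIPE_SEL_SHIFT 30	/* assumed; CHV/IVB use other fields */
#define SKETCH_PIPE_SEL_MASK  (1u << SKETCH_PIPE_SEL_SHIFT)

static bool dp_port_enabled_sketch(uint32_t val, int *pipe)
{
	/* like the driver, report the pipe even when the port is
	 * disabled so state asserts have something to print */
	*pipe = (val & SKETCH_PIPE_SEL_MASK) >> SKETCH_PIPE_SEL_SHIFT;

	return val & SKETCH_PORT_EN;
}

int main(void)
{
	int pipe;
	bool on = dp_port_enabled_sketch(SKETCH_PORT_EN | SKETCH_PIPE_SEL_MASK,
					 &pipe);

	printf("enabled=%d pipe=%c\n", on, 'A' + pipe);
	return 0;
}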
@@ -2782,16 +2805,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
 static void g4x_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
-{
-       intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
-       /* disable the port before the pipe on g4x */
-       intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
-                          const struct intel_crtc_state *old_crtc_state,
-                          const struct drm_connector_state *old_conn_state)
 {
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
@@ -2800,20 +2813,22 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
-       intel_psr_disable(intel_dp, old_crtc_state);
-
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
 
+       /*
+        * Bspec does not list a specific disable sequence for g4x DP.
+        * Follow the ilk+ sequence (disable pipe before the port) for
+        * g4x DP as it does not suffer from underruns like the normal
+        * g4x modeset sequence (disable pipe after the port).
+        */
        intel_dp_link_down(encoder, old_crtc_state);
 
        /* Only ilk+ has port A */
@@ -2852,10 +2867,11 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->base.port;
+       uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
 
-       if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
+       if (dp_train_pat & train_pat_mask)
                DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
-                             dp_train_pat & DP_TRAINING_PATTERN_MASK);
+                             dp_train_pat & train_pat_mask);
 
        if (HAS_DDI(dev_priv)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2866,7 +2882,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                        temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
 
                temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+               switch (dp_train_pat & train_pat_mask) {
                case DP_TRAINING_PATTERN_DISABLE:
                        temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
@@ -2880,10 +2896,13 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
                case DP_TRAINING_PATTERN_3:
                        temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
                        break;
+               case DP_TRAINING_PATTERN_4:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
+                       break;
                }
                I915_WRITE(DP_TP_CTL(port), temp);
 
-       } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+       } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
                   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
                *DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
@@ -3006,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config,
                          const struct drm_connector_state *conn_state)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
        intel_edp_backlight_on(pipe_config, conn_state);
-       intel_psr_enable(intel_dp, pipe_config);
 }
 
 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
@@ -3041,11 +3057,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
        edp_panel_vdd_off_sync(intel_dp);
 
        /*
-        * VLV seems to get confused when multiple power seqeuencers
+        * VLV seems to get confused when multiple power sequencers
         * have the same port selected (even if only one has power/vdd
         * enabled). The failure manifests as vlv_wait_port_ready() failing
         * CHV on the other hand doesn't seem to mind having the same port
-        * selected in multiple power seqeuencers, but let's clear the
+        * selected in multiple power sequencers, but let's clear the
         * port select always when logically disconnecting a power sequencer
         * from a port.
         */
@@ -3064,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
-       for_each_intel_encoder(&dev_priv->drm, encoder) {
-               struct intel_dp *intel_dp;
-               enum port port;
-
-               if (encoder->type != INTEL_OUTPUT_DP &&
-                   encoder->type != INTEL_OUTPUT_EDP)
-                       continue;
-
-               intel_dp = enc_to_intel_dp(&encoder->base);
-               port = dp_to_dig_port(intel_dp)->base.port;
+       for_each_intel_dp(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+               enum port port = encoder->port;
 
                WARN(intel_dp->active_pipe == pipe,
                     "stealing pipe %c power sequencer from active (e)DP port %c\n",
@@ -3195,14 +3204,14 @@ uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum port port = dp_to_dig_port(intel_dp)->base.port;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       enum port port = encoder->port;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       if (HAS_DDI(dev_priv))
                return intel_ddi_dp_voltage_max(encoder);
-       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-       else if (IS_GEN7(dev_priv) && port == PORT_A)
+       else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
        else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
                return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
@@ -3214,33 +3223,11 @@ uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum port port = dp_to_dig_port(intel_dp)->base.port;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       enum port port = encoder->port;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
-               default:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
-               }
-       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_3;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_2;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_1;
-               case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
-               default:
-                       return DP_TRAIN_PRE_EMPH_LEVEL_0;
-               }
+       if (HAS_DDI(dev_priv)) {
+               return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3253,7 +3240,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
                default:
                        return DP_TRAIN_PRE_EMPH_LEVEL_0;
                }
-       } else if (IS_GEN7(dev_priv) && port == PORT_A) {
+       } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
                        return DP_TRAIN_PRE_EMPH_LEVEL_2;
@@ -3448,7 +3435,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-gen4_signal_levels(uint8_t train_set)
+g4x_signal_levels(uint8_t train_set)
 {
        uint32_t        signal_levels = 0;
 
@@ -3485,9 +3472,9 @@ gen4_signal_levels(uint8_t train_set)
        return signal_levels;
 }
 
-/* Gen6's DP voltage swing and pre-emphasis control */
+/* SNB CPU eDP voltage swing and pre-emphasis control */
 static uint32_t
-gen6_edp_signal_levels(uint8_t train_set)
+snb_cpu_edp_signal_levels(uint8_t train_set)
 {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3513,9 +3500,9 @@ gen6_edp_signal_levels(uint8_t train_set)
        }
 }
 
-/* Gen7's DP voltage swing and pre-emphasis control */
+/* IVB CPU eDP voltage swing and pre-emphasis control */
 static uint32_t
-gen7_edp_signal_levels(uint8_t train_set)
+ivb_cpu_edp_signal_levels(uint8_t train_set)
 {
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
                                         DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3562,14 +3549,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
                signal_levels = chv_signal_levels(intel_dp);
        } else if (IS_VALLEYVIEW(dev_priv)) {
                signal_levels = vlv_signal_levels(intel_dp);
-       } else if (IS_GEN7(dev_priv) && port == PORT_A) {
-               signal_levels = gen7_edp_signal_levels(train_set);
+       } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
+               signal_levels = ivb_cpu_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
        } else if (IS_GEN6(dev_priv) && port == PORT_A) {
-               signal_levels = gen6_edp_signal_levels(train_set);
+               signal_levels = snb_cpu_edp_signal_levels(train_set);
                mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
        } else {
-               signal_levels = gen4_signal_levels(train_set);
+               signal_levels = g4x_signal_levels(train_set);
                mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
        }
 
@@ -3652,7 +3639,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("\n");
 
-       if ((IS_GEN7(dev_priv) && port == PORT_A) ||
+       if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
            (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
@@ -3681,8 +3668,9 @@ intel_dp_link_down(struct intel_encoder *encoder,
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
                /* always enable with pattern 1 (as per spec) */
-               DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
-               DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+               DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+               DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+                       DP_LINK_TRAIN_PAT_1;
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
 
@@ -3737,8 +3725,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
                dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                        DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
 
-       intel_psr_init_dpcd(intel_dp);
-
        /*
         * Read the eDP display control registers.
         *
@@ -3754,6 +3740,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
                DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
                              intel_dp->edp_dpcd);
 
+       /*
+        * This has to be called after intel_dp->edp_dpcd is filled; PSR checks
+        * for the SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1].
+        */
+       intel_psr_init_dpcd(intel_dp);
+
        /* Read the eDP 1.4+ supported link rates. */
        if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -3882,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
                                        intel_dp->is_mst);
 }
 
-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
-                                 struct intel_crtc_state *crtc_state, bool disable_wa)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       u8 buf;
-       int ret = 0;
-       int count = 0;
-       int attempts = 10;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-               DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-               ret = -EIO;
-               goto out;
-       }
-
-       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              buf & ~DP_TEST_SINK_START) < 0) {
-               DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-               ret = -EIO;
-               goto out;
-       }
-
-       do {
-               intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
-               if (drm_dp_dpcd_readb(&intel_dp->aux,
-                                     DP_TEST_SINK_MISC, &buf) < 0) {
-                       ret = -EIO;
-                       goto out;
-               }
-               count = buf & DP_TEST_COUNT_MASK;
-       } while (--attempts && count);
-
-       if (attempts == 0) {
-               DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
-               ret = -ETIMEDOUT;
-       }
-
- out:
-       if (disable_wa)
-               hsw_enable_ips(crtc_state);
-       return ret;
-}
-
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
-                                  struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       u8 buf;
-       int ret;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
-               return -EIO;
-
-       if (!(buf & DP_TEST_CRC_SUPPORTED))
-               return -ENOTTY;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
-               return -EIO;
-
-       if (buf & DP_TEST_SINK_START) {
-               ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
-               if (ret)
-                       return ret;
-       }
-
-       hsw_disable_ips(crtc_state);
-
-       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-                              buf | DP_TEST_SINK_START) < 0) {
-               hsw_enable_ips(crtc_state);
-               return -EIO;
-       }
-
-       intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-       return 0;
-}
-
-int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       u8 buf;
-       int count, ret;
-       int attempts = 6;
-
-       ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
-       if (ret)
-               return ret;
-
-       do {
-               intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
-               if (drm_dp_dpcd_readb(&intel_dp->aux,
-                                     DP_TEST_SINK_MISC, &buf) < 0) {
-                       ret = -EIO;
-                       goto stop;
-               }
-               count = buf & DP_TEST_COUNT_MASK;
-
-       } while (--attempts && count == 0);
-
-       if (attempts == 0) {
-               DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
-               ret = -ETIMEDOUT;
-               goto stop;
-       }
-
-       if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
-               ret = -EIO;
-               goto stop;
-       }
-
-stop:
-       intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
-       return ret;
-}
-
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {
@@ -4464,10 +4333,15 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }
 
+       /* Handle CEC interrupts, if any */
+       drm_dp_cec_irq(&intel_dp->aux);
+
        /* defer to the hotplug work for link retraining if needed */
        if (intel_dp_needs_link_retrain(intel_dp))
                return false;
 
+       intel_psr_short_pulse(intel_dp);
+
        if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
                DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
                /* Send a Hotplug Uevent to userspace to start modeset */
@@ -4535,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 static enum drm_connector_status
 edp_detect(struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-       enum drm_connector_status status;
-
-       status = intel_panel_detect(dev_priv);
-       if (status == connector_status_unknown)
-               status = connector_status_connected;
-
-       return status;
+       return connector_status_connected;
 }
 
 static bool ibx_digital_port_connected(struct intel_encoder *encoder)
@@ -4778,6 +4645,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
        intel_connector->detect_edid = edid;
 
        intel_dp->has_audio = drm_detect_monitor_audio(edid);
+       drm_dp_cec_set_edid(&intel_dp->aux, edid);
 }
 
 static void
@@ -4785,6 +4653,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 {
        struct intel_connector *intel_connector = intel_dp->attached_connector;
 
+       drm_dp_cec_unset_edid(&intel_dp->aux);
        kfree(intel_connector->detect_edid);
        intel_connector->detect_edid = NULL;
 
@@ -4803,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector)
 
        intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
 
-       /* Can't disconnect eDP, but you can close the lid... */
+       /* Can't disconnect eDP */
        if (intel_dp_is_edp(intel_dp))
                status = edp_detect(intel_dp);
        else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
@@ -4973,6 +4842,7 @@ static int
 intel_dp_connector_register(struct drm_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct drm_device *dev = connector->dev;
        int ret;
 
        ret = intel_connector_register(connector);
@@ -4985,13 +4855,20 @@ intel_dp_connector_register(struct drm_connector *connector)
                      intel_dp->aux.name, connector->kdev->kobj.name);
 
        intel_dp->aux.dev = connector->kdev;
-       return drm_dp_aux_register(&intel_dp->aux);
+       ret = drm_dp_aux_register(&intel_dp->aux);
+       if (!ret)
+               drm_dp_cec_register_connector(&intel_dp->aux,
+                                             connector->name, dev->dev);
+       return ret;
 }
 
 static void
 intel_dp_connector_unregister(struct drm_connector *connector)
 {
-       drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+       drm_dp_cec_unregister_connector(&intel_dp->aux);
+       drm_dp_aux_unregister(&intel_dp->aux);
        intel_connector_unregister(connector);
 }
 
@@ -5317,14 +5194,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       enum pipe pipe;
 
-       if ((intel_dp->DP & DP_PORT_EN) == 0)
-               return INVALID_PIPE;
+       if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
+                                 encoder->port, &pipe))
+               return pipe;
 
-       if (IS_CHERRYVIEW(dev_priv))
-               return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
-       else
-               return PORT_TO_PIPE(intel_dp->DP);
+       return INVALID_PIPE;
 }
 
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -5673,7 +5550,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
 
        /*
         * On some VLV machines the BIOS can leave the VDD
-        * enabled even on power seqeuencers which aren't
+        * enabled even on power sequencers which aren't
         * hooked up to any port. This would mess up the
         * power domain tracking the first time we pick
         * one of these power sequencers for use since
@@ -5681,7 +5558,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
         * already on and therefore wouldn't grab the power
         * domain reference. Disable VDD first to avoid this.
         * This also avoids spuriously turning the VDD on as
-        * soon as the new power seqeuencer gets initialized.
+        * soon as the new power sequencer gets initialized.
         */
        if (force_disable_vdd) {
                u32 pp = ironlake_get_pp_control(intel_dp);
@@ -5719,10 +5596,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                port_sel = PANEL_PORT_SELECT_VLV(port);
        } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
-               if (port == PORT_A)
+               switch (port) {
+               case PORT_A:
                        port_sel = PANEL_PORT_SELECT_DPA;
-               else
+                       break;
+               case PORT_C:
+                       port_sel = PANEL_PORT_SELECT_DPC;
+                       break;
+               case PORT_D:
                        port_sel = PANEL_PORT_SELECT_DPD;
+                       break;
+               default:
+                       MISSING_CASE(port);
+                       break;
+               }
        }
 
        pp_on |= port_sel;
@@ -6177,7 +6064,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
-                       drm_mode_connector_update_edid_property(connector,
+                       drm_connector_update_edid_property(connector,
                                                                edid);
                } else {
                        kfree(edid);
@@ -6266,8 +6153,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
        /* Set connector link status to BAD and send a Uevent to notify
         * userspace to do a modeset.
         */
-       drm_mode_connector_set_link_status_property(connector,
-                                                   DRM_MODE_LINK_STATUS_BAD);
+       drm_connector_set_link_status_property(connector,
+                                              DRM_MODE_LINK_STATUS_BAD);
        mutex_unlock(&connector->dev->mode_config.mutex);
        /* Send Hotplug uevent so userspace can reprobe */
        drm_kms_helper_hotplug_event(connector->dev);
@@ -6337,7 +6224,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+       if (!HAS_GMCH_DISPLAY(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
@@ -6380,7 +6267,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
-       if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+       if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
@@ -6436,15 +6323,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
-       } else if (INTEL_GEN(dev_priv) >= 5) {
-               intel_encoder->pre_enable = g4x_pre_enable_dp;
-               intel_encoder->enable = g4x_enable_dp;
-               intel_encoder->disable = ilk_disable_dp;
-               intel_encoder->post_disable = ilk_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
+               intel_encoder->post_disable = g4x_post_disable_dp;
        }
 
        intel_dig_port->dp.output_reg = output_reg;
@@ -6464,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
        intel_encoder->port = port;
 
        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-       dev_priv->hotplug.irq_port[port] = intel_dig_port;
 
        if (port != PORT_A)
                intel_infoframe_init(intel_dig_port);
@@ -6483,37 +6365,44 @@ err_connector_alloc:
        return false;
 }
 
-void intel_dp_mst_suspend(struct drm_device *dev)
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int i;
+       struct intel_encoder *encoder;
 
-       /* disable MST */
-       for (i = 0; i < I915_MAX_PORTS; i++) {
-               struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp;
+
+               if (encoder->type != INTEL_OUTPUT_DDI)
+                       continue;
 
-               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+               intel_dp = enc_to_intel_dp(&encoder->base);
+
+               if (!intel_dp->can_mst)
                        continue;
 
-               if (intel_dig_port->dp.is_mst)
-                       drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+               if (intel_dp->is_mst)
+                       drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
        }
 }
 
-void intel_dp_mst_resume(struct drm_device *dev)
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int i;
+       struct intel_encoder *encoder;
 
-       for (i = 0; i < I915_MAX_PORTS; i++) {
-               struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_dp *intel_dp;
                int ret;
 
-               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+               if (encoder->type != INTEL_OUTPUT_DDI)
+                       continue;
+
+               intel_dp = enc_to_intel_dp(&encoder->base);
+
+               if (!intel_dp->can_mst)
                        continue;
 
-               ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+               ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
                if (ret)
-                       intel_dp_check_mst_status(&intel_dig_port->dp);
+                       intel_dp_check_mst_status(intel_dp);
        }
 }
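
A minimal sketch of a caller updated for the new signatures above (hypothetical function name): the suspend/resume helpers now take the i915 private pointer directly instead of deriving it from a struct drm_device, and they walk every encoder rather than the removed hotplug.irq_port[] array.

static void example_display_suspend(struct drm_i915_private *dev_priv)
{
        /* was: intel_dp_mst_suspend(&dev_priv->drm); */
        intel_dp_mst_suspend(dev_priv);
}

The resume side mirrors this with intel_dp_mst_resume(dev_priv); as the loop above shows, any topology manager that fails to resume is handed to intel_dp_check_mst_status().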
index 2bb2ceb9d463da2d9e5cce19b2460652b3cdd9a3..357136f17f85330742b846d1c2ea27985bf75a30 100644 (file)
@@ -26,7 +26,7 @@
 
 static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {
-       uint8_t reg_val = 0;
+       u8 reg_val = 0;
 
        /* Early return when the display uses another mechanism to enable its backlight. */
        if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP))
@@ -54,11 +54,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
  * Read the current backlight value from DPCD register(s) based
  * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
  */
-static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
-       uint8_t read_val[2] = { 0x0 };
-       uint16_t level = 0;
+       u8 read_val[2] = { 0x0 };
+       u16 level = 0;
 
        if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
                             &read_val, sizeof(read_val)) < 0) {
@@ -82,7 +82,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
-       uint8_t vals[2] = { 0x0 };
+       u8 vals[2] = { 0x0 };
 
        vals[0] = level;
 
@@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
 {
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
-       uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode;
+       u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                        DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) {
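
A minimal sketch (assumed helper name, plain <linux/types.h> integers) of how the MSB/LSB pair read above is composed into a brightness level; whether the LSB is meaningful depends on the panel's reported byte-count capability:

static u16 example_compose_level(const u8 read_val[2], bool sixteen_bit)
{
        if (sixteen_bit)
                return (read_val[0] << 8) | read_val[1];

        return read_val[0]; /* 8-bit panels report only the MSB */
}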
index 3fcaa98b90555b2fca7795c041f17090f18b0d30..4da6e33c7fa1c9a06839fc3777f074e66309f096 100644 (file)
@@ -219,14 +219,30 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
 }
 
 /*
- * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * Pick training pattern for channel equalization. Training Pattern 4 for HBR3
+ * or for 1.4 devices that support it, Training Pattern 3 for HBR2
  * or 1.2 devices that support it, Training Pattern 2 otherwise.
  */
 static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 {
-       u32 training_pattern = DP_TRAINING_PATTERN_2;
-       bool source_tps3, sink_tps3;
+       bool source_tps3, sink_tps3, source_tps4, sink_tps4;
 
+       /*
+        * Intel platforms that support HBR3 also support TPS4. It is mandatory
+        * for all downstream devices that support HBR3. There are no known eDP
+        * panels that support TPS4 as of Feb 2018, per the VESA eDP_v1.4b_E1
+        * specification.
+        */
+       source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+       sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd);
+       if (source_tps4 && sink_tps4) {
+               return DP_TRAINING_PATTERN_4;
+       } else if (intel_dp->link_rate == 810000) {
+               if (!source_tps4)
+                       DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+               if (!sink_tps4)
+                       DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+       }
        /*
         * Intel platforms that support HBR2 also support TPS3. TPS3 support is
         * also mandatory for downstream devices that support HBR2. However, not
@@ -234,17 +250,16 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
         */
        source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
        sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
-
        if (source_tps3 && sink_tps3) {
-               training_pattern = DP_TRAINING_PATTERN_3;
-       } else if (intel_dp->link_rate == 540000) {
+               return DP_TRAINING_PATTERN_3;
+       } else if (intel_dp->link_rate >= 540000) {
                if (!source_tps3)
-                       DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+                       DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
                if (!sink_tps3)
-                       DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+                       DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
        }
 
-       return training_pattern;
+       return DP_TRAINING_PATTERN_2;
 }
 
 static bool
@@ -256,11 +271,13 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
        bool channel_eq = false;
 
        training_pattern = intel_dp_training_pattern(intel_dp);
+       /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
+       if (training_pattern != DP_TRAINING_PATTERN_4)
+               training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
 
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp,
-                                    training_pattern |
-                                    DP_LINK_SCRAMBLING_DISABLE)) {
+                                    training_pattern)) {
                DRM_ERROR("failed to start channel equalization\n");
                return false;
        }
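
A sketch of the training-pattern cascade implemented above, with the source/sink capability checks collapsed into booleans (assumed function name; DP_TRAINING_PATTERN_* and DP_LINK_SCRAMBLING_DISABLE come from <drm/drm_dp_helper.h>):

static u32 example_pick_training_pattern(bool tps4, bool tps3)
{
        u32 pattern;

        if (tps4)
                pattern = DP_TRAINING_PATTERN_4;
        else if (tps3)
                pattern = DP_TRAINING_PATTERN_3;
        else
                pattern = DP_TRAINING_PATTERN_2;

        /* Scrambling is disabled for TPS2/3 and enabled for TPS4. */
        if (pattern != DP_TRAINING_PATTERN_4)
                pattern |= DP_LINK_SCRAMBLING_DISABLE;

        return pattern;
}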
index 9e6956c0868835a9bcdf156c45d151ee2479b99a..7e3e01607643d3e6b4dfc6332284be001966e280 100644 (file)
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_LIMITED_M_N);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = false;
        bpp = 24;
        if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        if (!intel_dp)
                return MODE_ERROR;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
@@ -397,20 +403,10 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
        return &intel_dp->mst_encoders[crtc->pipe]->base.base;
 }
 
-static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
-{
-       struct intel_connector *intel_connector = to_intel_connector(connector);
-       struct intel_dp *intel_dp = intel_connector->mst_port;
-       if (!intel_dp)
-               return NULL;
-       return &intel_dp->mst_encoders[0]->base.base;
-}
-
 static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
        .get_modes = intel_dp_mst_get_modes,
        .mode_valid = intel_dp_mst_mode_valid,
        .atomic_best_encoder = intel_mst_atomic_best_encoder,
-       .best_encoder = intel_mst_best_encoder,
        .atomic_check = intel_dp_mst_atomic_check,
 };
 
@@ -470,8 +466,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
                struct drm_encoder *enc =
                        &intel_dp->mst_encoders[pipe]->base.base;
 
-               ret = drm_mode_connector_attach_encoder(&intel_connector->base,
-                                                       enc);
+               ret = drm_connector_attach_encoder(&intel_connector->base, enc);
                if (ret)
                        goto err;
        }
@@ -479,7 +474,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
-       ret = drm_mode_connector_set_path_property(connector, pathprop);
+       ret = drm_connector_set_path_property(connector, pathprop);
        if (ret)
                goto err;
 
@@ -518,7 +513,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        intel_connector->mst_port = NULL;
        drm_modeset_unlock(&connector->dev->mode_config.connection_mutex);
 
-       drm_connector_unreference(connector);
+       drm_connector_put(connector);
 }
 
 static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
index 383fbc15113da968864b369398e2dcd0598b129a..b51ad2917dbef4528d9a7c528e603f9602cd39fd 100644 (file)
@@ -163,8 +163,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
-       unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
-       unsigned old_mask;
+       unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+       unsigned int old_mask;
 
        if (WARN_ON(pll == NULL))
                return;
@@ -207,7 +207,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
-       unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+       unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
 
        /* PCH only available on ILK+ */
        if (INTEL_GEN(dev_priv) < 5)
@@ -2525,6 +2525,77 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
        return true;
 }
 
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+                              uint32_t pll_id)
+{
+       uint32_t cfgcr0, cfgcr1;
+       uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+       const struct skl_wrpll_params *params;
+       int index, n_entries, link_clock;
+
+       /* Read back values from DPLL CFGCR registers */
+       cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
+       cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
+
+       dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
+       dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+               DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+       pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
+       kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
+       qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
+               DPLL_CFGCR1_QDIV_MODE_SHIFT;
+       qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+               DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+
+       params = dev_priv->cdclk.hw.ref == 24000 ?
+               icl_dp_combo_pll_24MHz_values :
+               icl_dp_combo_pll_19_2MHz_values;
+       n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
+
+       for (index = 0; index < n_entries; index++) {
+               if (dco_integer == params[index].dco_integer &&
+                   dco_fraction == params[index].dco_fraction &&
+                   pdiv == params[index].pdiv &&
+                   kdiv == params[index].kdiv &&
+                   qdiv_mode == params[index].qdiv_mode &&
+                   qdiv_ratio == params[index].qdiv_ratio)
+                       break;
+       }
+
+       /* Map PLL Index to Link Clock */
+       switch (index) {
+       default:
+               MISSING_CASE(index);
+               /* fall through */
+       case 0:
+               link_clock = 540000;
+               break;
+       case 1:
+               link_clock = 270000;
+               break;
+       case 2:
+               link_clock = 162000;
+               break;
+       case 3:
+               link_clock = 324000;
+               break;
+       case 4:
+               link_clock = 216000;
+               break;
+       case 5:
+               link_clock = 432000;
+               break;
+       case 6:
+               link_clock = 648000;
+               break;
+       case 7:
+               link_clock = 810000;
+               break;
+       }
+
+       return link_clock;
+}
+
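
The switch above reads naturally as a lookup table; a sketch (assumed array name), with the index following the row order of the icl_dp_combo_pll_*_values parameter tables and out-of-range indexes falling back to 540000 via the MISSING_CASE fall-through:

static const int example_combo_pll_link_clock[] = {
        540000, /* index 0 */
        270000, /* index 1 */
        162000, /* index 2 */
        324000, /* index 3 */
        216000, /* index 4 */
        432000, /* index 5 */
        648000, /* index 6 */
        810000, /* index 7 */
};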
 static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
 {
        return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
@@ -2569,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
                        switch (div1) {
                        default:
                                MISSING_CASE(div1);
+                               /* fall through */
                        case 2:
                                hsdiv = 0;
                                break;
@@ -2742,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
                                MG_PLL_SSC_FLLEN |
                                MG_PLL_SSC_STEPSIZE(ssc_stepsize);
 
-       pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART;
-
-       if (refclk_khz != 38400) {
-               pll_state->mg_pll_tdc_coldst_bias |=
-                       MG_PLL_TDC_COLDST_IREFINT_EN |
-                       MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
-                       MG_PLL_TDC_COLDST_COLDSTART |
-                       MG_PLL_TDC_TDCOVCCORR_EN |
-                       MG_PLL_TDC_TDCSEL(3);
-
-               pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
-                                        MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
-                                        MG_PLL_BIAS_BIAS_BONUS(10) |
-                                        MG_PLL_BIAS_BIASCAL_EN |
-                                        MG_PLL_BIAS_CTRIM(12) |
-                                        MG_PLL_BIAS_VREF_RDAC(4) |
-                                        MG_PLL_BIAS_IREFTRIM(iref_trim);
+       pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART |
+                                           MG_PLL_TDC_COLDST_IREFINT_EN |
+                                           MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
+                                           MG_PLL_TDC_TDCOVCCORR_EN |
+                                           MG_PLL_TDC_TDCSEL(3);
+
+       pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) |
+                                MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
+                                MG_PLL_BIAS_BIAS_BONUS(10) |
+                                MG_PLL_BIAS_BIASCAL_EN |
+                                MG_PLL_BIAS_CTRIM(12) |
+                                MG_PLL_BIAS_VREF_RDAC(4) |
+                                MG_PLL_BIAS_IREFTRIM(iref_trim);
+
+       if (refclk_khz == 38400) {
+               pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+               pll_state->mg_pll_bias_mask = 0;
+       } else {
+               pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
+               pll_state->mg_pll_bias_mask = -1U;
        }
 
+       pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask;
+       pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
+
        return true;
 }
 
@@ -2787,10 +2865,17 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        case PORT_D:
        case PORT_E:
        case PORT_F:
-               min = icl_port_to_mg_pll_id(port);
-               max = min;
-               ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
-                                           &pll_state);
+               if (0 /* TODO: TBT PLLs */) {
+                       min = DPLL_ID_ICL_TBTPLL;
+                       max = min;
+                       ret = icl_calc_dpll_state(crtc_state, encoder, clock,
+                                                 &pll_state);
+               } else {
+                       min = icl_port_to_mg_pll_id(port);
+                       max = min;
+                       ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
+                                                   &pll_state);
+               }
                break;
        default:
                MISSING_CASE(port);
@@ -2820,9 +2905,12 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
        switch (id) {
        default:
                MISSING_CASE(id);
+               /* fall through */
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
                return CNL_DPLL_ENABLE(id);
+       case DPLL_ID_ICL_TBTPLL:
+               return TBT_PLL_ENABLE;
        case DPLL_ID_ICL_MGPLL1:
        case DPLL_ID_ICL_MGPLL2:
        case DPLL_ID_ICL_MGPLL3:
@@ -2850,6 +2938,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
        switch (id) {
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
+       case DPLL_ID_ICL_TBTPLL:
                hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
                hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
                break;
@@ -2859,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
        case DPLL_ID_ICL_MGPLL4:
                port = icl_mg_pll_id_to_port(id);
                hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
+               hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
                hw_state->mg_clktop2_coreclkctl1 =
                        I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+               hw_state->mg_clktop2_coreclkctl1 &=
+                       MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
                hw_state->mg_clktop2_hsclkctl =
                        I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+               hw_state->mg_clktop2_hsclkctl &=
+                       MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+                       MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+                       MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+                       MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
                hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port));
                hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port));
                hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port));
                hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port));
                hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port));
+
                hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port));
                hw_state->mg_pll_tdc_coldst_bias =
                        I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+
+               if (dev_priv->cdclk.hw.ref == 38400) {
+                       hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+                       hw_state->mg_pll_bias_mask = 0;
+               } else {
+                       hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+                       hw_state->mg_pll_bias_mask = -1U;
+               }
+
+               hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+               hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
                break;
        default:
                MISSING_CASE(id);
@@ -2898,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
 {
        struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
        enum port port = icl_mg_pll_id_to_port(pll->info->id);
+       u32 val;
+
+       /*
+        * Some of the following registers have reserved fields, so program
+        * these with RMW based on a mask. The mask can be fixed, or generated
+        * during the calc/readout phase if it depends on some other HW state
+        * such as the refclk, see icl_calc_mg_pll_state().
+        */
+       val = I915_READ(MG_REFCLKIN_CTL(port));
+       val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+       val |= hw_state->mg_refclkin_ctl;
+       I915_WRITE(MG_REFCLKIN_CTL(port), val);
+
+       val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port));
+       val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+       val |= hw_state->mg_clktop2_coreclkctl1;
+       I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val);
+
+       val = I915_READ(MG_CLKTOP2_HSCLKCTL(port));
+       val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+                MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+                MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+                MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
+       val |= hw_state->mg_clktop2_hsclkctl;
+       I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val);
 
-       I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl);
-       I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port),
-                  hw_state->mg_clktop2_coreclkctl1);
-       I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl);
        I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0);
        I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1);
        I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf);
        I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock);
        I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc);
-       I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias);
-       I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port),
-                  hw_state->mg_pll_tdc_coldst_bias);
+
+       val = I915_READ(MG_PLL_BIAS(port));
+       val &= ~hw_state->mg_pll_bias_mask;
+       val |= hw_state->mg_pll_bias;
+       I915_WRITE(MG_PLL_BIAS(port), val);
+
+       val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port));
+       val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
+       val |= hw_state->mg_pll_tdc_coldst_bias;
+       I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val);
+
        POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port));
 }
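
The read-modify-write pattern used throughout icl_mg_pll_write() above, reduced to its essence (sketch, not a driver helper): software owns only the bits covered by the mask, and reserved bits read back from the hardware are preserved. The masks are either fixed or, when they depend on the refclk, produced during the calc/readout phase:

static u32 example_rmw(u32 old, u32 mask, u32 bits)
{
        return (old & ~mask) | (bits & mask);
}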
 
@@ -2936,6 +3077,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
        switch (id) {
        case DPLL_ID_ICL_DPLL0:
        case DPLL_ID_ICL_DPLL1:
+       case DPLL_ID_ICL_TBTPLL:
                icl_dpll_write(dev_priv, pll);
                break;
        case DPLL_ID_ICL_MGPLL1:
@@ -3034,6 +3176,7 @@ static const struct intel_shared_dpll_funcs icl_pll_funcs = {
 static const struct dpll_info icl_plls[] = {
        { "DPLL 0",   &icl_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
        { "DPLL 1",   &icl_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
+       { "TBT PLL",  &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
        { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
        { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
        { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
index 7a0cd564a9ee1f9f562a61882922fac13be62285..7e522cf4f13f3bb35991a8771cbd3e4755a902cb 100644 (file)
@@ -113,24 +113,28 @@ enum intel_dpll_id {
         * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
         */
        DPLL_ID_ICL_DPLL1 = 1,
+       /**
+        * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+        */
+       DPLL_ID_ICL_TBTPLL = 2,
        /**
         * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
         */
-       DPLL_ID_ICL_MGPLL1 = 2,
+       DPLL_ID_ICL_MGPLL1 = 3,
        /**
         * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
         */
-       DPLL_ID_ICL_MGPLL2 = 3,
+       DPLL_ID_ICL_MGPLL2 = 4,
        /**
         * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
         */
-       DPLL_ID_ICL_MGPLL3 = 4,
+       DPLL_ID_ICL_MGPLL3 = 5,
        /**
         * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
         */
-       DPLL_ID_ICL_MGPLL4 = 5,
+       DPLL_ID_ICL_MGPLL4 = 6,
 };
-#define I915_NUM_PLLS 6
+#define I915_NUM_PLLS 7
 
 struct intel_dpll_hw_state {
        /* i9xx, pch plls */
@@ -176,6 +180,8 @@ struct intel_dpll_hw_state {
        uint32_t mg_pll_ssc;
        uint32_t mg_pll_bias;
        uint32_t mg_pll_tdc_coldst_bias;
+       uint32_t mg_pll_bias_mask;
+       uint32_t mg_pll_tdc_coldst_bias_mask;
 };
 
 /**
@@ -336,5 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
 
 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
                              struct intel_dpll_hw_state *hw_state);
+int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
+                              uint32_t pll_id);
 
 #endif /* _INTEL_DPLL_MGR_H_ */
index 0361130500a6f7fc0786b343c2193c1629fbb39f..17af06d8a43eabe6e73753b8a88218392cd6d1db 100644 (file)
 #define MAX_OUTPUTS 6
 /* maximum connectors per crtc in the mode set */
 
-/* Maximum cursor sizes */
-#define GEN2_CURSOR_WIDTH 64
-#define GEN2_CURSOR_HEIGHT 64
-#define MAX_CURSOR_WIDTH 256
-#define MAX_CURSOR_HEIGHT 256
-
 #define INTEL_I2C_BUS_DVO 1
 #define INTEL_I2C_BUS_SDVO 2
 
@@ -194,7 +188,6 @@ enum intel_output_type {
 
 struct intel_framebuffer {
        struct drm_framebuffer base;
-       struct drm_i915_gem_object *obj;
        struct intel_rotation_info rot_info;
 
        /* for each plane in the normal GTT view */
@@ -261,7 +254,8 @@ struct intel_encoder {
                           struct intel_crtc_state *pipe_config);
        /* Returns a mask of power domains that need to be referenced as part
         * of the hardware state readout code. */
-       u64 (*get_power_domains)(struct intel_encoder *encoder);
+       u64 (*get_power_domains)(struct intel_encoder *encoder,
+                                struct intel_crtc_state *crtc_state);
        /*
         * Called during system suspend after all pending requests for the
         * encoder are flushed (for example for DP AUX transactions) and
@@ -310,6 +304,8 @@ struct intel_panel {
        } backlight;
 };
 
+struct intel_digital_port;
+
 /*
  * This structure serves as a translation layer between the generic HDCP code
  * and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -488,6 +484,8 @@ struct intel_atomic_state {
         */
        bool skip_intermediate_wm;
 
+       bool rps_interactive;
+
        /* Gen9+ only */
        struct skl_ddb_values wm_results;
 
@@ -953,6 +951,7 @@ struct intel_plane {
        enum pipe pipe;
        bool can_scale;
        bool has_fbc;
+       bool has_ccs;
        int max_downscale;
        uint32_t frontbuffer_bit;
 
@@ -971,7 +970,7 @@ struct intel_plane {
                             const struct intel_plane_state *plane_state);
        void (*disable_plane)(struct intel_plane *plane,
                              struct intel_crtc *crtc);
-       bool (*get_hw_state)(struct intel_plane *plane);
+       bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
        int (*check_plane)(struct intel_plane *plane,
                           struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
@@ -1004,7 +1003,7 @@ struct cxsr_latency {
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_plane(x) container_of(x, struct intel_plane, base)
 #define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
-#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
+#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL)
 
 struct intel_hdmi {
        i915_reg_t hdmi_reg;
@@ -1139,7 +1138,6 @@ struct intel_dp {
         * register with to kick off an AUX transaction.
         */
        uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
-                                    bool has_aux_irq,
                                     int send_bytes,
                                     uint32_t aux_clock_divider);
 
@@ -1252,22 +1250,29 @@ intel_attached_encoder(struct drm_connector *connector)
        return to_intel_connector(connector)->encoder;
 }
 
-static inline struct intel_digital_port *
-enc_to_dig_port(struct drm_encoder *encoder)
+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
 {
-       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
-       switch (intel_encoder->type) {
+       switch (encoder->type) {
        case INTEL_OUTPUT_DDI:
-               WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
        case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
        case INTEL_OUTPUT_HDMI:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+       if (intel_encoder_is_dig_port(intel_encoder))
                return container_of(encoder, struct intel_digital_port,
                                    base.base);
-       default:
+       else
                return NULL;
-       }
 }
 
 static inline struct intel_dp_mst_encoder *
@@ -1281,6 +1286,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
        return &enc_to_dig_port(encoder)->dp;
 }
 
+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+       switch (encoder->type) {
+       case INTEL_OUTPUT_DP:
+       case INTEL_OUTPUT_EDP:
+               return true;
+       case INTEL_OUTPUT_DDI:
+               /* Skip pure HDMI/DVI DDI encoders */
+               return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+       default:
+               return false;
+       }
+}
+
 static inline struct intel_digital_port *
 dp_to_dig_port(struct intel_dp *intel_dp)
 {
@@ -1337,9 +1356,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
 void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
 
 /* i915_irq.c */
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
-                        const unsigned int bank,
-                        const unsigned int bit);
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
@@ -1376,6 +1392,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
 
 /* intel_crt.c */
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+                           i915_reg_t adpa_reg, enum pipe *pipe);
 void intel_crt_init(struct drm_i915_private *dev_priv);
 void intel_crt_reset(struct drm_encoder *encoder);
 
@@ -1388,12 +1406,9 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                      enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const  struct intel_crtc_state *crtc_state);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1407,6 +1422,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 u32 bxt_signal_levels(struct intel_dp *intel_dp);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+                                u8 voltage_swing);
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
                                     bool enable);
 void icl_map_plls_to_ports(struct drm_crtc *crtc,
@@ -1488,6 +1505,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder);
 struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
+bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
+                             enum port port);
 
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
@@ -1615,6 +1635,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
 
+u16 skl_scaler_calc_phase(int sub, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                  uint32_t pixel_format);
@@ -1644,6 +1665,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+                          i915_reg_t dp_reg, enum port port,
+                          enum pipe *pipe);
 bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
                   enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1661,8 +1685,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 void intel_dp_encoder_reset(struct drm_encoder *encoder);
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp,
-                     struct intel_crtc_state *crtc_state, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
                             struct drm_connector_state *conn_state);
@@ -1676,8 +1698,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
 void intel_edp_panel_on(struct intel_dp *intel_dp);
 void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_device *dev);
-void intel_dp_mst_resume(struct drm_device *dev);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
 int intel_dp_max_link_rate(struct intel_dp *intel_dp);
 int intel_dp_max_lane_count(struct intel_dp *intel_dp);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
@@ -1707,6 +1729,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           uint8_t *link_bw, uint8_t *rate_select);
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
 
@@ -1726,8 +1749,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-/* intel_dsi.c */
-void intel_dsi_init(struct drm_i915_private *dev_priv);
+/* vlv_dsi.c */
+void vlv_dsi_init(struct drm_i915_private *dev_priv);
 
 /* intel_dsi_dcs_backlight.c */
 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1821,6 +1844,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
 
 
 /* intel_lvds.c */
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t lvds_reg, enum pipe *pipe);
 void intel_lvds_init(struct drm_i915_private *dev_priv);
 struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1867,7 +1892,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
 void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
 extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_i915_private *dev_priv,
                                struct drm_display_mode *fixed_mode,
@@ -1911,12 +1935,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
                     unsigned frontbuffer_bits,
                     enum fb_op_origin origin);
 void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
-                                  unsigned frontbuffer_bits);
 void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state);
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
 
 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -2058,12 +2082,13 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
 
 /* intel_sdvo.c */
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t sdvo_reg, enum pipe *pipe);
 bool intel_sdvo_init(struct drm_i915_private *dev_priv,
                     i915_reg_t reg, enum port port);
 
 
 /* intel_sprite.c */
-bool intel_format_is_yuv(u32 format);
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                             int usecs);
 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
@@ -2076,10 +2101,9 @@ void skl_update_plane(struct intel_plane *plane,
                      const struct intel_crtc_state *crtc_state,
                      const struct intel_plane_state *plane_state);
 void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane);
+bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
 bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
                       enum pipe pipe, enum plane_id plane_id);
-bool intel_format_is_yuv(uint32_t format);
 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
                          enum pipe pipe, enum plane_id plane_id);
 
@@ -2145,7 +2169,6 @@ void lspcon_resume(struct intel_lspcon *lspcon);
 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
 
 /* intel_pipe_crc.c */
-int intel_pipe_crc_create(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
 int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
                              size_t *values_cnt);
@@ -2161,5 +2184,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
 {
 }
 #endif
-extern const struct file_operations i915_display_crc_ctl_fops;
 #endif /* __INTEL_DRV_H__ */
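
Hypothetical usage of the new intel_encoder_is_dp() helper declared above (sketch; assumes the driver's for_each_intel_encoder() iterator): visit every encoder and act only on DP-capable ones, skipping pure HDMI/DVI DDI encoders:

static void example_for_each_dp(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                if (!intel_encoder_is_dp(encoder))
                        continue;

                /* ... act on enc_to_intel_dp(&encoder->base) ... */
        }
}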
index 7afeb9580f41f6eb22f547616dca505314957bb6..ad7c1cb329836510d7263988a258a8be7f6e9623 100644 (file)
@@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
        return container_of(encoder, struct intel_dsi, base.base);
 }
 
-/* intel_dsi.c */
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+/* vlv_dsi.c */
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
 enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
 
-/* intel_dsi_pll.c */
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
-                         struct intel_crtc_state *config);
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
-                         const struct intel_crtc_state *config);
-void intel_disable_dsi_pll(struct intel_encoder *encoder);
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-                      struct intel_crtc_state *config);
-void intel_dsi_reset_clocks(struct intel_encoder *encoder,
-                           enum port port);
+/* vlv_dsi_pll.c */
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+                       struct intel_crtc_state *config);
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *config);
+void vlv_dsi_pll_disable(struct intel_encoder *encoder);
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                    struct intel_crtc_state *config);
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+                       struct intel_crtc_state *config);
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *config);
+void bxt_dsi_pll_disable(struct intel_encoder *encoder);
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                    struct intel_crtc_state *config);
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
 
 /* intel_dsi_vbt.c */
 bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
index 4d6ffa7b3e7b9d930d720790b4df60fa0a2dbf58..ac83d6b89ae0c36c236ffc5bd155d86f045f7526 100644 (file)
@@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
                break;
        }
 
-       wait_for_dsi_fifo_empty(intel_dsi, port);
+       vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
 out:
        data += len;
index a70d767313aa10e198338e2e7472592827dfa347..4e142ff49708537b33b113f34248b0c213f1ab5e 100644 (file)
@@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp;
 
        tmp = I915_READ(intel_dvo->dev.dvo_reg);
 
-       if (!(tmp & DVO_ENABLE))
-               return false;
-
-       *pipe = PORT_TO_PIPE(tmp);
+       *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
 
-       return true;
+       return tmp & DVO_ENABLE;
 }
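
A sketch of the readout convention applied above (assumed function name): report the selected pipe unconditionally and use the enable bit only for the return value, so hardware state readout learns the pipe selection regardless of whether the port is on:

static bool example_get_hw_state(u32 dvo_ctl, enum pipe *pipe)
{
        /* Report the selected pipe even when the port is disabled. */
        *pipe = (dvo_ctl & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;

        return dvo_ctl & DVO_ENABLE;
}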
 
 static void intel_dvo_get_config(struct intel_encoder *encoder,
@@ -219,6 +215,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
        int target_clock = mode->clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        /* XXX: Validate clock range */
 
        if (fixed_mode) {
@@ -254,6 +253,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        if (fixed_mode)
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
@@ -276,8 +278,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
        dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
                   DVO_BLANK_ACTIVE_HIGH;
 
-       if (pipe == 1)
-               dvo_val |= DVO_PIPE_B_SELECT;
+       dvo_val |= DVO_PIPE_SEL(pipe);
        dvo_val |= DVO_PIPE_STALL;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
@@ -437,7 +438,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
                int gpio;
                bool dvoinit;
                enum pipe pipe;
-               uint32_t dpll[I915_MAX_PIPES];
+               u32 dpll[I915_MAX_PIPES];
                enum port port;
 
                /*
index 1590375f31cb7ee5af71006d4eff6f07a66e5d68..2d1952849d69ffec45b64360cc7d1baf78e0b0ba 100644 (file)
@@ -25,7 +25,6 @@
 #include <drm/drm_print.h>
 
 #include "i915_drv.h"
-#include "i915_vgpu.h"
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
 
@@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
                break;
        default:
                MISSING_CASE(class);
+               /* fall through */
        case VIDEO_DECODE_CLASS:
        case VIDEO_ENHANCEMENT_CLASS:
        case COPY_ENGINE_CLASS:
@@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
                                                           engine->class);
        if (WARN_ON(engine->context_size > BIT(20)))
                engine->context_size = 0;
+       if (engine->context_size)
+               DRIVER_CAPS(dev_priv)->has_logical_contexts = true;
 
        /* Nothing to do here, execute in order of dependencies */
        engine->schedule = NULL;
@@ -456,28 +458,16 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
        i915_gem_batch_pool_init(&engine->batch_pool, engine);
 }
 
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
-       /* Older GVT emulation depends upon intercepting CSB mmio */
-       if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
-               return true;
-
-       return false;
-}
-
 static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
 
-       execlists->csb_use_mmio = csb_force_mmio(engine->i915);
-
        execlists->port_mask = 1;
        BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
        GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
 
        execlists->queue_priority = INT_MIN;
-       execlists->queue = RB_ROOT;
-       execlists->first = NULL;
+       execlists->queue = RB_ROOT_CACHED;
 }
 
 /**
@@ -492,6 +482,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
        i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+       lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
 
        intel_engine_init_execlist(engine);
        intel_engine_init_hangcheck(engine);
@@ -499,7 +490,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init_cmd_parser(engine);
 }
 
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+                               unsigned int size)
 {
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
@@ -515,7 +507,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
                return PTR_ERR(obj);
        }
 
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
@@ -533,7 +525,7 @@ err_unref:
        return ret;
 }
 
-static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
 {
        i915_vma_unpin_and_release(&engine->scratch);
 }
@@ -585,7 +577,7 @@ static int init_status_page(struct intel_engine_cs *engine)
        if (ret)
                goto err;
 
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
@@ -645,6 +637,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
        return 0;
 }
 
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine)
+{
+       intel_context_unpin(to_intel_context(ctx, engine));
+}
+
 /**
  * intel_engine_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
@@ -658,7 +656,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
  */
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
-       struct intel_ring *ring;
+       struct drm_i915_private *i915 = engine->i915;
+       struct intel_context *ce;
        int ret;
 
        engine->set_default_submission(engine);
@@ -670,18 +669,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
         * be available. To avoid this we always pin the default
         * context.
         */
-       ring = intel_context_pin(engine->i915->kernel_context, engine);
-       if (IS_ERR(ring))
-               return PTR_ERR(ring);
+       ce = intel_context_pin(i915->kernel_context, engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
 
        /*
         * Similarly the preempt context must always be available so that
         * we can interrupt the engine at any time.
         */
-       if (engine->i915->preempt_context) {
-               ring = intel_context_pin(engine->i915->preempt_context, engine);
-               if (IS_ERR(ring)) {
-                       ret = PTR_ERR(ring);
+       if (i915->preempt_context) {
+               ce = intel_context_pin(i915->preempt_context, engine);
+               if (IS_ERR(ce)) {
+                       ret = PTR_ERR(ce);
                        goto err_unpin_kernel;
                }
        }
@@ -690,7 +689,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        if (ret)
                goto err_unpin_preempt;
 
-       if (HWS_NEEDS_PHYSICAL(engine->i915))
+       if (HWS_NEEDS_PHYSICAL(i915))
                ret = init_phys_status_page(engine);
        else
                ret = init_status_page(engine);
@@ -702,10 +701,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-       if (engine->i915->preempt_context)
-               intel_context_unpin(engine->i915->preempt_context, engine);
+       if (i915->preempt_context)
+               __intel_context_unpin(i915->preempt_context, engine);
+
 err_unpin_kernel:
-       intel_context_unpin(engine->i915->kernel_context, engine);
+       __intel_context_unpin(i915->kernel_context, engine);
        return ret;
 }
 
@@ -718,6 +718,8 @@ err_unpin_kernel:
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *i915 = engine->i915;
+
        intel_engine_cleanup_scratch(engine);
 
        if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -732,9 +734,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
        if (engine->default_state)
                i915_gem_object_put(engine->default_state);
 
-       if (engine->i915->preempt_context)
-               intel_context_unpin(engine->i915->preempt_context, engine);
-       intel_context_unpin(engine->i915->kernel_context, engine);
+       if (i915->preempt_context)
+               __intel_context_unpin(i915->preempt_context, engine);
+       __intel_context_unpin(i915->kernel_context, engine);
 
        i915_timeline_fini(&engine->timeline);
 }
@@ -769,6 +771,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
        return bbaddr;
 }
 
+int intel_engine_stop_cs(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       const u32 base = engine->mmio_base;
+       const i915_reg_t mode = RING_MI_MODE(base);
+       int err;
+
+       if (INTEL_GEN(dev_priv) < 3)
+               return -ENODEV;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+       err = 0;
+       if (__intel_wait_for_register_fw(dev_priv,
+                                        mode, MODE_IDLE, MODE_IDLE,
+                                        1000, 0,
+                                        NULL)) {
+               GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
+               err = -ETIMEDOUT;
+       }
+
+       /* A final mmio read so that pending GPU writes are, hopefully, flushed to memory */
+       POSTING_READ_FW(mode);
+
+       return err;
+}
+
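
A sketch of how a caller might use the new helper (hypothetical context; a timeout here is typically tolerable because the caller is about to reset the engine anyway):

static void example_prepare_reset(struct intel_engine_cs *engine)
{
        if (intel_engine_stop_cs(engine))
                DRM_DEBUG_DRIVER("%s: STOP_RING timed out\n", engine->name);
}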
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 {
        switch (type) {
@@ -780,12 +811,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
        }
 }
 
+u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
+{
+       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       u32 mcr_s_ss_select;
+       u32 slice = fls(sseu->slice_mask);
+       u32 subslice = fls(sseu->subslice_mask[slice]);
+
+       if (INTEL_GEN(dev_priv) == 10)
+               mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
+                                 GEN8_MCR_SUBSLICE(subslice);
+       else if (INTEL_GEN(dev_priv) >= 11)
+               mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
+                                 GEN11_MCR_SUBSLICE(subslice);
+       else
+               mcr_s_ss_select = 0;
+
+       return mcr_s_ss_select;
+}
+
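
A sketch of the steer/read/restore sequence that read_subslice_reg() below performs (assumed helper name; forcewake handling omitted): point the MCR selector at the target slice/subslice, read the register, then restore the default steering computed above rather than zeroing the selector:

static u32 example_steered_read(struct drm_i915_private *dev_priv,
                                i915_reg_t reg, u32 mask, u32 target)
{
        u32 def = intel_calculate_mcr_s_ss_select(dev_priv);
        u32 mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
        u32 val;

        mcr = (mcr & ~mask) | target;   /* steer at the target instance */
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        val = I915_READ_FW(reg);

        mcr = (mcr & ~mask) | def;      /* restore default steering */
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        return val;
}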
 static inline uint32_t
 read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                  int subslice, i915_reg_t reg)
 {
        uint32_t mcr_slice_subslice_mask;
        uint32_t mcr_slice_subslice_select;
+       uint32_t default_mcr_s_ss_select;
        uint32_t mcr;
        uint32_t ret;
        enum forcewake_domains fw_domains;
@@ -802,6 +853,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                                            GEN8_MCR_SUBSLICE(subslice);
        }
 
+       default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
+
        fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                                    FW_REG_READ);
        fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -812,11 +865,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
        intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
 
        mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
-       /*
-        * The HW expects the slice and sublice selectors to be reset to 0
-        * after reading out the registers.
-        */
-       WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
+
+       WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
+                    default_mcr_s_ss_select);
+
        mcr &= ~mcr_slice_subslice_mask;
        mcr |= mcr_slice_subslice_select;
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
@@ -824,6 +876,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
        ret = I915_READ_FW(reg);
 
        mcr &= ~mcr_slice_subslice_mask;
+       mcr |= default_mcr_s_ss_select;
+
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
 
        intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
@@ -934,11 +988,24 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return true;
 
        /* Waiting to drain ELSP? */
-       if (READ_ONCE(engine->execlists.active))
-               return false;
+       if (READ_ONCE(engine->execlists.active)) {
+               struct tasklet_struct *t = &engine->execlists.tasklet;
+
+               local_bh_disable();
+               if (tasklet_trylock(t)) {
+                       /* Must wait for any GPU reset in progress. */
+                       if (__tasklet_is_enabled(t))
+                               t->func(t->data);
+                       tasklet_unlock(t);
+               }
+               local_bh_enable();
 
-       /* ELSP is empty, but there are ready requests? */
-       if (READ_ONCE(engine->execlists.first))
+               if (READ_ONCE(engine->execlists.active))
+                       return false;
+       }
+
+       /* ELSP is empty, but there are ready requests? E.g. after reset */
+       if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
                return false;
 
        /* Ring stopped? */
@@ -978,8 +1045,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
  */
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 {
-       const struct i915_gem_context * const kernel_context =
-               engine->i915->kernel_context;
+       const struct intel_context *kernel_context =
+               to_intel_context(engine->i915->kernel_context, engine);
        struct i915_request *rq;
 
        lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -991,7 +1058,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
         */
        rq = __i915_gem_active_peek(&engine->timeline.last_request);
        if (rq)
-               return rq->ctx == kernel_context;
+               return rq->hw_context == kernel_context;
        else
                return engine->last_retired_context == kernel_context;
 }
@@ -1005,6 +1072,28 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
                engine->set_default_submission(engine);
 }
 
+/**
+ * intel_engines_sanitize: called after the GPU has lost power
+ * @i915: the i915 device
+ *
+ * Anytime we reset the GPU, either with an explicit GPU reset or through a
+ * PCI power cycle, the GPU loses state and we must reset our state tracking
+ * to match. Note that calling intel_engines_sanitize() if the GPU has not
+ * been reset results in much confusion!
+ */
+void intel_engines_sanitize(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       GEM_TRACE("\n");
+
+       for_each_engine(engine, i915, id) {
+               if (engine->reset.reset)
+                       engine->reset.reset(engine, NULL);
+       }
+}
+
 /**
  * intel_engines_park: called when the GT is transitioning from busy->idle
  * @i915: the i915 device
@@ -1043,6 +1132,11 @@ void intel_engines_park(struct drm_i915_private *i915)
                if (engine->park)
                        engine->park(engine);
 
+               if (engine->pinned_default_state) {
+                       i915_gem_object_unpin_map(engine->default_state);
+                       engine->pinned_default_state = NULL;
+               }
+
                i915_gem_batch_pool_fini(&engine->batch_pool);
                engine->execlists.no_priolist = false;
        }
@@ -1060,6 +1154,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
        enum intel_engine_id id;
 
        for_each_engine(engine, i915, id) {
+               void *map;
+
+               /* Pin the default state for fast resets from atomic context. */
+               map = NULL;
+               if (engine->default_state)
+                       map = i915_gem_object_pin_map(engine->default_state,
+                                                     I915_MAP_WB);
+               if (!IS_ERR_OR_NULL(map))
+                       engine->pinned_default_state = map;
+
                if (engine->unpark)
                        engine->unpark(engine);
 
@@ -1067,6 +1171,26 @@ void intel_engines_unpark(struct drm_i915_private *i915)
        }
 }
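The pin in intel_engines_unpark() above uses the usual kernel error-pointer convention: i915_gem_object_pin_map() returns either a usable pointer or an errno encoded into the top 4095 pointer values, and map stays NULL for engines without a default state, hence the combined IS_ERR_OR_NULL() test. A minimal userspace model of that convention (the -12 is just an example -ENOMEM):

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }

static int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *map = ERR_PTR(-12);               /* a failed pin */

        if (IS_ERR_OR_NULL(map))
                printf("not pinned (%ld)\n", map ? PTR_ERR(map) : 0L);
        else
                printf("pinned at %p\n", map);
        return 0;
}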
 
+/**
+ * intel_engine_lost_context: called when the GPU is reset into an unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU, or are otherwise about to lose tracking of
+ * the current GPU logical state (e.g. on suspend). On next use, it is therefore
+ * imperative that we make no presumptions about the current state and load
+ * from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+       struct intel_context *ce;
+
+       lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+       ce = fetch_and_zero(&engine->last_retired_context);
+       if (ce)
+               intel_context_unpin(ce);
+}
+
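fetch_and_zero() above is a small helper from the driver's utility headers: evaluate a variable, reset it to zero, and yield the old value in one expression (a GNU statement expression; the caller holds struct_mutex, so it needs no atomicity). Roughly, under that assumption:

#include <stdio.h>

#define fetch_and_zero(ptr) ({                          \
        typeof(*(ptr)) __val = *(ptr);                  \
        *(ptr) = (typeof(*(ptr)))0;                     \
        __val;                                          \
})

int main(void)
{
        const char *last_retired = "kernel_context";
        const char *ce = fetch_and_zero(&last_retired);

        /* Prints the taken value and a NULL slot. */
        printf("took %s, slot now %p\n", ce, (void *)last_retired);
        return 0;
}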
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
        switch (INTEL_GEN(engine->i915)) {
@@ -1151,7 +1275,7 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len)
                                                rowsize, sizeof(u32),
                                                line, sizeof(line),
                                                false) >= sizeof(line));
-               drm_printf(m, "%08zx %s\n", pos, line);
+               drm_printf(m, "[%04zx] %s\n", pos, line);
 
                prev = buf + pos;
                skip = false;
@@ -1166,6 +1290,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                &engine->execlists;
        u64 addr;
 
+       if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+               drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
        drm_printf(m, "\tRING_START: 0x%08x\n",
                   I915_READ(RING_START(engine->mmio_base)));
        drm_printf(m, "\tRING_HEAD:  0x%08x\n",
@@ -1232,12 +1358,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
                ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
                read = GEN8_CSB_READ_PTR(ptr);
                write = GEN8_CSB_WRITE_PTR(ptr);
-               drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
+               drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n",
                           read, execlists->csb_head,
                           write,
                           intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
-                          yesno(test_bit(ENGINE_IRQ_EXECLIST,
-                                         &engine->irq_posted)),
                           yesno(test_bit(TASKLET_STATE_SCHED,
                                          &engine->execlists.tasklet.state)),
                           enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
@@ -1287,6 +1411,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
        }
 }
 
+static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
+{
+       void *ring;
+       int size;
+
+       drm_printf(m,
+                  "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
+                  rq->head, rq->postfix, rq->tail,
+                  rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+                  rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
+       size = rq->tail - rq->head;
+       if (rq->tail < rq->head)
+               size += rq->ring->size;
+
+       ring = kmalloc(size, GFP_ATOMIC);
+       if (ring) {
+               const void *vaddr = rq->ring->vaddr;
+               unsigned int head = rq->head;
+               unsigned int len = 0;
+
+               if (rq->tail < head) {
+                       len = rq->ring->size - head;
+                       memcpy(ring, vaddr + head, len);
+                       head = 0;
+               }
+               memcpy(ring + len, vaddr + head, size - len);
+
+               hexdump(m, ring, size);
+               kfree(ring);
+       }
+}
+
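print_request_ring() has to linearize a possibly wrapped range of the ring before hexdumping it: when tail < head the span crosses the end of the buffer, so the tail end of the ring is copied first and the remainder from offset 0. The same logic, runnable standalone on a toy 8-byte ring:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy the logical range [head, tail) of a ring of ring_size bytes
 * into a fresh linear buffer, handling wrap-around. */
static void *snapshot_ring(const void *vaddr, unsigned int ring_size,
                           unsigned int head, unsigned int tail,
                           unsigned int *out_size)
{
        unsigned int size = tail - head;
        unsigned int len = 0;
        char *copy;

        if (tail < head)
                size += ring_size;

        copy = malloc(size);
        if (!copy)
                return NULL;

        if (tail < head) {
                len = ring_size - head;
                memcpy(copy, (const char *)vaddr + head, len);
                head = 0;
        }
        memcpy(copy + len, (const char *)vaddr + head, size - len);

        *out_size = size;
        return copy;
}

int main(void)
{
        static const char ring[] = "abcdefgh";  /* 8-byte toy ring */
        unsigned int size;
        char *copy = snapshot_ring(ring, 8, 6, 3, &size);

        if (copy) {
                printf("%.*s\n", (int)size, copy);      /* "ghabc" */
                free(copy);
        }
        return 0;
}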
 void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...)
@@ -1296,6 +1453,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        const struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_gpu_error * const error = &engine->i915->gpu_error;
        struct i915_request *rq, *last;
+       unsigned long flags;
        struct rb_node *rb;
        int count;
 
@@ -1336,11 +1494,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        rq = i915_gem_find_active_request(engine);
        if (rq) {
                print_request(m, rq, "\t\tactive ");
-               drm_printf(m,
-                          "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-                          rq->head, rq->postfix, rq->tail,
-                          rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-                          rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+
                drm_printf(m, "\t\tring->start:  0x%08x\n",
                           i915_ggtt_offset(rq->ring->vma));
                drm_printf(m, "\t\tring->head:   0x%08x\n",
@@ -1351,6 +1505,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                           rq->ring->emit);
                drm_printf(m, "\t\tring->space:  0x%08x\n",
                           rq->ring->space);
+
+               print_request_ring(m, rq);
        }
 
        rcu_read_unlock();
@@ -1362,7 +1518,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                drm_printf(m, "\tDevice is asleep; skipping register dump\n");
        }
 
-       spin_lock_irq(&engine->timeline.lock);
+       local_irq_save(flags);
+       spin_lock(&engine->timeline.lock);
 
        last = NULL;
        count = 0;
@@ -1384,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        last = NULL;
        count = 0;
        drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
-       for (rb = execlists->first; rb; rb = rb_next(rb)) {
+       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                struct i915_priolist *p =
                        rb_entry(rb, typeof(*p), node);
 
@@ -1404,22 +1561,21 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                print_request(m, last, "\t\tQ ");
        }
 
-       spin_unlock_irq(&engine->timeline.lock);
+       spin_unlock(&engine->timeline.lock);
 
-       spin_lock_irq(&b->rb_lock);
+       spin_lock(&b->rb_lock);
        for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
                struct intel_wait *w = rb_entry(rb, typeof(*w), node);
 
                drm_printf(m, "\t%s [%d] waiting for %x\n",
                           w->tsk->comm, w->tsk->pid, w->seqno);
        }
-       spin_unlock_irq(&b->rb_lock);
+       spin_unlock(&b->rb_lock);
+       local_irq_restore(flags);
 
-       drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+       drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
                   engine->irq_posted,
                   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
-                                 &engine->irq_posted)),
-                  yesno(test_bit(ENGINE_IRQ_EXECLIST,
                                  &engine->irq_posted)));
 
        drm_printf(m, "HWSP:\n");
@@ -1468,8 +1624,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        if (!intel_engine_supports_stats(engine))
                return -ENODEV;
 
-       tasklet_disable(&execlists->tasklet);
-       write_seqlock_irqsave(&engine->stats.lock, flags);
+       spin_lock_irqsave(&engine->timeline.lock, flags);
+       write_seqlock(&engine->stats.lock);
 
        if (unlikely(engine->stats.enabled == ~0)) {
                err = -EBUSY;
@@ -1493,8 +1649,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        }
 
 unlock:
-       write_sequnlock_irqrestore(&engine->stats.lock, flags);
-       tasklet_enable(&execlists->tasklet);
+       write_sequnlock(&engine->stats.lock);
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
        return err;
 }
index b431b6733cc1582e826760a60cf1bd49e4feacff..01d1d2088f0488d1a8a8de4219c0c6307e0916ea 100644 (file)
@@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
        return dev_priv->fbc.active;
 }
 
-static void intel_fbc_work_fn(struct work_struct *__work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(__work, struct drm_i915_private, fbc.work.work);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_fbc_work *work = &fbc->work;
-       struct intel_crtc *crtc = fbc->crtc;
-       struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
-
-       if (drm_crtc_vblank_get(&crtc->base)) {
-               /* CRTC is now off, leave FBC deactivated */
-               mutex_lock(&fbc->lock);
-               work->scheduled = false;
-               mutex_unlock(&fbc->lock);
-               return;
-       }
-
-retry:
-       /* Delay the actual enabling to let pageflipping cease and the
-        * display to settle before starting the compression. Note that
-        * this delay also serves a second purpose: it allows for a
-        * vblank to pass after disabling the FBC before we attempt
-        * to modify the control registers.
-        *
-        * WaFbcWaitForVBlankBeforeEnable:ilk,snb
-        *
-        * It is also worth mentioning that since work->scheduled_vblank can be
-        * updated multiple times by the other threads, hitting the timeout is
-        * not an error condition. We'll just end up hitting the "goto retry"
-        * case below.
-        */
-       wait_event_timeout(vblank->queue,
-               drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
-               msecs_to_jiffies(50));
-
-       mutex_lock(&fbc->lock);
-
-       /* Were we cancelled? */
-       if (!work->scheduled)
-               goto out;
-
-       /* Were we delayed again while this function was sleeping? */
-       if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
-               mutex_unlock(&fbc->lock);
-               goto retry;
-       }
-
-       intel_fbc_hw_activate(dev_priv);
-
-       work->scheduled = false;
-
-out:
-       mutex_unlock(&fbc->lock);
-       drm_crtc_vblank_put(&crtc->base);
-}
-
-static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_fbc_work *work = &fbc->work;
-
-       WARN_ON(!mutex_is_locked(&fbc->lock));
-       if (WARN_ON(!fbc->enabled))
-               return;
-
-       if (drm_crtc_vblank_get(&crtc->base)) {
-               DRM_ERROR("vblank not available for FBC on pipe %c\n",
-                         pipe_name(crtc->pipe));
-               return;
-       }
-
-       /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
-        * this function since we're not releasing fbc.lock, so it won't have an
-        * opportunity to grab it to discover that it was cancelled. So we just
-        * update the expected jiffy count. */
-       work->scheduled = true;
-       work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
-       drm_crtc_vblank_put(&crtc->base);
-
-       schedule_work(&work->work);
-}
-
 static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
                                 const char *reason)
 {
@@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
 
        WARN_ON(!mutex_is_locked(&fbc->lock));
 
-       /* Calling cancel_work() here won't help due to the fact that the work
-        * function grabs fbc->lock. Just set scheduled to false so the work
-        * function can know it was cancelled. */
-       fbc->work.scheduled = false;
-
        if (fbc->active)
                intel_fbc_hw_deactivate(dev_priv);
 
@@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
                                                32 * fbc->threshold) * 8;
 }
 
-static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
-                                      struct intel_fbc_reg_params *params2)
-{
-       /* We can use this since intel_fbc_get_reg_params() does a memset. */
-       return memcmp(params1, params2, sizeof(*params1)) == 0;
-}
-
 void intel_fbc_pre_update(struct intel_crtc *crtc,
                          struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *plane_state)
@@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
                goto unlock;
 
        intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
+       fbc->flip_pending = true;
 
 deactivate:
        intel_fbc_deactivate(dev_priv, reason);
@@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
-       struct intel_fbc_reg_params old_params;
 
        WARN_ON(!mutex_is_locked(&fbc->lock));
 
        if (!fbc->enabled || fbc->crtc != crtc)
                return;
 
+       fbc->flip_pending = false;
+       WARN_ON(fbc->active);
+
        if (!i915_modparams.enable_fbc) {
                intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
                __intel_fbc_disable(dev_priv);
@@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
                return;
        }
 
-       if (!intel_fbc_can_activate(crtc)) {
-               WARN_ON(fbc->active);
-               return;
-       }
-
-       old_params = fbc->params;
        intel_fbc_get_reg_params(crtc, &fbc->params);
 
-       /* If the scanout has not changed, don't modify the FBC settings.
-        * Note that we make the fundamental assumption that the fb->obj
-        * cannot be unpinned (and have its GTT offset and fence revoked)
-        * without first being decoupled from the scanout and FBC disabled.
-        */
-       if (fbc->active &&
-           intel_fbc_reg_params_equal(&old_params, &fbc->params))
+       if (!intel_fbc_can_activate(crtc))
                return;
 
-       intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
-       intel_fbc_schedule_activation(crtc);
+       if (!fbc->busy_bits) {
+               intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
+               intel_fbc_hw_activate(dev_priv);
+       } else {
+               intel_fbc_deactivate(dev_priv, "frontbuffer write");
+       }
 }
 
 void intel_fbc_post_update(struct intel_crtc *crtc)
@@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
            (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
                if (fbc->active)
                        intel_fbc_recompress(dev_priv);
-               else
+               else if (!fbc->flip_pending)
                        __intel_fbc_post_update(fbc->crtc);
        }
 
@@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
        if (fbc->crtc == crtc)
                __intel_fbc_disable(dev_priv);
        mutex_unlock(&fbc->lock);
-
-       cancel_work_sync(&fbc->work.work);
 }
 
 /**
@@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
                __intel_fbc_disable(dev_priv);
        }
        mutex_unlock(&fbc->lock);
-
-       cancel_work_sync(&fbc->work.work);
 }
 
 static void intel_fbc_underrun_work_fn(struct work_struct *work)
@@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
        struct intel_fbc *fbc = &dev_priv->fbc;
 
-       INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
        INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
        mutex_init(&fbc->lock);
        fbc->enabled = false;
        fbc->active = false;
-       fbc->work.scheduled = false;
 
        if (need_fbc_vtd_wa(dev_priv))
                mkwrite_device_info(dev_priv)->has_fbc = false;
index e9e02b58b7be6e2f4fb8b8d1302b552fa7849323..fb2f9fce34cd2a3e627d183d2bca1028d7f48282 100644 (file)
@@ -47,7 +47,7 @@
 
 static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
 {
-       struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
        unsigned int origin =
                ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
 
@@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
                drm_framebuffer_put(&intel_fb->base);
                intel_fb = ifbdev->fb = NULL;
        }
-       if (!intel_fb || WARN_ON(!intel_fb->obj)) {
+       if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
                if (ret)
@@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
         * If the object is stolen however, it will be full of whatever
         * garbage was left in there.
         */
-       if (intel_fb->obj->stolen && !prealloc)
+       if (intel_fb_obj(fb)->stolen && !prealloc)
                memset_io(info->screen_base, 0, info->screen_size);
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
         * been restored from swap. If the object is stolen however, it will be
         * full of whatever garbage was left in there.
         */
-       if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
+       if (state == FBINFO_STATE_RUNNING &&
+           intel_fb_obj(&ifbdev->fb->base)->stolen)
                memset_io(info->screen_base, 0, info->screen_size);
 
        drm_fb_helper_set_suspend(&ifbdev->helper, state);
index 7fff0a0eceb4712badfd7b014592dec5ea81985b..c3379bde266f12b1d1756733943ddc14b9d628e1 100644 (file)
@@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
        /* Remove stale busy bits due to the old buffer. */
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
        spin_unlock(&dev_priv->fb_tracking.lock);
-
-       intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
 }
 
 /**
index 116f4ccf1bbd62b515350c19c28ec86ae99b62a8..560c7406ae406e5df1d2b083024ea7ae983db4e8 100644 (file)
@@ -27,6 +27,8 @@
 #include "intel_guc_submission.h"
 #include "i915_drv.h"
 
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc);
+
 static void gen8_guc_raise_irq(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc)
        guc->notify = gen8_guc_raise_irq;
 }
 
-int intel_guc_init_wq(struct intel_guc *guc)
+static int guc_init_wq(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
@@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
        return 0;
 }
 
-void intel_guc_fini_wq(struct intel_guc *guc)
+static void guc_fini_wq(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 
@@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc)
        destroy_workqueue(guc->log.relay.flush_wq);
 }
 
+int intel_guc_init_misc(struct intel_guc *guc)
+{
+       struct drm_i915_private *i915 = guc_to_i915(guc);
+       int ret;
+
+       guc_init_ggtt_pin_bias(guc);
+
+       ret = guc_init_wq(guc);
+       if (ret)
+               return ret;
+
+       intel_uc_fw_fetch(i915, &guc->fw);
+
+       return 0;
+}
+
+void intel_guc_fini_misc(struct intel_guc *guc)
+{
+       intel_uc_fw_fini(&guc->fw);
+       guc_fini_wq(guc);
+}
+
 static int guc_shared_data_create(struct intel_guc *guc)
 {
        struct i915_vma *vma;
@@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc)
 
        ret = guc_shared_data_create(guc);
        if (ret)
-               return ret;
+               goto err_fetch;
        GEM_BUG_ON(!guc->shared_data);
 
        ret = intel_guc_log_create(&guc->log);
@@ -190,6 +214,8 @@ err_log:
        intel_guc_log_destroy(&guc->log);
 err_shared:
        guc_shared_data_destroy(guc);
+err_fetch:
+       intel_uc_fw_fini(&guc->fw);
        return ret;
 }
 
@@ -201,14 +227,17 @@ void intel_guc_fini(struct intel_guc *guc)
        intel_guc_ads_destroy(guc);
        intel_guc_log_destroy(&guc->log);
        guc_shared_data_destroy(guc);
+       intel_uc_fw_fini(&guc->fw);
 }
 
-static u32 get_log_control_flags(void)
+static u32 guc_ctl_debug_flags(struct intel_guc *guc)
 {
-       u32 level = i915_modparams.guc_log_level;
-       u32 flags = 0;
+       u32 level = intel_guc_log_get_level(&guc->log);
+       u32 flags;
+       u32 ads;
 
-       GEM_BUG_ON(level < 0);
+       ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
+       flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED;
 
        if (!GUC_LOG_LEVEL_IS_ENABLED(level))
                flags |= GUC_LOG_DEFAULT_DISABLED;
@@ -222,6 +251,78 @@ static u32 get_log_control_flags(void)
        return flags;
 }
 
+static u32 guc_ctl_feature_flags(struct intel_guc *guc)
+{
+       u32 flags = 0;
+
+       flags |= GUC_CTL_VCS2_ENABLED;
+
+       if (USES_GUC_SUBMISSION(guc_to_i915(guc)))
+               flags |= GUC_CTL_KERNEL_SUBMISSIONS;
+       else
+               flags |= GUC_CTL_DISABLE_SCHEDULER;
+
+       return flags;
+}
+
+static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
+{
+       u32 flags = 0;
+
+       if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+               u32 ctxnum, base;
+
+               base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
+               ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
+               base >>= PAGE_SHIFT;
+               flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
+                       (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
+       }
+       return flags;
+}
+
+static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
+{
+       u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
+       u32 flags;
+
+       #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
+       #define UNIT SZ_1M
+       #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
+       #else
+       #define UNIT SZ_4K
+       #define FLAG 0
+       #endif
+
+       BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
+       BUILD_BUG_ON(!DPC_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
+       BUILD_BUG_ON(!ISR_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
+
+       BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
+       BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
+       BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
+
+       flags = GUC_LOG_VALID |
+               GUC_LOG_NOTIFY_ON_HALF_FULL |
+               FLAG |
+               ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
+               ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
+               ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
+               (offset << GUC_LOG_BUF_ADDR_SHIFT);
+
+       #undef UNIT
+       #undef FLAG
+
+       return flags;
+}
+
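The packing can be checked in isolation. The sketch below reuses the shift macros from intel_guc_fwif.h and the non-debug buffer sizes from intel_guc_log.h (both visible later in this diff); since 8K/32K are not multiples of 1M, the unit is 4K and GUC_LOG_ALLOC_IN_MEGABYTE stays clear. Only the size fields are computed here, not GUC_LOG_VALID or the buffer offset:

#include <stdio.h>

#define SZ_4K   0x1000u
#define SZ_8K   0x2000u
#define SZ_32K  0x8000u

#define GUC_LOG_CRASH_SHIFT     4
#define GUC_LOG_DPC_SHIFT       6
#define GUC_LOG_ISR_SHIFT       9

int main(void)
{
        unsigned int unit = SZ_4K;
        unsigned int flags =
                ((SZ_8K  / unit - 1) << GUC_LOG_CRASH_SHIFT) |  /* 1 */
                ((SZ_32K / unit - 1) << GUC_LOG_DPC_SHIFT) |    /* 7 */
                ((SZ_32K / unit - 1) << GUC_LOG_ISR_SHIFT);     /* 7 */

        printf("size fields: %#x\n", flags);    /* 0xfd0 */
        return 0;
}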
 /*
  * Initialise the GuC parameter block before starting the firmware
  * transfer. These parameters are read by the firmware on startup
@@ -245,32 +346,13 @@ void intel_guc_init_params(struct intel_guc *guc)
 
        params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER;
 
-       params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER |
-                       GUC_CTL_VCS2_ENABLED;
-
-       params[GUC_CTL_LOG_PARAMS] = guc->log.flags;
-
-       params[GUC_CTL_DEBUG] = get_log_control_flags();
+       params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
+       params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
+       params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
+       params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
 
-       /* If GuC submission is enabled, set up additional parameters here */
-       if (USES_GUC_SUBMISSION(dev_priv)) {
-               u32 ads = intel_guc_ggtt_offset(guc,
-                                               guc->ads_vma) >> PAGE_SHIFT;
-               u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
-               u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
-
-               params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
-               params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
-
-               pgs >>= PAGE_SHIFT;
-               params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
-                       (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT);
-
-               params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS;
-
-               /* Unmask this bit to enable the GuC's internal scheduler */
-               params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER;
-       }
+       for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
+               DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
 
        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
@@ -346,10 +428,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                ret = -EIO;
 
        if (ret) {
-               DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;"
-                                " ret=%d status=0x%08X response=0x%08X\n",
-                                action[0], ret, status,
-                                I915_READ(SOFT_SCRATCH(15)));
+               DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
+                         action[0], ret, status);
                goto out;
        }
 
@@ -386,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
         * could happen that GuC sets the bit for 2nd interrupt but Host
         * clears out the bit on handling the 1st interrupt.
         */
+       disable_rpm_wakeref_asserts(dev_priv);
        spin_lock(&guc->irq_lock);
        val = I915_READ(SOFT_SCRATCH(15));
        msg = val & guc->msg_enabled_mask;
        I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
        spin_unlock(&guc->irq_lock);
+       enable_rpm_wakeref_asserts(dev_priv);
 
        intel_guc_to_host_process_recv_msg(guc, msg);
 }
@@ -532,13 +614,13 @@ int intel_guc_resume(struct intel_guc *guc)
  */
 
 /**
- * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
+ * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value.
  * @guc: intel_guc structure.
  *
  * This function will calculate and initialize the ggtt_pin_bias value based on
  * overall WOPCM size and GuC WOPCM size.
  */
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc)
+static void guc_init_ggtt_pin_bias(struct intel_guc *guc)
 {
        struct drm_i915_private *i915 = guc_to_i915(guc);
 
@@ -572,7 +654,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto err;
 
index f1265e122d307b6af8820aa9ee37c74e2f3b0f19..4121928a495e0ccfc4af79aeb27430f26dd15959 100644 (file)
@@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 void intel_guc_init_early(struct intel_guc *guc);
 void intel_guc_init_send_regs(struct intel_guc *guc);
 void intel_guc_init_params(struct intel_guc *guc);
-void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc);
-int intel_guc_init_wq(struct intel_guc *guc);
-void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init_misc(struct intel_guc *guc);
 int intel_guc_init(struct intel_guc *guc);
 void intel_guc_fini(struct intel_guc *guc);
+void intel_guc_fini_misc(struct intel_guc *guc);
 int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
                       u32 *response_buf, u32 response_buf_size);
 int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
index 0867ba76d44536500f775948e04002dac408db00..1a0f2a39cef9b892fc4e78cdba85fa1d29e9234f 100644 (file)
 #define   GUC_LOG_VALID                        (1 << 0)
 #define   GUC_LOG_NOTIFY_ON_HALF_FULL  (1 << 1)
 #define   GUC_LOG_ALLOC_IN_MEGABYTE    (1 << 3)
-#define   GUC_LOG_CRASH_PAGES          1
 #define   GUC_LOG_CRASH_SHIFT          4
-#define   GUC_LOG_DPC_PAGES            7
+#define   GUC_LOG_CRASH_MASK           (0x1 << GUC_LOG_CRASH_SHIFT)
 #define   GUC_LOG_DPC_SHIFT            6
-#define   GUC_LOG_ISR_PAGES            7
+#define   GUC_LOG_DPC_MASK             (0x7 << GUC_LOG_DPC_SHIFT)
 #define   GUC_LOG_ISR_SHIFT            9
+#define   GUC_LOG_ISR_MASK             (0x7 << GUC_LOG_ISR_SHIFT)
 #define   GUC_LOG_BUF_ADDR_SHIFT       12
 
 #define GUC_CTL_PAGE_FAULT_CONTROL     5
@@ -532,20 +532,6 @@ enum guc_log_buffer_type {
 };
 
 /**
- * DOC: GuC Log buffer Layout
- *
- * Page0  +-------------------------------+
- *        |   ISR state header (32 bytes) |
- *        |      DPC state header         |
- *        |   Crash dump state header     |
- * Page1  +-------------------------------+
- *        |           ISR logs            |
- * Page9  +-------------------------------+
- *        |           DPC logs            |
- * Page17 +-------------------------------+
- *        |         Crash Dump logs       |
- *        +-------------------------------+
- *
  * Below state structure is used for coordination of retrieval of GuC firmware
  * logs. Separate state is maintained for each log buffer type.
  * read_ptr points to the location where i915 read last in log buffer and
index 401e1704d61edb09d34d57adb2834e5546e8591e..6da61a71d28f69835c589bbdf5acc48f19b69197 100644 (file)
@@ -215,11 +215,11 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
 {
        switch (type) {
        case GUC_ISR_LOG_BUFFER:
-               return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+               return ISR_BUFFER_SIZE;
        case GUC_DPC_LOG_BUFFER:
-               return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+               return DPC_BUFFER_SIZE;
        case GUC_CRASH_DUMP_LOG_BUFFER:
-               return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+               return CRASH_BUFFER_SIZE;
        default:
                MISSING_CASE(type);
        }
@@ -397,7 +397,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
        lockdep_assert_held(&log->relay.lock);
 
        /* Keep the size of the sub-buffers the same as the shared log buffer */
-       subbuf_size = GUC_LOG_SIZE;
+       subbuf_size = log->vma->size;
 
        /*
         * Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -452,13 +452,34 @@ int intel_guc_log_create(struct intel_guc_log *log)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct i915_vma *vma;
-       unsigned long offset;
-       u32 flags;
+       u32 guc_log_size;
        int ret;
 
        GEM_BUG_ON(log->vma);
 
-       vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE);
+       /*
+        *  GuC Log buffer Layout
+        *
+        *  +===============================+ 00B
+        *  |    Crash dump state header    |
+        *  +-------------------------------+ 32B
+        *  |       DPC state header        |
+        *  +-------------------------------+ 64B
+        *  |       ISR state header        |
+        *  +-------------------------------+ 96B
+        *  |                               |
+        *  +===============================+ PAGE_SIZE (4KB)
+        *  |        Crash Dump logs        |
+        *  +===============================+ + CRASH_SIZE
+        *  |           DPC logs            |
+        *  +===============================+ + DPC_SIZE
+        *  |           ISR logs            |
+        *  +===============================+ + ISR_SIZE
+        */
+       guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
+                       ISR_BUFFER_SIZE;
+
+       vma = intel_guc_allocate_vma(guc, guc_log_size);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
@@ -466,20 +487,12 @@ int intel_guc_log_create(struct intel_guc_log *log)
 
        log->vma = vma;
 
-       /* each allocated unit is a page */
-       flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
-               (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
-               (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
-               (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
-       offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT;
-       log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+       log->level = i915_modparams.guc_log_level;
 
        return 0;
 
 err:
-       /* logging will be off */
-       i915_modparams.guc_log_level = 0;
+       DRM_ERROR("Failed to allocate GuC log buffer: %d\n", ret);
        return ret;
 }
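A quick standalone check of the layout arithmetic above, with the non-debug sizes from intel_guc_log.h further down: one 4K state page followed by the three log regions.

#include <stdio.h>

#define PAGE_SIZE               0x1000u
#define CRASH_BUFFER_SIZE       0x2000u         /* SZ_8K  */
#define DPC_BUFFER_SIZE         0x8000u         /* SZ_32K */
#define ISR_BUFFER_SIZE         0x8000u         /* SZ_32K */

int main(void)
{
        unsigned int guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE +
                                    DPC_BUFFER_SIZE + ISR_BUFFER_SIZE;

        /* 77824 bytes, i.e. 76 KiB */
        printf("guc_log_size = %u (%u KiB)\n",
               guc_log_size, guc_log_size / 1024);
        return 0;
}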
 
@@ -488,15 +501,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
        i915_vma_unpin_and_release(&log->vma);
 }
 
-int intel_guc_log_level_get(struct intel_guc_log *log)
-{
-       GEM_BUG_ON(!log->vma);
-       GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-
-       return i915_modparams.guc_log_level;
-}
-
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
 {
        struct intel_guc *guc = log_to_guc(log);
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -504,33 +509,32 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val)
 
        BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
        GEM_BUG_ON(!log->vma);
-       GEM_BUG_ON(i915_modparams.guc_log_level < 0);
 
        /*
         * GuC recognizes log levels from 0 to max; we use 0 to indicate
         * that logging should be disabled.
         */
-       if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX)
+       if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
                return -EINVAL;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       if (i915_modparams.guc_log_level == val) {
+       if (log->level == level) {
                ret = 0;
                goto out_unlock;
        }
 
        intel_runtime_pm_get(dev_priv);
-       ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val),
-                                    GUC_LOG_LEVEL_IS_ENABLED(val),
-                                    GUC_LOG_LEVEL_TO_VERBOSITY(val));
+       ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
+                                    GUC_LOG_LEVEL_IS_ENABLED(level),
+                                    GUC_LOG_LEVEL_TO_VERBOSITY(level));
        intel_runtime_pm_put(dev_priv);
        if (ret) {
                DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
                goto out_unlock;
        }
 
-       i915_modparams.guc_log_level = val;
+       log->level = level;
 
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
index fa80535a6f9d81c6f7689b237ed91c5ed2b6f385..7bc763f10c03e442cc1b772a5c7cd38925b39a7b 100644 (file)
 #include <linux/workqueue.h>
 
 #include "intel_guc_fwif.h"
+#include "i915_gem.h"
 
 struct intel_guc;
 
-/*
- * The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap
- */
-#define GUC_LOG_SIZE   ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \
-                         1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT)
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define CRASH_BUFFER_SIZE      SZ_2M
+#define DPC_BUFFER_SIZE                SZ_8M
+#define ISR_BUFFER_SIZE                SZ_8M
+#else
+#define CRASH_BUFFER_SIZE      SZ_8K
+#define DPC_BUFFER_SIZE                SZ_32K
+#define ISR_BUFFER_SIZE                SZ_32K
+#endif
 
 /*
  * While we're using plain log level in i915, GuC controls are much more...
@@ -58,7 +62,7 @@ struct intel_guc;
 #define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX)
 
 struct intel_guc_log {
-       u32 flags;
+       u32 level;
        struct i915_vma *vma;
        struct {
                void *buf_addr;
@@ -80,8 +84,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log);
 int intel_guc_log_create(struct intel_guc_log *log);
 void intel_guc_log_destroy(struct intel_guc_log *log);
 
-int intel_guc_log_level_get(struct intel_guc_log *log);
-int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val);
+int intel_guc_log_set_level(struct intel_guc_log *log, u32 level);
 bool intel_guc_log_relay_enabled(const struct intel_guc_log *log);
 int intel_guc_log_relay_open(struct intel_guc_log *log);
 void intel_guc_log_relay_flush(struct intel_guc_log *log);
@@ -89,4 +92,9 @@ void intel_guc_log_relay_close(struct intel_guc_log *log);
 
 void intel_guc_log_handle_flush_event(struct intel_guc_log *log);
 
+static inline u32 intel_guc_log_get_level(struct intel_guc_log *log)
+{
+       return log->level;
+}
+
 #endif
index 2feb65096966519440a67092714bdcdec847db8f..4aa5e6463e7b70d47fa459b072e6d93e670f9b25 100644 (file)
@@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
        struct intel_guc_client *client = guc->execbuf_client;
        struct intel_engine_cs *engine = rq->engine;
-       u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
-                                                                engine));
+       u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
        u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
        spin_lock(&client->wq_lock);
@@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
  */
 static void flush_ggtt_writes(struct i915_vma *vma)
 {
-       struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
+       struct drm_i915_private *dev_priv = vma->vm->i915;
 
        if (i915_vma_is_map_and_fenceable(vma))
                POSTING_READ_FW(GUC_STATUS);
@@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work)
                                             preempt_work[engine->id]);
        struct intel_guc_client *client = guc->preempt_client;
        struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-       u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
-                                                                engine));
+       u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+                                                     engine)->lrc_desc);
        u32 data[7];
 
        /*
@@ -623,6 +622,22 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
        report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
 }
 
+static void complete_preempt_context(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists *execlists = &engine->execlists;
+
+       GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+       if (inject_preempt_hang(execlists))
+               return;
+
+       execlists_cancel_port_requests(execlists);
+       execlists_unwind_incomplete_requests(execlists);
+
+       wait_for_guc_preempt_report(engine);
+       intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
+}
+
 /**
  * guc_submit() - Submit commands through GuC
  * @engine: engine associated with the commands
@@ -681,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 
        lockdep_assert_held(&engine->timeline.lock);
 
-       rb = execlists->first;
-       GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
        if (port_isset(port)) {
                if (intel_engine_has_preemption(engine)) {
                        struct guc_preempt_work *preempt_work =
@@ -705,12 +717,12 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
        }
        GEM_BUG_ON(port_isset(port));
 
-       while (rb) {
+       while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
 
                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-                       if (last && rq->ctx != last->ctx) {
+                       if (last && rq->hw_context != last->hw_context) {
                                if (port == last_port) {
                                        __list_del_many(&p->requests,
                                                        &rq->sched.link);
@@ -730,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
                        submit = true;
                }
 
-               rb = rb_next(rb);
-               rb_erase(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &execlists->queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
        execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
-       execlists->first = rb;
        if (submit)
                port_assign(port, last);
        if (last)
@@ -747,7 +757,8 @@ done:
        /* We must always keep the beast fed if we have work piled up */
        GEM_BUG_ON(port_isset(execlists->port) &&
                   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-       GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+       GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+                  !port_isset(execlists->port));
 
        return submit;
 }
@@ -793,20 +804,44 @@ static void guc_submission_tasklet(unsigned long data)
 
        if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
            intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
-           GUC_PREEMPT_FINISHED) {
-               execlists_cancel_port_requests(&engine->execlists);
-               execlists_unwind_incomplete_requests(execlists);
-
-               wait_for_guc_preempt_report(engine);
-
-               execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
-               intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
-       }
+           GUC_PREEMPT_FINISHED)
+               complete_preempt_context(engine);
 
        if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
                guc_dequeue(engine);
 }
 
+static struct i915_request *
+guc_reset_prepare(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       /*
+        * Prevent request submission to the hardware until we have
+        * completed the reset in i915_gem_reset_finish(). If a request
+        * is completed by one engine, it may then queue a request
+        * to a second via its execlists->tasklet *just* as we are
+        * calling engine->init_hw() and also writing the ELSP.
+        * Turning off the execlists->tasklet until the reset is over
+        * prevents the race.
+        */
+       __tasklet_disable_sync_once(&execlists->tasklet);
+
+       /*
+        * In GuC submission mode we use a worker to queue preemption
+        * requests from the tasklet.
+        * Even though the tasklet was disabled, we may still have a worker
+        * queued. Let's make sure that all workers scheduled before disabling
+        * the tasklet have completed before continuing with the reset.
+        */
+       if (engine->i915->guc.preempt_wq)
+               flush_workqueue(engine->i915->guc.preempt_wq);
+
+       return i915_gem_find_active_request(engine);
+}
+
 /*
  * Everything below here is concerned with setup & teardown, and is
  * therefore not part of the somewhat time-critical batch-submission
@@ -876,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
                __update_doorbell_desc(guc->preempt_client,
                                       GUC_DOORBELL_INVALID);
        }
-       __destroy_doorbell(guc->execbuf_client);
-       __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+
+       if (guc->execbuf_client) {
+               __destroy_doorbell(guc->execbuf_client);
+               __update_doorbell_desc(guc->execbuf_client,
+                                      GUC_DOORBELL_INVALID);
+       }
 }
 
 /**
@@ -1090,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc)
                guc_client_free(client);
 
        client = fetch_and_zero(&guc->execbuf_client);
-       guc_client_free(client);
+       if (client)
+               guc_client_free(client);
 }
 
 /*
@@ -1119,7 +1159,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
        WARN_ON(!guc_verify_doorbells(guc));
        ret = guc_clients_create(guc);
        if (ret)
-               return ret;
+               goto err_pool;
 
        for_each_engine(engine, dev_priv, id) {
                guc->preempt_work[id].engine = engine;
@@ -1128,6 +1168,9 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
        return 0;
 
+err_pool:
+       guc_stage_desc_pool_destroy(guc);
+       return ret;
 }
 
 void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1142,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
        guc_clients_destroy(guc);
        WARN_ON(!guc_verify_doorbells(guc));
 
-       guc_stage_desc_pool_destroy(guc);
+       if (guc->stage_desc_pool)
+               guc_stage_desc_pool_destroy(guc);
 }
 
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1225,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine)
        intel_engine_pin_breadcrumbs_irq(engine);
 }
 
+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+       /*
+        * We inherit a bunch of functions from execlists that we'd like
+        * to keep using:
+        *
+        *    engine->submit_request = execlists_submit_request;
+        *    engine->cancel_requests = execlists_cancel_requests;
+        *    engine->schedule = execlists_schedule;
+        *
+        * But we need to override the actual submission backend in order
+        * to talk to the GuC.
+        */
+       intel_execlists_set_default_submission(engine);
+
+       engine->execlists.tasklet.func = guc_submission_tasklet;
+
+       engine->park = guc_submission_park;
+       engine->unpark = guc_submission_unpark;
+
+       engine->reset.prepare = guc_reset_prepare;
+
+       engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+}
+
 int intel_guc_submission_enable(struct intel_guc *guc)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1263,14 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
        guc_interrupts_capture(dev_priv);
 
        for_each_engine(engine, dev_priv, id) {
-               struct intel_engine_execlists * const execlists =
-                       &engine->execlists;
-
-               execlists->tasklet.func = guc_submission_tasklet;
-               engine->park = guc_submission_park;
-               engine->unpark = guc_submission_unpark;
-
-               engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+               engine->set_default_submission = guc_set_default_submission;
+               engine->set_default_submission(engine);
        }
 
        return 0;
@@ -1284,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)
 
        guc_interrupts_release(dev_priv);
        guc_clients_doorbell_fini(guc);
-
-       /* Revert back to manual ELSP submission */
-       intel_engines_reset_default_submission(dev_priv);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index a2fe7c8d44775e06e4e4168336f250c4be02bfa2..c22b3e18a0f5ff54530b0676aad6303d6c298889 100644 (file)
@@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
                return true;
        if (IS_KABYLAKE(dev_priv))
                return true;
+       if (IS_BROXTON(dev_priv))
+               return true;
        return false;
 }
 
@@ -90,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
        if (!i915_modparams.enable_gvt) {
                DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
                return 0;
index d47e346bd49e97fc45e9aa0cb15563bbdb28c2fd..2fc7a0dd0df9b2bc7a88814f75d98334bfd4d4e9 100644 (file)
@@ -294,6 +294,7 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine,
        engine->hangcheck.seqno = hc->seqno;
        engine->hangcheck.action = hc->action;
        engine->hangcheck.stalled = hc->stalled;
+       engine->hangcheck.wedged = hc->wedged;
 }
 
 static enum intel_engine_hangcheck_action
@@ -368,6 +369,9 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
 
        hc->stalled = time_after(jiffies,
                                 engine->hangcheck.action_timestamp + timeout);
+       hc->wedged = time_after(jiffies,
+                                engine->hangcheck.action_timestamp +
+                                I915_ENGINE_WEDGED_TIMEOUT);
 }
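hc->wedged leans on the jiffies helpers: time_after(a, b) is the wrap-safe "is a later than b" test, implemented as a signed subtraction, so it keeps working when the counter rolls over. A userspace model (constants invented):

#include <stdio.h>

/* Simplified kernel time_after(): true when a is after b, robust
 * against the unsigned counter wrapping. */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long before_wrap = (unsigned long)-10;
        unsigned long after_wrap = 5;           /* counter has wrapped */

        printf("%d\n", time_after(after_wrap, before_wrap));   /* 1 */
        printf("%d\n", time_after(before_wrap, after_wrap));   /* 0 */
        return 0;
}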
 
 static void hangcheck_declare_hang(struct drm_i915_private *i915,
@@ -409,7 +413,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                             gpu_error.hangcheck_work.work);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       unsigned int hung = 0, stuck = 0;
+       unsigned int hung = 0, stuck = 0, wedged = 0;
 
        if (!i915_modparams.enable_hangcheck)
                return;
@@ -440,6 +444,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        if (hc.action != ENGINE_DEAD)
                                stuck |= intel_engine_flag(engine);
                }
+
+               if (engine->hangcheck.wedged)
+                       wedged |= intel_engine_flag(engine);
+       }
+
+       if (wedged) {
+               dev_err(dev_priv->drm.dev,
+                       "GPU recovery timed out, cancelling all in-flight rendering.\n");
+               GEM_TRACE_DUMP();
+               i915_gem_set_wedged(dev_priv);
        }
 
        if (hung)
index ee929f31f7db712d0f1b306571de962480758314..8363fbd18ee870c1a3f54466b5d42ac1f21cad13 100644 (file)
@@ -51,7 +51,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
        struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t enabled_bits;
+       u32 enabled_bits;
 
        enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
@@ -59,6 +59,15 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
             "HDMI port enabled, expecting disabled\n");
 }
 
+static void
+assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+                                    enum transcoder cpu_transcoder)
+{
+       WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+            TRANS_DDI_FUNC_ENABLE,
+            "HDMI transcoder function enabled, expecting disabled\n");
+}
+
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
        struct intel_digital_port *intel_dig_port =
@@ -144,7 +153,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
@@ -199,7 +208,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -259,7 +268,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -317,7 +326,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -376,19 +385,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
                                unsigned int type,
                                const void *frame, ssize_t len)
 {
-       const uint32_t *data = frame;
+       const u32 *data = frame;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
-       i915_reg_t data_reg;
        int data_size = type == DP_SDP_VSC ?
                VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
        int i;
        u32 val = I915_READ(ctl_reg);
 
-       data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0);
-
        val &= ~hsw_infoframe_enable(type);
        I915_WRITE(ctl_reg, val);
 
@@ -442,7 +448,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
                                  union hdmi_infoframe *frame)
 {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
-       uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+       u8 buffer[VIDEO_DIP_DATA_SIZE];
        ssize_t len;
 
        /* see comment above for the reason for this offset */
@@ -461,7 +467,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
 }
 
 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
-                                        const struct intel_crtc_state *crtc_state)
+                                        const struct intel_crtc_state *crtc_state,
+                                        const struct drm_connector_state *conn_state)
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        const struct drm_display_mode *adjusted_mode =
@@ -491,6 +498,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
                                           intel_hdmi->rgb_quant_range_selectable,
                                           is_hdmi2_sink);
 
+       drm_hdmi_avi_infoframe_content_type(&frame.avi,
+                                           conn_state);
+
        /* TODO: handle pixel repetition for YCBCR420 outputs */
        intel_write_infoframe(encoder, crtc_state, &frame);
 }
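
The new conn_state parameter lets the AVI infoframe carry the connector's "content type" property. A minimal sketch of the two drm core helpers this hunk relies on; the wrapper names are illustrative only.

#include <linux/hdmi.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

/* At connector init: expose the "content type" property to userspace. */
static int example_attach_content_type(struct drm_connector *connector)
{
	return drm_connector_attach_content_type_property(connector);
}

/* At modeset: fold the property value into an already-built AVI infoframe. */
static void example_set_content_type(struct hdmi_avi_infoframe *frame,
				     const struct drm_connector_state *state)
{
	drm_hdmi_avi_infoframe_content_type(frame, state);
}
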
@@ -586,7 +596,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -727,7 +737,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -770,7 +780,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -823,7 +833,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -834,11 +844,11 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
                               const struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->dev);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
        u32 val = I915_READ(reg);
 
-       assert_hdmi_port_disabled(intel_hdmi);
+       assert_hdmi_transcoder_func_disabled(dev_priv,
+                                            crtc_state->cpu_transcoder);
 
        val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
                 VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
@@ -856,7 +866,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
 
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
        intel_hdmi_set_spd_infoframe(encoder, crtc_state);
        intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
 }
@@ -1161,33 +1171,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
 static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
-
-       tmp = I915_READ(intel_hdmi->hdmi_reg);
+       ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
 
-       if (!(tmp & SDVO_ENABLE))
-               goto out;
-
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else if (IS_CHERRYVIEW(dev_priv))
-               *pipe = SDVO_PORT_TO_PIPE_CHV(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
-
-       ret = true;
-
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
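
The open-coded pipe decode is replaced by a call to intel_sdvo_port_enabled(). The following reconstructs that decode from the deleted lines for illustration; it is not the helper's actual body, which lives in another file of this series.

/* Illustrative reconstruction from the removed lines above. */
static bool example_sdvo_port_enabled(struct drm_i915_private *dev_priv,
				      i915_reg_t reg, enum pipe *pipe)
{
	u32 val = I915_READ(reg);

	if (!(val & SDVO_ENABLE))
		return false;

	if (HAS_PCH_CPT(dev_priv))
		*pipe = PORT_TO_PIPE_CPT(val);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = SDVO_PORT_TO_PIPE_CHV(val);
	else
		*pipe = PORT_TO_PIPE(val);

	return true;
}
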
@@ -1421,8 +1414,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
-               temp &= ~SDVO_PIPE_B_SELECT;
-               temp |= SDVO_ENABLE;
+               temp &= ~SDVO_PIPE_SEL_MASK;
+               temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
                /*
                 * HW workaround, need to write this twice for issue
                 * that may result in first write getting masked.
@@ -1557,6 +1550,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        bool force_dvi =
                READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        clock = mode->clock;
 
        if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1574,14 +1570,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        /* check if we can do 8bpc */
        status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
 
-       /* if we can't do 8bpc we may still be able to do 12bpc */
-       if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi)
-               status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi);
+       if (hdmi->has_hdmi_sink && !force_dvi) {
+               /* if we can't do 8bpc we may still be able to do 12bpc */
+               if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv))
+                       status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
+                                                      true, force_dvi);
+
+               /* if we can't do 8 or 12 bpc we may still be able to do 10 bpc */
+               if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
+                       status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
+                                                      true, force_dvi);
+       }
 
        return status;
 }
 
-static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
+static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
+                                    int bpc)
 {
        struct drm_i915_private *dev_priv =
                to_i915(crtc_state->base.crtc->dev);
@@ -1593,6 +1598,9 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
        if (HAS_GMCH_DISPLAY(dev_priv))
                return false;
 
+       if (bpc == 10 && INTEL_GEN(dev_priv) < 11)
+               return false;
+
        if (crtc_state->pipe_bpp <= 8*3)
                return false;
 
@@ -1600,7 +1608,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
                return false;
 
        /*
-        * HDMI 12bpc affects the clocks, so it's only possible
+        * HDMI deep color affects the clocks, so it's only possible
         * when not cloning with other encoder types.
         */
        if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
@@ -1615,16 +1623,24 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
                if (crtc_state->ycbcr420) {
                        const struct drm_hdmi_info *hdmi = &info->hdmi;
 
-                       if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+                       if (bpc == 12 && !(hdmi->y420_dc_modes &
+                                          DRM_EDID_YCBCR420_DC_36))
+                               return false;
+                       else if (bpc == 10 && !(hdmi->y420_dc_modes &
+                                               DRM_EDID_YCBCR420_DC_30))
                                return false;
                } else {
-                       if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36))
+                       if (bpc == 12 && !(info->edid_hdmi_dc_modes &
+                                          DRM_EDID_HDMI_DC_36))
+                               return false;
+                       else if (bpc == 10 && !(info->edid_hdmi_dc_modes &
+                                               DRM_EDID_HDMI_DC_30))
                                return false;
                }
        }
 
        /* Display WA #1139: glk */
-       if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+       if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
            crtc_state->base.adjusted_mode.htotal > 5460)
                return false;
 
@@ -1634,7 +1650,8 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
 static bool
 intel_hdmi_ycbcr420_config(struct drm_connector *connector,
                           struct intel_crtc_state *config,
-                          int *clock_12bpc, int *clock_8bpc)
+                          int *clock_12bpc, int *clock_10bpc,
+                          int *clock_8bpc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc);
 
@@ -1646,6 +1663,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
        /* YCBCR420 TMDS rate requirement is half the pixel clock */
        config->port_clock /= 2;
        *clock_12bpc /= 2;
+       *clock_10bpc /= 2;
        *clock_8bpc /= 2;
        config->ycbcr420 = true;
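
YCBCR 4:2:0 carries half the TMDS rate of the pixel clock, so every candidate port clock is halved together. A worked example of the clock arithmetic, using a hypothetical 594000 kHz (4k@60) 8bpc pixel clock:

/* Worked example; the 594000 kHz figure is a hypothetical input. */
static void example_ycbcr420_clocks(void)
{
	int clock_8bpc = 594000;
	int clock_10bpc = clock_8bpc * 5 / 4;	/* 742500: 1.25x for 10bpc */
	int clock_12bpc = clock_8bpc * 3 / 2;	/* 891000: 1.5x for 12bpc */

	/* YCBCR420 needs half the TMDS rate, so all candidates halve: */
	clock_8bpc /= 2;	/* 297000 */
	clock_10bpc /= 2;	/* 371250 */
	clock_12bpc /= 2;	/* 445500 */
}
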
 
@@ -1673,10 +1691,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct intel_digital_connector_state *intel_conn_state =
                to_intel_digital_connector_state(conn_state);
        int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
+       int clock_10bpc = clock_8bpc * 5 / 4;
        int clock_12bpc = clock_8bpc * 3 / 2;
        int desired_bpp;
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
        if (pipe_config->has_hdmi_sink)
@@ -1696,12 +1718,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
                pipe_config->pixel_multiplier = 2;
                clock_8bpc *= 2;
+               clock_10bpc *= 2;
                clock_12bpc *= 2;
        }
 
        if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) {
                if (!intel_hdmi_ycbcr420_config(connector, pipe_config,
-                                               &clock_12bpc, &clock_8bpc)) {
+                                               &clock_12bpc, &clock_10bpc,
+                                               &clock_8bpc)) {
                        DRM_ERROR("Can't support YCBCR420 output\n");
                        return false;
                }
@@ -1719,18 +1743,25 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        }
 
        /*
-        * HDMI is either 12 or 8, so if the display lets 10bpc sneak
-        * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi
-        * outputs. We also need to check that the higher clock still fits
-        * within limits.
+        * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need
+        * to check that the higher clock still fits within limits.
         */
-       if (hdmi_12bpc_possible(pipe_config) &&
-           hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) {
+       if (hdmi_deep_color_possible(pipe_config, 12) &&
+           hdmi_port_clock_valid(intel_hdmi, clock_12bpc,
+                                 true, force_dvi) == MODE_OK) {
                DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                desired_bpp = 12*3;
 
                /* Need to adjust the port link by 1.5x for 12bpc. */
                pipe_config->port_clock = clock_12bpc;
+       } else if (hdmi_deep_color_possible(pipe_config, 10) &&
+                  hdmi_port_clock_valid(intel_hdmi, clock_10bpc,
+                                        true, force_dvi) == MODE_OK) {
+               DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n");
+               desired_bpp = 10 * 3;
+
+               /* Need to adjust the port link by 1.25x for 10bpc. */
+               pipe_config->port_clock = clock_10bpc;
        } else {
                DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n");
                desired_bpp = 8*3;
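
The chain above prefers the deepest color the sink and port clock allow: 12bpc needs a 1.5x port clock, 10bpc a 1.25x one, and 8bpc is the fallback. A compact sketch of that priority order; deep_color_ok() and clock_ok() are hypothetical stand-ins for the hdmi_deep_color_possible() and hdmi_port_clock_valid() checks above.

static bool deep_color_ok(int bpc);	/* hypothetical EDID/platform check */
static bool clock_ok(int clock);	/* hypothetical port clock limit check */

static int example_pick_bpc(int clock_8bpc)
{
	if (deep_color_ok(12) && clock_ok(clock_8bpc * 3 / 2))
		return 12;	/* port clock scaled by 1.5x */
	if (deep_color_ok(10) && clock_ok(clock_8bpc * 5 / 4))
		return 10;	/* port clock scaled by 1.25x */
	return 8;		/* baseline, port clock unscaled */
}
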
@@ -2065,6 +2096,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
        intel_attach_aspect_ratio_property(connector);
+       drm_connector_attach_content_type_property(connector);
        connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }
 
@@ -2251,7 +2283,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
                ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
        else if (HAS_PCH_CNP(dev_priv))
                ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
-       else if (IS_ICELAKE(dev_priv))
+       else if (HAS_PCH_ICP(dev_priv))
                ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
        else
                ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2346,7 +2378,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
-       if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
+       if (IS_G45(dev_priv)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
index 43aa92beff2a691b8fe9ea0e00e1721680c73d2a..648a13c6043c0071ddd495424691d795b39b96a1 100644 (file)
  * it will use i915_hotplug_work_func where this logic is handled.
  */
 
-/**
- * intel_hpd_port - return port hard associated with certain pin.
- * @dev_priv: private driver data pointer
- * @pin: the hpd pin to get associated port
- *
- * Return port that is associatade with @pin and PORT_NONE if no port is
- * hard associated with that @pin.
- */
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
-                               enum hpd_pin pin)
-{
-       switch (pin) {
-       case HPD_PORT_A:
-               return PORT_A;
-       case HPD_PORT_B:
-               return PORT_B;
-       case HPD_PORT_C:
-               return PORT_C;
-       case HPD_PORT_D:
-               return PORT_D;
-       case HPD_PORT_E:
-               if (IS_CNL_WITH_PORT_F(dev_priv))
-                       return PORT_F;
-               return PORT_E;
-       case HPD_PORT_F:
-               return PORT_F;
-       default:
-               return PORT_NONE; /* no port for this pin */
-       }
-}
-
 /**
  * intel_hpd_pin_default - return default pin associated with certain port.
  * @dev_priv: private driver data pointer
@@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
        struct drm_device *dev = &dev_priv->drm;
-       int i;
+       enum hpd_pin pin;
 
        intel_runtime_pm_get(dev_priv);
 
        spin_lock_irq(&dev_priv->irq_lock);
-       for_each_hpd_pin(i) {
+       for_each_hpd_pin(pin) {
                struct drm_connector *connector;
                struct drm_connector_list_iter conn_iter;
 
-               if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+               if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
                        continue;
 
-               dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+               dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
 
                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);
 
-                       if (intel_connector->encoder->hpd_pin == i) {
+                       if (intel_connector->encoder->hpd_pin == pin) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         connector->name);
@@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
        return true;
 }
 
+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+       return intel_encoder_is_dig_port(encoder) &&
+               enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+}
+
 static void i915_digport_work_func(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.dig_port_work);
        u32 long_port_mask, short_port_mask;
-       struct intel_digital_port *intel_dig_port;
-       int i;
+       struct intel_encoder *encoder;
        u32 old_bits = 0;
 
        spin_lock_irq(&dev_priv->irq_lock);
@@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work)
        dev_priv->hotplug.short_port_mask = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       for (i = 0; i < I915_MAX_PORTS; i++) {
-               bool valid = false;
-               bool long_hpd = false;
-               intel_dig_port = dev_priv->hotplug.irq_port[i];
-               if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               struct intel_digital_port *dig_port;
+               enum port port = encoder->port;
+               bool long_hpd, short_hpd;
+               enum irqreturn ret;
+
+               if (!intel_encoder_has_hpd_pulse(encoder))
                        continue;
 
-               if (long_port_mask & (1 << i))  {
-                       valid = true;
-                       long_hpd = true;
-               } else if (short_port_mask & (1 << i))
-                       valid = true;
+               long_hpd = long_port_mask & BIT(port);
+               short_hpd = short_port_mask & BIT(port);
 
-               if (valid) {
-                       enum irqreturn ret;
+               if (!long_hpd && !short_hpd)
+                       continue;
 
-                       ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
-                       if (ret == IRQ_NONE) {
-                               /* fall back to old school hpd */
-                               old_bits |= (1 << intel_dig_port->base.hpd_pin);
-                       }
+               dig_port = enc_to_dig_port(&encoder->base);
+
+               ret = dig_port->hpd_pulse(dig_port, long_hpd);
+               if (ret == IRQ_NONE) {
+                       /* fall back to old school hpd */
+                       old_bits |= BIT(encoder->hpd_pin);
                }
        }
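
The rewritten loop walks encoders and tests per-port bits in the long/short masks rather than indexing a fixed irq_port[] array. A small sketch of the BIT()-mask dispatch pattern; the function is illustrative and assumes the driver's enum port values (PORT_A == 0 upward).

#include <linux/bits.h>

static void example_dispatch_hpd(u32 long_port_mask, u32 short_port_mask)
{
	enum port port;

	for (port = PORT_A; port <= PORT_F; port++) {
		bool long_hpd = long_port_mask & BIT(port);
		bool short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;	/* nothing pending on this port */

		/* the driver calls dig_port->hpd_pulse(dig_port, long_hpd)
		 * here, falling back to BIT(hpd_pin) bookkeeping on IRQ_NONE */
	}
}
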
 
@@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work)
 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
 {
-       int i;
-       enum port port;
+       struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
-       bool is_dig_port;
 
        if (!pin_mask)
                return;
 
        spin_lock(&dev_priv->irq_lock);
-       for_each_hpd_pin(i) {
-               if (!(BIT(i) & pin_mask))
-                       continue;
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               enum hpd_pin pin = encoder->hpd_pin;
+               bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
 
-               port = intel_hpd_pin_to_port(dev_priv, i);
-               is_dig_port = port != PORT_NONE &&
-                       dev_priv->hotplug.irq_port[port];
+               if (!(BIT(pin) & pin_mask))
+                       continue;
 
-               if (is_dig_port) {
-                       bool long_hpd = long_mask & BIT(i);
+               if (has_hpd_pulse) {
+                       bool long_hpd = long_mask & BIT(pin);
+                       enum port port = encoder->port;
 
                        DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
                                         long_hpd ? "long" : "short");
@@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                        }
                }
 
-               if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+               if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
                         * On GMCH platforms the interrupt mask bits only
                         * prevent irq generation, not the setting of the
@@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                         * interrupts on saner platforms.
                         */
                        WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
-                                 "Received HPD interrupt on pin %d although disabled\n", i);
+                                 "Received HPD interrupt on pin %d although disabled\n", pin);
                        continue;
                }
 
-               if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+               if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;
 
-               if (!is_dig_port) {
-                       dev_priv->hotplug.event_bits |= BIT(i);
+               if (!has_hpd_pulse) {
+                       dev_priv->hotplug.event_bits |= BIT(pin);
                        queue_hp = true;
                }
 
-               if (intel_hpd_irq_storm_detect(dev_priv, i)) {
-                       dev_priv->hotplug.event_bits &= ~BIT(i);
+               if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+                       dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
                }
        }
index 29128527740350e63c79fa604c32185b3f8db319..ffcad5fad6a7b4e6a24b1e34d53964caaeebfaae 100644 (file)
@@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc)
        intel_huc_fw_init_early(huc);
 }
 
+int intel_huc_init_misc(struct intel_huc *huc)
+{
+       struct drm_i915_private *i915 = huc_to_i915(huc);
+
+       intel_uc_fw_fetch(i915, &huc->fw);
+       return 0;
+}
+
 /**
  * intel_huc_auth() - Authenticate HuC uCode
  * @huc: intel_huc structure
index aa854907abac82b47f69d32d22a9e5cdddad7900..7e41d870b509cfa3c2736827c07f00a9159be2af 100644 (file)
@@ -36,9 +36,15 @@ struct intel_huc {
 };
 
 void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_misc(struct intel_huc *huc);
 int intel_huc_auth(struct intel_huc *huc);
 int intel_huc_check_status(struct intel_huc *huc);
 
+static inline void intel_huc_fini_misc(struct intel_huc *huc)
+{
+       intel_uc_fw_fini(&huc->fw);
+}
+
 static inline int intel_huc_sanitize(struct intel_huc *huc)
 {
        intel_uc_fw_sanitize(&huc->fw);
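
intel_huc_init_misc() fetches the HuC firmware early and intel_huc_fini_misc() releases it, giving callers a symmetric unwind point. A sketch of the pairing from a hypothetical caller; example_guc_init_misc() is an invented sibling used only to show the unwind.

static int example_guc_init_misc(struct drm_i915_private *i915); /* hypothetical */

static int example_uc_init_misc(struct drm_i915_private *i915)
{
	int ret;

	ret = intel_huc_init_misc(&i915->huc);
	if (ret)
		return ret;

	ret = example_guc_init_misc(i915);
	if (ret)
		intel_huc_fini_misc(&i915->huc);	/* unwind the fetch */

	return ret;
}
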
index e6875509bcd9cc53330a2e980d450b7e26a26e4e..bef32b7c248e0bdbe42eec64f36e0e3c64e5e5a7 100644 (file)
@@ -77,12 +77,12 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
 };
 
 static const struct gmbus_pin gmbus_pins_icp[] = {
-       [GMBUS_PIN_1_BXT] = { "dpa", GPIOA },
-       [GMBUS_PIN_2_BXT] = { "dpb", GPIOB },
-       [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC },
-       [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD },
-       [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE },
-       [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF },
+       [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+       [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+       [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+       [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+       [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+       [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
 };
 
 /* pin is expected to be valid */
@@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
        return ret;
 }
 
+static inline
+unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+{
+       return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+              GMBUS_BYTE_COUNT_MAX;
+}
+
 static int
 gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
                      unsigned short addr, u8 *buf, unsigned int len,
-                     u32 gmbus1_index)
+                     u32 gmbus0_reg, u32 gmbus1_index)
 {
+       unsigned int size = len;
+       bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+       bool extra_byte_added = false;
+
+       if (burst_read) {
+               /*
+                * As per the HW spec, a 512-byte burst read needs one extra
+                * byte to be read, and that extra byte is then ignored.
+                */
+               if (len == 512) {
+                       extra_byte_added = true;
+                       len++;
+               }
+               size = len % 256 + 256;
+               I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+       }
+
        I915_WRITE_FW(GMBUS1,
                      gmbus1_index |
                      GMBUS_CYCLE_WAIT |
-                     (len << GMBUS_BYTE_COUNT_SHIFT) |
+                     (size << GMBUS_BYTE_COUNT_SHIFT) |
                      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
                      GMBUS_SLAVE_READ | GMBUS_SW_RDY);
        while (len) {
@@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 
                val = I915_READ_FW(GMBUS3);
                do {
+                       if (extra_byte_added && len == 1)
+                               break;
+
                        *buf++ = val & 0xff;
                        val >>= 8;
                } while (--len && ++loop < 4);
+
+               if (burst_read && len == size - 4)
+                       /* Reset the override bit */
+                       I915_WRITE_FW(GMBUS0, gmbus0_reg);
        }
 
        return 0;
 }
 
+/*
+ * The HW spec says that a 512-byte burst read needs special treatment, but
+ * it is silent on other multiples of 256 bytes, and no I2C slave supporting
+ * such a long burst read could be found for experiments.
+ *
+ * So, until HW support is clarified, avoid burst read lengths that are
+ * multiples of 256 bytes (other than 512) by capping them at 767 bytes.
+ */
+#define INTEL_GMBUS_BURST_READ_MAX_LEN         767U
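
The burst-read path programs len % 256 + 256 into the byte-count field, adds an ignored extra byte at exactly 512, and caps requests at INTEL_GMBUS_BURST_READ_MAX_LEN. A worked example of that arithmetic; this helper is illustrative, not driver code.

static unsigned int example_burst_count(unsigned int len)
{
	if (len == 512)
		len++;			/* 512 needs one extra, ignored byte */
	return len % 256 + 256;		/* value programmed into GMBUS1 */
}

/* example_burst_count(300) == 300  (44 + 256)
 * example_burst_count(512) == 257  (513 % 256 + 256)
 * example_burst_count(767) == 511  (255 + 256, the count at the cap) */
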
+
 static int
 gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-               u32 gmbus1_index)
+               u32 gmbus0_reg, u32 gmbus1_index)
 {
        u8 *buf = msg->buf;
        unsigned int rx_size = msg->len;
@@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
        int ret;
 
        do {
-               len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+               if (HAS_GMBUS_BURST_READ(dev_priv))
+                       len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
+               else
+                       len = min(rx_size, gmbus_max_xfer_size(dev_priv));
 
-               ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
-                                           buf, len, gmbus1_index);
+               ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+                                           gmbus0_reg, gmbus1_index);
                if (ret)
                        return ret;
 
@@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
        int ret;
 
        do {
-               len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+               len = min(tx_size, gmbus_max_xfer_size(dev_priv));
 
                ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
                                             gmbus1_index);
@@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
 }
 
 static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+                u32 gmbus0_reg)
 {
        u32 gmbus1_index = 0;
        u32 gmbus5 = 0;
@@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
                I915_WRITE_FW(GMBUS5, gmbus5);
 
        if (msgs[1].flags & I2C_M_RD)
-               ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+               ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+                                     gmbus1_index);
        else
                ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);
 
@@ -544,10 +590,12 @@ retry:
        for (; i < num; i += inc) {
                inc = 1;
                if (gmbus_is_index_xfer(msgs, i, num)) {
-                       ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+                       ret = gmbus_index_xfer(dev_priv, &msgs[i],
+                                              gmbus0_source | bus->reg0);
                        inc = 2; /* an index transmission is two msgs */
                } else if (msgs[i].flags & I2C_M_RD) {
-                       ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+                       ret = gmbus_xfer_read(dev_priv, &msgs[i],
+                                             gmbus0_source | bus->reg0, 0);
                } else {
                        ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
                }
@@ -771,7 +819,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
        unsigned int pin;
        int ret;
 
-       if (HAS_PCH_NOP(dev_priv))
+       if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return 0;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
index 6269750e2b547c3db8969e54c6ab03116a1e112f..430732720e656b0468b0914d9d1e528ecc3cf5f8 100644 (file)
@@ -126,9 +126,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
                return platdev;
        }
 
-       pm_runtime_forbid(&platdev->dev);
-       pm_runtime_set_active(&platdev->dev);
-       pm_runtime_enable(&platdev->dev);
+       pm_runtime_no_callbacks(&platdev->dev);
 
        return platdev;
 }
index 15434cad543001317be875f1a266e3fef6636042..174479232e94312bc232a7792ded191e960fee49 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
+#include "i915_vgpu.h"
 #include "intel_lrc_reg.h"
 #include "intel_mocs.h"
 #include "intel_workarounds.h"
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-                                           struct intel_engine_cs *engine);
+                                           struct intel_engine_cs *engine,
+                                           struct intel_context *ce);
 static void execlists_init_reg_state(u32 *reg_state,
                                     struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine,
@@ -189,12 +191,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
                !i915_request_completed(last));
 }
 
-/**
- * intel_lr_context_descriptor_update() - calculate & cache the descriptor
- *                                       descriptor for a pinned context
- * @ctx: Context to work on
- * @engine: Engine the descriptor will be used with
- *
+/*
  * The context descriptor encodes various attributes of a context,
  * including its GTT address and some flags. Because it's fairly
  * expensive to calculate, we'll just do it once and cache the result,
@@ -204,7 +201,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
  *
  *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
  *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
- *      bits 32-52:    ctx ID, a globally unique tag
+ *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
  *      bits 53-54:    mbz, reserved for use by hardware
  *      bits 55-63:    group ID, currently unused and set to 0
  *
@@ -222,9 +219,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
  */
 static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-                                  struct intel_engine_cs *engine)
+                                  struct intel_engine_cs *engine,
+                                  struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
        u64 desc;
 
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -237,6 +234,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
                                                                /* bits 12-31 */
        GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
 
+       /*
+        * The following 32bits are copied into the OA reports (dword 2).
+        * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
+        * anything below.
+        */
        if (INTEL_GEN(ctx->i915) >= 11) {
                GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
                desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
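
Given the layout documented above, the descriptor is assembled by shifting each field into a u64. A sketch of the packing; the constants are hypothetical values mirroring the documented bit positions, not the driver's macros.

#include <linux/types.h>

#define EX_CTX_FLAGS_MASK	0xfffULL	/* bits 0-11 */
#define EX_LRCA_MASK		0xfffff000ULL	/* bits 12-31, page aligned */
#define EX_CTX_ID_SHIFT		32		/* ctx ID starts at bit 32 */

static u64 example_make_descriptor(u64 flags, u64 lrca, u64 ctx_id)
{
	u64 desc;

	desc  = flags & EX_CTX_FLAGS_MASK;	/* bits 0-11 */
	desc |= lrca & EX_LRCA_MASK;		/* bits 12-31 */
	desc |= ctx_id << EX_CTX_ID_SHIFT;	/* bits 32-52 */
	return desc;				/* bits 53-63 stay zero */
}
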
@@ -271,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
-       parent = &execlists->queue.rb_node;
+       parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
@@ -309,10 +311,7 @@ find_priolist:
        p->priority = prio;
        INIT_LIST_HEAD(&p->requests);
        rb_link_node(&p->node, rb, parent);
-       rb_insert_color(&p->node, &execlists->queue);
-
-       if (first)
-               execlists->first = &p->node;
+       rb_insert_color_cached(&p->node, &execlists->queue, first);
 
        return p;
 }
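
lookup_priolist() now keeps the leftmost (highest-priority) node cached, so consumers can peek it in O(1) via rb_first_cached(). A self-contained sketch of the cached-rbtree pattern this patch adopts:

#include <linux/rbtree.h>

struct example_node {
	struct rb_node rb;
	int prio;
};

static void example_insert(struct rb_root_cached *root, struct example_node *n)
{
	struct rb_node **p = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct example_node *e;

		parent = *p;
		e = rb_entry(parent, struct example_node, rb);
		if (n->prio > e->prio) {
			p = &parent->rb_left;	/* higher priority goes left */
		} else {
			p = &parent->rb_right;
			leftmost = false;	/* no longer the leftmost */
		}
	}
	rb_link_node(&n->rb, parent, p);
	rb_insert_color_cached(&n->rb, root, leftmost);
}

static struct example_node *example_peek(struct rb_root_cached *root)
{
	struct rb_node *rb = rb_first_cached(root);	/* O(1) lookup */

	return rb ? rb_entry(rb, struct example_node, rb) : NULL;
}

A root is declared as struct rb_root_cached root = RB_ROOT_CACHED; and nodes are removed with rb_erase_cached(), as in the hunks below.
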
@@ -418,9 +417,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-       struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+       struct intel_context *ce = rq->hw_context;
        struct i915_hw_ppgtt *ppgtt =
-               rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+               rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
        u32 *reg_state = ce->lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +429,7 @@ static u64 execlists_update_context(struct i915_request *rq)
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
         */
-       if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
+       if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
                execlists_update_context_pdps(ppgtt, reg_state);
 
        return ce->lrc_desc;
@@ -453,6 +452,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
        struct execlist_port *port = execlists->port;
        unsigned int n;
 
+       /*
+        * We can skip acquiring intel_runtime_pm_get() here as it was taken
+        * on our behalf by the request (see i915_gem_mark_busy()) and it will
+        * not be relinquished until the device is idle (see
+        * i915_gem_idle_work_handler()). As a precaution, we make sure
+        * that all ELSP are drained, i.e. we have processed the CSB,
+        * before allowing ourselves to idle and calling intel_runtime_pm_put().
+        */
+       GEM_BUG_ON(!engine->i915->gt.awake);
+
        /*
         * ELSQ note: the submit queue is not cleared after being submitted
         * to the HW so we need to make sure we always clean it up. This is
@@ -495,14 +504,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
        execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
 }
 
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
 {
        return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-               i915_gem_context_force_single_submission(ctx));
+               i915_gem_context_force_single_submission(ce->gem_context));
 }
 
-static bool can_merge_ctx(const struct i915_gem_context *prev,
-                         const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+                         const struct intel_context *next)
 {
        if (prev != next)
                return false;
@@ -552,11 +561,24 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
        if (execlists->ctrl_reg)
                writel(EL_CTRL_LOAD, execlists->ctrl_reg);
 
-       execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
-       execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
+       execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
+       execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
+}
+
+static void complete_preempt_context(struct intel_engine_execlists *execlists)
+{
+       GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
+
+       if (inject_preempt_hang(execlists))
+               return;
+
+       execlists_cancel_port_requests(execlists);
+       __unwind_incomplete_requests(container_of(execlists,
+                                                 struct intel_engine_cs,
+                                                 execlists));
 }
 
-static bool __execlists_dequeue(struct intel_engine_cs *engine)
+static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
@@ -566,9 +588,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
        struct rb_node *rb;
        bool submit = false;
 
-       lockdep_assert_held(&engine->timeline.lock);
-
-       /* Hardware submission is through 2 ports. Conceptually each port
+       /*
+        * Hardware submission is through 2 ports. Conceptually each port
         * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
         * static for a context, and unique to each, so we only execute
         * requests belonging to a single context from each ring. RING_HEAD
@@ -589,9 +610,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
         * and context switches) submission.
         */
 
-       rb = execlists->first;
-       GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
        if (last) {
                /*
                 * Don't resubmit or switch until all outstanding
@@ -602,8 +620,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                GEM_BUG_ON(!execlists_is_active(execlists,
                                                EXECLISTS_ACTIVE_USER));
                GEM_BUG_ON(!port_count(&port[0]));
-               if (port_count(&port[0]) > 1)
-                       return false;
 
                /*
                 * If we write to ELSP a second time before the HW has had
@@ -613,11 +629,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                 * the HW to indicate that it has had a chance to respond.
                 */
                if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
-                       return false;
+                       return;
 
                if (need_preempt(engine, last, execlists->queue_priority)) {
                        inject_preempt_context(engine);
-                       return false;
+                       return;
                }
 
                /*
@@ -642,7 +658,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                 * priorities of the ports haven't been switch.
                 */
                if (port_count(&port[1]))
-                       return false;
+                       return;
 
                /*
                 * WaIdleLiteRestore:bdw,skl
@@ -655,7 +671,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                last->tail = last->wa_tail;
        }
 
-       while (rb) {
+       while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
 
@@ -671,7 +687,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                         * second request, and so we never need to tell the
                         * hardware about the first.
                         */
-                       if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+                       if (last &&
+                           !can_merge_ctx(rq->hw_context, last->hw_context)) {
                                /*
                                 * If we are on the second port and cannot
                                 * combine this request with the last, then we
@@ -690,14 +707,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                                 * the same context (even though a different
                                 * request) to the second port.
                                 */
-                               if (ctx_single_port_submission(last->ctx) ||
-                                   ctx_single_port_submission(rq->ctx)) {
+                               if (ctx_single_port_submission(last->hw_context) ||
+                                   ctx_single_port_submission(rq->hw_context)) {
                                        __list_del_many(&p->requests,
                                                        &rq->sched.link);
                                        goto done;
                                }
 
-                               GEM_BUG_ON(last->ctx == rq->ctx);
+                               GEM_BUG_ON(last->hw_context == rq->hw_context);
 
                                if (submit)
                                        port_assign(port, last);
@@ -713,8 +730,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
                        submit = true;
                }
 
-               rb = rb_next(rb);
-               rb_erase(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &execlists->queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
@@ -740,35 +756,23 @@ done:
        execlists->queue_priority =
                port != execlists->port ? rq_prio(last) : INT_MIN;
 
-       execlists->first = rb;
-       if (submit)
+       if (submit) {
                port_assign(port, last);
+               execlists_submit_ports(engine);
+       }
 
        /* We must always keep the beast fed if we have work piled up */
-       GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+       GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+                  !port_isset(execlists->port));
 
        /* Re-evaluate the executing context setup after each preemptive kick */
        if (last)
                execlists_user_begin(execlists, execlists->port);
 
-       return submit;
-}
-
-static void execlists_dequeue(struct intel_engine_cs *engine)
-{
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       unsigned long flags;
-       bool submit;
-
-       spin_lock_irqsave(&engine->timeline.lock, flags);
-       submit = __execlists_dequeue(engine);
-       spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
-       if (submit)
-               execlists_submit_ports(engine);
-
-       GEM_BUG_ON(port_isset(execlists->port) &&
-                  !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
+       /* If the engine is now idle, so should be the flag; and vice versa. */
+       GEM_BUG_ON(execlists_is_active(&engine->execlists,
+                                      EXECLISTS_ACTIVE_USER) ==
+                  !port_isset(engine->execlists.port));
 }
 
 void
@@ -799,82 +803,27 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
                port++;
        }
 
-       execlists_user_end(execlists);
+       execlists_clear_all_active(execlists);
 }
 
-static void clear_gtiir(struct intel_engine_cs *engine)
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       int i;
-
        /*
-        * Clear any pending interrupt state.
-        *
-        * We do it twice out of paranoia that some of the IIR are
-        * double buffered, and so if we only reset it once there may
-        * still be an interrupt pending.
+        * After a reset, the HW starts writing into CSB entry [0]. We
+        * therefore have to set our HEAD pointer back one entry so that
+        * the *first* entry we check is entry 0. To complicate this further,
+        * as we don't wait for the first interrupt after reset, we have to
+        * fake the HW write to point back to the last entry so that our
+        * inline comparison of our cached head position against the last HW
+        * write works even before the first interrupt.
         */
-       if (INTEL_GEN(dev_priv) >= 11) {
-               static const struct {
-                       u8 bank;
-                       u8 bit;
-               } gen11_gtiir[] = {
-                       [RCS] = {0, GEN11_RCS0},
-                       [BCS] = {0, GEN11_BCS},
-                       [_VCS(0)] = {1, GEN11_VCS(0)},
-                       [_VCS(1)] = {1, GEN11_VCS(1)},
-                       [_VCS(2)] = {1, GEN11_VCS(2)},
-                       [_VCS(3)] = {1, GEN11_VCS(3)},
-                       [_VECS(0)] = {1, GEN11_VECS(0)},
-                       [_VECS(1)] = {1, GEN11_VECS(1)},
-               };
-               unsigned long irqflags;
-
-               GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-               for (i = 0; i < 2; i++) {
-                       gen11_reset_one_iir(dev_priv,
-                                           gen11_gtiir[engine->id].bank,
-                                           gen11_gtiir[engine->id].bit);
-               }
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-       } else {
-               static const u8 gtiir[] = {
-                       [RCS]  = 0,
-                       [BCS]  = 0,
-                       [VCS]  = 1,
-                       [VCS2] = 1,
-                       [VECS] = 3,
-               };
-
-               GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
-               for (i = 0; i < 2; i++) {
-                       I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
-                                  engine->irq_keep_mask);
-                       POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
-               }
-               GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
-                          engine->irq_keep_mask);
-       }
+       execlists->csb_head = execlists->csb_write_reset;
+       WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
 }
 
-static void reset_irq(struct intel_engine_cs *engine)
+static void nop_submission_tasklet(unsigned long data)
 {
-       /* Mark all CS interrupts as complete */
-       smp_store_mb(engine->execlists.active, 0);
-       synchronize_hardirq(engine->i915->drm.irq);
-
-       clear_gtiir(engine);
-
-       /*
-        * The port is checked prior to scheduling a tasklet, but
-        * just in case we have suspended the tasklet to do the
-        * wedging make sure that when it wakes, it decides there
-        * is no work to do by clearing the irq_posted bit.
-        */
-       clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+       /* The driver is wedged; don't process any more events. */
 }
 
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
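
reset_csb_pointers() above rewinds the cached head so the first entry consumed after a reset is CSB[0], and process_csb() below walks that head toward the hardware's write pointer, wrapping at the buffer size. A simplified sketch of the consumption loop; READ_ONCE() and rmb() are the kernel primitives used in the real code, and the six-entry size mirrors GEN8_CSB_ENTRIES.

#define EX_CSB_ENTRIES	6	/* GEN8 has six CSB entries */

static void example_process_csb(const u32 *buf, u8 *cached_head,
				const u8 *hw_write)
{
	u8 head = *cached_head;
	u8 tail = READ_ONCE(*hw_write);

	if (head == tail)
		return;		/* nothing new since last pass */

	rmb();	/* read CSB[] only after the write pointer, never before */

	do {
		if (++head == EX_CSB_ENTRIES)
			head = 0;	/* ring buffer wrap */
		/* consume the event pair buf[2 * head], buf[2 * head + 1] */
	} while (head != tail);

	*cached_head = head;
}
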
@@ -901,13 +850,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
         * submission's irq state, we also wish to remind ourselves that
         * it is irq state.)
         */
-       local_irq_save(flags);
+       spin_lock_irqsave(&engine->timeline.lock, flags);
 
        /* Cancel the requests on the HW and clear the ELSP tracker. */
        execlists_cancel_port_requests(execlists);
-       reset_irq(engine);
-
-       spin_lock(&engine->timeline.lock);
+       execlists_user_end(execlists);
 
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->timeline.requests, link) {
@@ -917,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        }
 
        /* Flush the queued requests to the timeline list (for retiring). */
-       rb = execlists->first;
-       while (rb) {
+       while ((rb = rb_first_cached(&execlists->queue))) {
                struct i915_priolist *p = to_priolist(rb);
 
                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
@@ -928,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                        __i915_request_submit(rq);
                }
 
-               rb = rb_next(rb);
-               rb_erase(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &execlists->queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
@@ -938,221 +883,198 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
        execlists->queue_priority = INT_MIN;
-       execlists->queue = RB_ROOT;
-       execlists->first = NULL;
+       execlists->queue = RB_ROOT_CACHED;
        GEM_BUG_ON(port_isset(execlists->port));
 
-       spin_unlock(&engine->timeline.lock);
+       GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+       execlists->tasklet.func = nop_submission_tasklet;
 
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
-/*
- * Check the unread Context Status Buffers and manage the submission of new
- * contexts to the ELSP accordingly.
- */
-static void execlists_submission_tasklet(unsigned long data)
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+       return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
+static void process_csb(struct intel_engine_cs *engine)
 {
-       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct execlist_port *port = execlists->port;
-       struct drm_i915_private *dev_priv = engine->i915;
-       bool fw = false;
+       const u32 * const buf = execlists->csb_status;
+       u8 head, tail;
 
        /*
-        * We can skip acquiring intel_runtime_pm_get() here as it was taken
-        * on our behalf by the request (see i915_gem_mark_busy()) and it will
-        * not be relinquished until the device is idle (see
-        * i915_gem_idle_work_handler()). As a precaution, we make sure
-        * that all ELSP are drained i.e. we have processed the CSB,
-        * before allowing ourselves to idle and calling intel_runtime_pm_put().
+        * Note that csb_write, csb_status may be either in HWSP or mmio.
+        * When reading from the csb_write mmio register, we have to be
+        * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
+        * the low 4bits. As it happens we know the next 4bits are always
+        * zero and so we can simply mask off the low u8 of the register
+        * and treat it identically to reading from the HWSP (without having
+        * to use explicit shifting and masking, and probably bifurcating
+        * the code to handle the legacy mmio read).
         */
-       GEM_BUG_ON(!dev_priv->gt.awake);
+       head = execlists->csb_head;
+       tail = READ_ONCE(*execlists->csb_write);
+       GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
+       if (unlikely(head == tail))
+               return;
 
        /*
-        * Prefer doing test_and_clear_bit() as a two stage operation to avoid
-        * imposing the cost of a locked atomic transaction when submitting a
-        * new request (outside of the context-switch interrupt).
+        * Hopefully paired with a wmb() in HW!
+        *
+        * We must complete the read of the write pointer before any reads
+        * from the CSB, so that we do not see stale values. Without an rmb
+        * (lfence) the HW may speculatively perform the CSB[] reads *before*
+        * we perform the READ_ONCE(*csb_write).
         */
-       while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
-               /* The HWSP contains a (cacheable) mirror of the CSB */
-               const u32 *buf =
-                       &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
-               unsigned int head, tail;
+       rmb();
 
-               if (unlikely(execlists->csb_use_mmio)) {
-                       buf = (u32 * __force)
-                               (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
-                       execlists->csb_head = -1; /* force mmio read of CSB ptrs */
-               }
+       do {
+               struct i915_request *rq;
+               unsigned int status;
+               unsigned int count;
 
-               /* Clear before reading to catch new interrupts */
-               clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-               smp_mb__after_atomic();
+               if (++head == GEN8_CSB_ENTRIES)
+                       head = 0;
 
-               if (unlikely(execlists->csb_head == -1)) { /* following a reset */
-                       if (!fw) {
-                               intel_uncore_forcewake_get(dev_priv,
-                                                          execlists->fw_domains);
-                               fw = true;
-                       }
+               /*
+                * We are flying near dragons again.
+                *
+                * We hold a reference to the request in execlist_port[]
+                * but no more than that. We are operating in softirq
+                * context and so cannot hold any mutex or sleep. That
+                * prevents us stopping the requests we are processing
+                * in port[] from being retired simultaneously (the
+                * breadcrumb will be complete before we see the
+                * context-switch). As we only hold the reference to the
+                * request, any pointer chasing underneath the request
+                * is subject to a potential use-after-free. Thus we
+                * store all of the bookkeeping within port[] as
+                * required, and avoid using unguarded pointers beneath
+                * request itself. The same applies to the atomic
+                * status notifier.
+                */
 
-                       head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
-                       tail = GEN8_CSB_WRITE_PTR(head);
-                       head = GEN8_CSB_READ_PTR(head);
-                       execlists->csb_head = head;
-               } else {
-                       const int write_idx =
-                               intel_hws_csb_write_index(dev_priv) -
-                               I915_HWS_CSB_BUF0_INDEX;
+               GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+                         engine->name, head,
+                         buf[2 * head + 0], buf[2 * head + 1],
+                         execlists->active);
+
+               status = buf[2 * head];
+               if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
+                             GEN8_CTX_STATUS_PREEMPTED))
+                       execlists_set_active(execlists,
+                                            EXECLISTS_ACTIVE_HWACK);
+               if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
+                       execlists_clear_active(execlists,
+                                              EXECLISTS_ACTIVE_HWACK);
+
+               if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
+                       continue;
+
+               /* We should never get a COMPLETED | IDLE_ACTIVE! */
+               GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
 
-                       head = execlists->csb_head;
-                       tail = READ_ONCE(buf[write_idx]);
-                       rmb(); /* Hopefully paired with a wmb() in HW */
+               if (status & GEN8_CTX_STATUS_COMPLETE &&
+                   buf[2*head + 1] == execlists->preempt_complete_status) {
+                       GEM_TRACE("%s preempt-idle\n", engine->name);
+                       complete_preempt_context(execlists);
+                       continue;
                }
-               GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
-                         engine->name,
-                         head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
-                         tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
 
-               while (head != tail) {
-                       struct i915_request *rq;
-                       unsigned int status;
-                       unsigned int count;
+               if (status & GEN8_CTX_STATUS_PREEMPTED &&
+                   execlists_is_active(execlists,
+                                       EXECLISTS_ACTIVE_PREEMPT))
+                       continue;
 
-                       if (++head == GEN8_CSB_ENTRIES)
-                               head = 0;
+               GEM_BUG_ON(!execlists_is_active(execlists,
+                                               EXECLISTS_ACTIVE_USER));
 
-                       /* We are flying near dragons again.
-                        *
-                        * We hold a reference to the request in execlist_port[]
-                        * but no more than that. We are operating in softirq
-                        * context and so cannot hold any mutex or sleep. That
-                        * prevents us stopping the requests we are processing
-                        * in port[] from being retired simultaneously (the
-                        * breadcrumb will be complete before we see the
-                        * context-switch). As we only hold the reference to the
-                        * request, any pointer chasing underneath the request
-                        * is subject to a potential use-after-free. Thus we
-                        * store all of the bookkeeping within port[] as
-                        * required, and avoid using unguarded pointers beneath
-                        * request itself. The same applies to the atomic
-                        * status notifier.
+               rq = port_unpack(port, &count);
+               GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+                         engine->name,
+                         port->context_id, count,
+                         rq ? rq->global_seqno : 0,
+                         rq ? rq->fence.context : 0,
+                         rq ? rq->fence.seqno : 0,
+                         intel_engine_get_seqno(engine),
+                         rq ? rq_prio(rq) : 0);
+
+               /* Check the context/desc id for this event matches */
+               GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+
+               GEM_BUG_ON(count == 0);
+               if (--count == 0) {
+                       /*
+                        * On the final event corresponding to the
+                        * submission of this context, we expect either
+                        * an element-switch event or a completion
+                        * event (and on completion, the active-idle
+                        * marker). No more preemptions, lite-restore
+                        * or otherwise.
                         */
+                       GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+                       GEM_BUG_ON(port_isset(&port[1]) &&
+                                  !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
+                       GEM_BUG_ON(!port_isset(&port[1]) &&
+                                  !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
 
-                       status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
-                       GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
-                                 engine->name, head,
-                                 status, buf[2*head + 1],
-                                 execlists->active);
-
-                       if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
-                                     GEN8_CTX_STATUS_PREEMPTED))
-                               execlists_set_active(execlists,
-                                                    EXECLISTS_ACTIVE_HWACK);
-                       if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
-                               execlists_clear_active(execlists,
-                                                      EXECLISTS_ACTIVE_HWACK);
-
-                       if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
-                               continue;
-
-                       /* We should never get a COMPLETED | IDLE_ACTIVE! */
-                       GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
-
-                       if (status & GEN8_CTX_STATUS_COMPLETE &&
-                           buf[2*head + 1] == execlists->preempt_complete_status) {
-                               GEM_TRACE("%s preempt-idle\n", engine->name);
-
-                               execlists_cancel_port_requests(execlists);
-                               execlists_unwind_incomplete_requests(execlists);
-
-                               GEM_BUG_ON(!execlists_is_active(execlists,
-                                                               EXECLISTS_ACTIVE_PREEMPT));
-                               execlists_clear_active(execlists,
-                                                      EXECLISTS_ACTIVE_PREEMPT);
-                               continue;
-                       }
-
-                       if (status & GEN8_CTX_STATUS_PREEMPTED &&
-                           execlists_is_active(execlists,
-                                               EXECLISTS_ACTIVE_PREEMPT))
-                               continue;
-
-                       GEM_BUG_ON(!execlists_is_active(execlists,
-                                                       EXECLISTS_ACTIVE_USER));
-
-                       rq = port_unpack(port, &count);
-                       GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
-                                 engine->name,
-                                 port->context_id, count,
-                                 rq ? rq->global_seqno : 0,
-                                 rq ? rq->fence.context : 0,
-                                 rq ? rq->fence.seqno : 0,
-                                 intel_engine_get_seqno(engine),
-                                 rq ? rq_prio(rq) : 0);
+                       /*
+                        * We rely on the hardware being strongly
+                        * ordered, that the breadcrumb write is
+                        * coherent (visible from the CPU) before the
+                        * user interrupt and CSB is processed.
+                        */
+                       GEM_BUG_ON(!i915_request_completed(rq));
 
-                       /* Check the context/desc id for this event matches */
-                       GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+                       execlists_context_schedule_out(rq,
+                                                      INTEL_CONTEXT_SCHEDULE_OUT);
+                       i915_request_put(rq);
 
-                       GEM_BUG_ON(count == 0);
-                       if (--count == 0) {
-                               /*
-                                * On the final event corresponding to the
-                                * submission of this context, we expect either
-                                * an element-switch event or a completion
-                                * event (and on completion, the active-idle
-                                * marker). No more preemptions, lite-restore
-                                * or otherwise.
-                                */
-                               GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
-                               GEM_BUG_ON(port_isset(&port[1]) &&
-                                          !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
-                               GEM_BUG_ON(!port_isset(&port[1]) &&
-                                          !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+                       GEM_TRACE("%s completed ctx=%d\n",
+                                 engine->name, port->context_id);
 
-                               /*
-                                * We rely on the hardware being strongly
-                                * ordered, that the breadcrumb write is
-                                * coherent (visible from the CPU) before the
-                                * user interrupt and CSB is processed.
-                                */
-                               GEM_BUG_ON(!i915_request_completed(rq));
-
-                               execlists_context_schedule_out(rq,
-                                                              INTEL_CONTEXT_SCHEDULE_OUT);
-                               i915_request_put(rq);
-
-                               GEM_TRACE("%s completed ctx=%d\n",
-                                         engine->name, port->context_id);
-
-                               port = execlists_port_complete(execlists, port);
-                               if (port_isset(port))
-                                       execlists_user_begin(execlists, port);
-                               else
-                                       execlists_user_end(execlists);
-                       } else {
-                               port_set(port, port_pack(rq, count));
-                       }
+                       port = execlists_port_complete(execlists, port);
+                       if (port_isset(port))
+                               execlists_user_begin(execlists, port);
+                       else
+                               execlists_user_end(execlists);
+               } else {
+                       port_set(port, port_pack(rq, count));
                }
+       } while (head != tail);
 
-               if (head != execlists->csb_head) {
-                       execlists->csb_head = head;
-                       writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
-                              dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
-               }
-       }
+       execlists->csb_head = head;
+}
 
-       if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
+static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
+{
+       lockdep_assert_held(&engine->timeline.lock);
+
+       process_csb(engine);
+       if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
                execlists_dequeue(engine);
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+       unsigned long flags;
 
-       if (fw)
-               intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
+       GEM_TRACE("%s awake?=%d, active=%x\n",
+                 engine->name,
+                 engine->i915->gt.awake,
+                 engine->execlists.active);
 
-       /* If the engine is now idle, so should be the flag; and vice versa. */
-       GEM_BUG_ON(execlists_is_active(&engine->execlists,
-                                      EXECLISTS_ACTIVE_USER) ==
-                  !port_isset(engine->execlists.port));
+       spin_lock_irqsave(&engine->timeline.lock, flags);
+       __execlists_submission_tasklet(engine);
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
 static void queue_request(struct intel_engine_cs *engine,
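
The rewritten process_csb() above treats the context-status buffer as a single-producer ring: snapshot the write pointer once, issue a read barrier, then walk the head toward the tail with wraparound. A minimal userspace sketch of that access pattern, with illustrative names (drain_csb, NUM_ENTRIES) and an acquire fence standing in for the kernel's rmb():

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_ENTRIES 6   /* GEN8 exposes 6 CSB slots; fixed here for the model */

    struct csb_entry { uint32_t status, context_id; };

    /*
     * Consumer side of a hardware-written ring: snapshot the producer's
     * write pointer once, fence, then drain entries up to that point,
     * wrapping the index exactly as process_csb() does.
     */
    static unsigned int drain_csb(const struct csb_entry *buf,
                                  const volatile uint8_t *write_ptr,
                                  uint8_t *head)
    {
            uint8_t tail = *write_ptr;      /* one snapshot, like READ_ONCE() */
            unsigned int handled = 0;

            /* rmb() equivalent: entry reads must not pass the pointer read */
            __atomic_thread_fence(__ATOMIC_ACQUIRE);

            while (*head != tail) {
                    if (++*head == NUM_ENTRIES)
                            *head = 0;
                    printf("csb[%u]: status=%#x ctx=%#x\n", *head,
                           (unsigned int)buf[*head].status,
                           (unsigned int)buf[*head].context_id);
                    handled++;
            }
            return handled;
    }
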
@@ -1163,16 +1085,30 @@ static void queue_request(struct intel_engine_cs *engine,
                      &lookup_priolist(engine, prio)->requests);
 }
 
-static void __submit_queue(struct intel_engine_cs *engine, int prio)
+static void __update_queue(struct intel_engine_cs *engine, int prio)
 {
        engine->execlists.queue_priority = prio;
-       tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static void __submit_queue_imm(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+
+       if (reset_in_progress(execlists))
+               return; /* defer until we restart the engine following reset */
+
+       if (execlists->tasklet.func == execlists_submission_tasklet)
+               __execlists_submission_tasklet(engine);
+       else
+               tasklet_hi_schedule(&execlists->tasklet);
 }
 
 static void submit_queue(struct intel_engine_cs *engine, int prio)
 {
-       if (prio > engine->execlists.queue_priority)
-               __submit_queue(engine, prio);
+       if (prio > engine->execlists.queue_priority) {
+               __update_queue(engine, prio);
+               __submit_queue_imm(engine);
+       }
 }
 
 static void execlists_submit_request(struct i915_request *request)
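
__submit_queue_imm() above is the heart of the direct-submission change: when the engine owns the execlists submission tasklet and no reset is in flight, the dequeue runs inline under the timeline lock rather than bouncing through softirq. A hedged sketch of that dispatch-inline-or-defer shape (struct submitter and its fields are illustrative, not the driver's types):

    #include <stdbool.h>

    struct engine;                          /* opaque for the sketch */

    struct submitter {
            void (*tasklet_func)(struct engine *);
            bool reset_in_progress;
            void (*schedule)(struct submitter *);   /* defer to softirq */
    };

    static void submission_work(struct engine *e) { (void)e; /* dequeue */ }

    /* Called with the same lock the tasklet body itself would take. */
    static void submit_immediate(struct submitter *s, struct engine *e)
    {
            if (s->reset_in_progress)
                    return;         /* replayed when the engine restarts */

            if (s->tasklet_func == submission_work)
                    s->tasklet_func(e);     /* run inline: no softirq latency */
            else
                    s->schedule(s);         /* someone else owns the tasklet */
    }
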
@@ -1184,11 +1120,12 @@ static void execlists_submit_request(struct i915_request *request)
        spin_lock_irqsave(&engine->timeline.lock, flags);
 
        queue_request(engine, &request->sched, rq_prio(request));
-       submit_queue(engine, rq_prio(request));
 
-       GEM_BUG_ON(!engine->execlists.first);
+       GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
        GEM_BUG_ON(list_empty(&request->sched.link));
 
+       submit_queue(engine, rq_prio(request));
+
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }
 
@@ -1315,13 +1252,40 @@ static void execlists_schedule(struct i915_request *request,
                }
 
                if (prio > engine->execlists.queue_priority &&
-                   i915_sw_fence_done(&sched_to_request(node)->submit))
-                       __submit_queue(engine, prio);
+                   i915_sw_fence_done(&sched_to_request(node)->submit)) {
+                       /* defer submission until after all of our updates */
+                       __update_queue(engine, prio);
+                       tasklet_hi_schedule(&engine->execlists.tasklet);
+               }
        }
 
        spin_unlock_irq(&engine->timeline.lock);
 }
 
+static void execlists_context_destroy(struct intel_context *ce)
+{
+       GEM_BUG_ON(ce->pin_count);
+
+       if (!ce->state)
+               return;
+
+       intel_ring_free(ce->ring);
+
+       GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+       i915_gem_object_put(ce->state->obj);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+       intel_ring_unpin(ce->ring);
+
+       ce->state->obj->pin_global--;
+       i915_gem_object_unpin_map(ce->state->obj);
+       i915_vma_unpin(ce->state);
+
+       i915_gem_context_put(ce->gem_context);
+}
+
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 {
        unsigned int flags;
@@ -1345,21 +1309,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
        return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
 }
 
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
-                     struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+                       struct i915_gem_context *ctx,
+                       struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
        void *vaddr;
        int ret;
 
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-       if (likely(ce->pin_count++))
-               goto out;
-       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-       ret = execlists_context_deferred_alloc(ctx, engine);
+       ret = execlists_context_deferred_alloc(ctx, engine, ce);
        if (ret)
                goto err;
        GEM_BUG_ON(!ce->state);
@@ -1378,17 +1336,17 @@ execlists_context_pin(struct intel_engine_cs *engine,
        if (ret)
                goto unpin_map;
 
-       intel_lr_context_descriptor_update(ctx, engine);
+       intel_lr_context_descriptor_update(ctx, engine, ce);
 
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
        ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
                i915_ggtt_offset(ce->ring->vma);
+       GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
        ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
 
        ce->state->obj->pin_global++;
        i915_gem_context_get(ctx);
-out:
-       return ce->ring;
+       return ce;
 
 unpin_map:
        i915_gem_object_unpin_map(ce->state->obj);
@@ -1399,33 +1357,33 @@ err:
        return ERR_PTR(ret);
 }
 
-static void execlists_context_unpin(struct intel_engine_cs *engine,
-                                   struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+       .unpin = execlists_context_unpin,
+       .destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+                     struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-       GEM_BUG_ON(ce->pin_count == 0);
 
-       if (--ce->pin_count)
-               return;
+       if (likely(ce->pin_count++))
+               return ce;
+       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-       intel_ring_unpin(ce->ring);
+       ce->ops = &execlists_context_ops;
 
-       ce->state->obj->pin_global--;
-       i915_gem_object_unpin_map(ce->state->obj);
-       i915_vma_unpin(ce->state);
-
-       i915_gem_context_put(ctx);
+       return __execlists_context_pin(engine, ctx, ce);
 }
 
 static int execlists_request_alloc(struct i915_request *request)
 {
-       struct intel_context *ce =
-               to_intel_context(request->ctx, request->engine);
        int ret;
 
-       GEM_BUG_ON(!ce->pin_count);
+       GEM_BUG_ON(!request->hw_context->pin_count);
 
        /* Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
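
execlists_context_pin() above keeps the common case to a single pin_count increment; only the first pin allocates and maps the context state, and teardown is routed through the new intel_context_ops vtable (.unpin/.destroy) instead of per-engine callbacks. A simplified model of that pattern (all names below are illustrative):

    #include <assert.h>

    struct ctx;

    struct ctx_ops {
            void (*unpin)(struct ctx *);
            void (*destroy)(struct ctx *);
    };

    struct ctx {
            unsigned int pin_count;
            const struct ctx_ops *ops;
    };

    static void ctx_unpin_backend(struct ctx *c) { (void)c; /* unmap state */ }
    static void ctx_destroy_backend(struct ctx *c) { assert(!c->pin_count); }

    static const struct ctx_ops backend_ops = {
            .unpin = ctx_unpin_backend,
            .destroy = ctx_destroy_backend,
    };

    static struct ctx *ctx_pin(struct ctx *c)
    {
            if (c->pin_count++)             /* fast path: already pinned */
                    return c;
            c->ops = &backend_ops;          /* first pin: hook up the backend */
            /* ... allocate and map hardware state here ... */
            return c;
    }

    static void ctx_unpin(struct ctx *c)
    {
            if (--c->pin_count)
                    return;
            c->ops->unpin(c);               /* last unpin releases the mapping */
    }
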
@@ -1538,19 +1496,56 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        return batch;
 }
 
+struct lri {
+       i915_reg_t reg;
+       u32 value;
+};
+
+static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
+{
+       GEM_BUG_ON(!count || count > 63);
+
+       *batch++ = MI_LOAD_REGISTER_IMM(count);
+       do {
+               *batch++ = i915_mmio_reg_offset(lri->reg);
+               *batch++ = lri->value;
+       } while (lri++, --count);
+       *batch++ = MI_NOOP;
+
+       return batch;
+}
+
 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 {
+       static const struct lri lri[] = {
+               /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+               {
+                       COMMON_SLICE_CHICKEN2,
+                       __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
+                                      0),
+               },
+
+               /* BSpec: 11391 */
+               {
+                       FF_SLICE_CHICKEN,
+                       __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
+                                      FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+               },
+
+               /* BSpec: 11299 */
+               {
+                       _3D_CHICKEN3,
+                       __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
+                                      _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+               }
+       };
+
        *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
-       /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-       *batch++ = MI_LOAD_REGISTER_IMM(1);
-       *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
-       *batch++ = _MASKED_BIT_DISABLE(
-                       GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
-       *batch++ = MI_NOOP;
+       batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
 
        /* WaClearSlmSpaceAtContextSwitch:kbl */
        /* Actual scratch location is at 128 bytes offset */
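
emit_lri() above replaces three separate MI_LOAD_REGISTER_IMM(1) packets with one table-driven MI_LOAD_REGISTER_IMM(n): a single header dword followed by n (offset, value) pairs and a trailing NOOP to keep the stream qword-aligned. A standalone sketch of the packing, assuming the usual MI_INSTR encoding (opcode 0x22 in the high bits, dword count 2n-1 in the low bits):

    #include <stdint.h>
    #include <stddef.h>
    #include <assert.h>

    struct reg_write { uint32_t offset, value; };

    /* LRI header following the MI_INSTR convention; treat the exact
     * encoding as illustrative rather than authoritative. */
    #define LRI_HEADER(n)   ((0x22u << 23) | (2u * (n) - 1u))
    #define NOOP            0u

    static uint32_t *emit_lri(uint32_t *batch,
                              const struct reg_write *w, size_t n)
    {
            assert(n && n <= 63);           /* count field is bounded */

            *batch++ = LRI_HEADER(n);
            do {
                    *batch++ = w->offset;
                    *batch++ = w->value;
            } while (w++, --n);
            *batch++ = NOOP;                /* 1 + 2n + 1 dwords: qword aligned */

            return batch;
    }
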
@@ -1642,7 +1637,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
@@ -1757,17 +1752,29 @@ static void enable_execlists(struct intel_engine_cs *engine)
                I915_WRITE(RING_MODE_GEN7(engine),
                           _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
 
+       I915_WRITE(RING_MI_MODE(engine->mmio_base),
+                  _MASKED_BIT_DISABLE(STOP_RING));
+
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
                   engine->status_page.ggtt_offset);
        POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
 
-       /* Following the reset, we need to reload the CSB read/write pointers */
-       engine->execlists.csb_head = -1;
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       bool unexpected = false;
+
+       if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
+               DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
+               unexpected = true;
+       }
+
+       return unexpected;
 }
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
        int ret;
 
        ret = intel_mocs_init_engine(engine);
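
The new RING_MI_MODE write in enable_execlists() uses the i915 masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits take effect, so a single bit can be set or cleared without a read-modify-write. A small sketch of how such values are formed (the STOP_RING bit position below is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Masked register: hardware updates only the low bits whose mask
     * (same bit position + 16) is set in the written value. */
    #define MASKED_FIELD(mask, value)  (((mask) << 16) | (value))
    #define MASKED_BIT_ENABLE(bit)     MASKED_FIELD((bit), (bit))
    #define MASKED_BIT_DISABLE(bit)    MASKED_FIELD((bit), 0)

    #define STOP_RING_BIT  (1u << 8)   /* illustrative bit position */

    int main(void)
    {
            uint32_t set   = MASKED_BIT_ENABLE(STOP_RING_BIT);
            uint32_t clear = MASKED_BIT_DISABLE(STOP_RING_BIT);

            printf("enable:  %#010x\n", set);    /* 0x01000100 */
            printf("disable: %#010x\n", clear);  /* 0x01000000 */
            return 0;
    }
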
@@ -1775,13 +1782,14 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
                return ret;
 
        intel_engine_reset_breadcrumbs(engine);
-       intel_engine_init_hangcheck(engine);
 
-       enable_execlists(engine);
+       if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+               struct drm_printer p = drm_debug_printer(__func__);
 
-       /* After a GPU reset, we may have requests to replay */
-       if (execlists->first)
-               tasklet_schedule(&execlists->tasklet);
+               intel_engine_dump(engine, &p, NULL);
+       }
+
+       enable_execlists(engine);
 
        return 0;
 }
@@ -1823,8 +1831,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
        return 0;
 }
 
-static void reset_common_ring(struct intel_engine_cs *engine,
-                             struct i915_request *request)
+static struct i915_request *
+execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_request *request, *active;
+       unsigned long flags;
+
+       GEM_TRACE("%s\n", engine->name);
+
+       /*
+        * Prevent request submission to the hardware until we have
+        * completed the reset in i915_gem_reset_finish(). If a request
+        * is completed by one engine, it may then queue a request
+        * to a second via its execlists->tasklet *just* as we are
+        * calling engine->init_hw() and also writing the ELSP.
+        * Turning off the execlists->tasklet until the reset is over
+        * prevents the race.
+        */
+       __tasklet_disable_sync_once(&execlists->tasklet);
+
+       spin_lock_irqsave(&engine->timeline.lock, flags);
+
+       /*
+        * We want to flush the pending context switches; having disabled
+        * the tasklet above, we can assume exclusive access to the execlists.
+        * This allows us to catch up with an inflight preemption event,
+        * and avoid blaming an innocent request if the stall was due to the
+        * preemption itself.
+        */
+       process_csb(engine);
+
+       /*
+        * The last active request can then be no later than the last request
+        * now in ELSP[0]. So search backwards from there, so that if the GPU
+        * has advanced beyond the last CSB update, it will be pardoned.
+        */
+       active = NULL;
+       request = port_request(execlists->port);
+       if (request) {
+               /*
+                * Prevent the breadcrumb from advancing before we decide
+                * which request is currently active.
+                */
+               intel_engine_stop_cs(engine);
+
+               list_for_each_entry_from_reverse(request,
+                                                &engine->timeline.requests,
+                                                link) {
+                       if (__i915_request_completed(request,
+                                                    request->global_seqno))
+                               break;
+
+                       active = request;
+               }
+       }
+
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+       return active;
+}
+
+static void execlists_reset(struct intel_engine_cs *engine,
+                           struct i915_request *request)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned long flags;
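
execlists_reset_prepare() above searches backwards from the request in ELSP[0], so a request the GPU has actually finished, even if its CSB event was never processed, is pardoned instead of blamed for the hang. A simplified sketch of that backwards scan (the list representation is an assumption of the sketch):

    #include <stdbool.h>
    #include <stddef.h>

    struct request {
            unsigned int seqno;
            struct request *prev;           /* older request on the timeline */
            bool completed;                 /* breadcrumb already written? */
    };

    /*
     * Starting from the request currently in the first port, walk towards
     * older requests; stop at the first completed one. Whatever remains in
     * 'active' is the oldest request still genuinely executing, i.e. the
     * one to blame for the stall.
     */
    static struct request *find_active(struct request *in_port0)
    {
            struct request *active = NULL;

            for (struct request *rq = in_port0; rq; rq = rq->prev) {
                    if (rq->completed)
                            break;          /* GPU got past this point: pardon */
                    active = rq;
            }
            return active;
    }
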
@@ -1834,8 +1903,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
                  engine->name, request ? request->global_seqno : 0,
                  intel_engine_get_seqno(engine));
 
-       /* See execlists_cancel_requests() for the irq/spinlock split. */
-       local_irq_save(flags);
+       spin_lock_irqsave(&engine->timeline.lock, flags);
 
        /*
         * Catch up with any missed context-switch interrupts.
@@ -1847,14 +1915,14 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * requests were completed.
         */
        execlists_cancel_port_requests(execlists);
-       reset_irq(engine);
 
        /* Push back any incomplete requests for replay after the reset. */
-       spin_lock(&engine->timeline.lock);
        __unwind_incomplete_requests(engine);
-       spin_unlock(&engine->timeline.lock);
 
-       local_irq_restore(flags);
+       /* Following the reset, we need to reload the CSB read/write pointers */
+       reset_csb_pointers(&engine->execlists);
+
+       spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
        /*
         * If the request was innocent, we leave the request in the ELSP
@@ -1878,35 +1946,52 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * future request will be after userspace has had the opportunity
         * to recreate its own state.
         */
-       regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
-       if (engine->default_state) {
-               void *defaults;
-
-               defaults = i915_gem_object_pin_map(engine->default_state,
-                                                  I915_MAP_WB);
-               if (!IS_ERR(defaults)) {
-                       memcpy(regs, /* skip restoring the vanilla PPHWSP */
-                              defaults + LRC_STATE_PN * PAGE_SIZE,
-                              engine->context_size - PAGE_SIZE);
-                       i915_gem_object_unpin_map(engine->default_state);
-               }
+       regs = request->hw_context->lrc_reg_state;
+       if (engine->pinned_default_state) {
+               memcpy(regs, /* skip restoring the vanilla PPHWSP */
+                      engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+                      engine->context_size - PAGE_SIZE);
        }
-       execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+       execlists_init_reg_state(regs,
+                                request->gem_context, engine, request->ring);
 
        /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
        regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
-       regs[CTX_RING_HEAD + 1] = request->postfix;
 
-       request->ring->head = request->postfix;
+       request->ring->head = intel_ring_wrap(request->ring, request->postfix);
+       regs[CTX_RING_HEAD + 1] = request->ring->head;
+
        intel_ring_update_space(request->ring);
 
        /* Reset WaIdleLiteRestore:bdw,skl as well */
        unwind_wa_tail(request);
 }
 
+static void execlists_reset_finish(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists * const execlists = &engine->execlists;
+
+       /* After a GPU reset, we may have requests to replay */
+       if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
+               tasklet_schedule(&execlists->tasklet);
+
+       /*
+        * Flush the tasklet while we still have the forcewake to be sure
+        * that it is not allowed to sleep before we restart and reload a
+        * context.
+        *
+        * As before (with execlists_reset_prepare) we rely on the caller
+        * serialising multiple attempts to reset so that we know that we
+        * are the only one manipulating tasklet state.
+        */
+       __tasklet_enable_sync_once(&execlists->tasklet);
+
+       GEM_TRACE("%s\n", engine->name);
+}
+
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-       struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+       struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
        struct intel_engine_cs *engine = rq->engine;
        const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
        u32 *cs;
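
Note the replay fix above: the restored RING_HEAD now goes through intel_ring_wrap(), so a postfix offset that ran past the end of the buffer is folded back into range before being written to the context image. With a power-of-two ring that wrap is a single mask, roughly:

    #include <stdint.h>
    #include <assert.h>

    struct ring { uint32_t size; };     /* size is a power of two */

    static uint32_t ring_wrap(const struct ring *r, uint32_t pos)
    {
            assert((r->size & (r->size - 1)) == 0);
            return pos & (r->size - 1); /* modulo without a divide */
    }

    /* e.g. ring_wrap() with size 4096 maps pos 4100 back to 4 */
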
@@ -1945,15 +2030,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
         * it is unsafe in case of lite-restore (because the ctx is
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
-       if (rq->ctx->ppgtt &&
-           (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
-           !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+       if (rq->gem_context->ppgtt &&
+           (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+           !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
            !intel_vgpu_active(rq->i915)) {
                ret = intel_logical_ring_emit_pdps(rq);
                if (ret)
                        return ret;
 
-               rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+               rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
        }
 
        cs = intel_ring_begin(rq, 6);
@@ -2207,13 +2292,15 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        kfree(engine);
 }
 
-static void execlists_set_default_submission(struct intel_engine_cs *engine)
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 {
        engine->submit_request = execlists_submit_request;
        engine->cancel_requests = execlists_cancel_requests;
        engine->schedule = execlists_schedule;
        engine->execlists.tasklet.func = execlists_submission_tasklet;
 
+       engine->reset.prepare = execlists_reset_prepare;
+
        engine->park = NULL;
        engine->unpark = NULL;
 
@@ -2233,18 +2320,19 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overriden by each engine. */
        engine->init_hw = gen8_init_common_ring;
-       engine->reset_hw = reset_common_ring;
 
-       engine->context_pin = execlists_context_pin;
-       engine->context_unpin = execlists_context_unpin;
+       engine->reset.prepare = execlists_reset_prepare;
+       engine->reset.reset = execlists_reset;
+       engine->reset.finish = execlists_reset_finish;
 
+       engine->context_pin = execlists_context_pin;
        engine->request_alloc = execlists_request_alloc;
 
        engine->emit_flush = gen8_emit_flush;
        engine->emit_breadcrumb = gen8_emit_breadcrumb;
        engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
 
-       engine->set_default_submission = execlists_set_default_submission;
+       engine->set_default_submission = intel_execlists_set_default_submission;
 
        if (INTEL_GEN(engine->i915) < 11) {
                engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -2284,28 +2372,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 static void
 logical_ring_setup(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-       enum forcewake_domains fw_domains;
-
        intel_engine_setup_common(engine);
 
        /* Intentionally left blank. */
        engine->buffer = NULL;
 
-       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
-                                                   RING_ELSP(engine),
-                                                   FW_REG_WRITE);
-
-       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                                    RING_CONTEXT_STATUS_PTR(engine),
-                                                    FW_REG_READ | FW_REG_WRITE);
-
-       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
-                                                    FW_REG_READ);
-
-       engine->execlists.fw_domains = fw_domains;
-
        tasklet_init(&engine->execlists.tasklet,
                     execlists_submission_tasklet, (unsigned long)engine);
 
@@ -2313,33 +2384,61 @@ logical_ring_setup(struct intel_engine_cs *engine)
        logical_ring_default_irqs(engine);
 }
 
+static bool csb_force_mmio(struct drm_i915_private *i915)
+{
+       /* Older GVT emulation depends upon intercepting CSB mmio */
+       return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
+}
+
 static int logical_ring_init(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *i915 = engine->i915;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        int ret;
 
        ret = intel_engine_init_common(engine);
        if (ret)
                goto error;
 
-       if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
-               engine->execlists.submit_reg = engine->i915->regs +
+       if (HAS_LOGICAL_RING_ELSQ(i915)) {
+               execlists->submit_reg = i915->regs +
                        i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
-               engine->execlists.ctrl_reg = engine->i915->regs +
+               execlists->ctrl_reg = i915->regs +
                        i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
        } else {
-               engine->execlists.submit_reg = engine->i915->regs +
+               execlists->submit_reg = i915->regs +
                        i915_mmio_reg_offset(RING_ELSP(engine));
        }
 
-       engine->execlists.preempt_complete_status = ~0u;
-       if (engine->i915->preempt_context) {
+       execlists->preempt_complete_status = ~0u;
+       if (i915->preempt_context) {
                struct intel_context *ce =
-                       to_intel_context(engine->i915->preempt_context, engine);
+                       to_intel_context(i915->preempt_context, engine);
 
-               engine->execlists.preempt_complete_status =
+               execlists->preempt_complete_status =
                        upper_32_bits(ce->lrc_desc);
        }
 
+       execlists->csb_read =
+               i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
+       if (csb_force_mmio(i915)) {
+               execlists->csb_status = (u32 __force *)
+                       (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+
+               execlists->csb_write = (u32 __force *)execlists->csb_read;
+               execlists->csb_write_reset =
+                       _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
+                                     GEN8_CSB_ENTRIES - 1);
+       } else {
+               execlists->csb_status =
+                       &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+
+               execlists->csb_write =
+                       &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
+               execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
+       }
+       reset_csb_pointers(execlists);
+
        return 0;
 
 error:
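
logical_ring_init() above resolves the CSB indirection once at init: csb_status and csb_write point either at the cacheable HWSP mirror or, only for older GVT-g emulation that must intercept mmio, at the register window, so process_csb() itself never branches on the mode. A sketch of that resolve-once setup (struct csb_source and the parameter names are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    #define CSB_ENTRIES 6

    struct csb_source {
            const uint32_t *status;         /* 2 dwords per event */
            const uint32_t *write;          /* producer (write) pointer */
            uint8_t head;                   /* consumer (read) pointer */
    };

    static void csb_setup(struct csb_source *csb, bool force_mmio,
                          const uint32_t *mmio_buf, const uint32_t *mmio_ptr,
                          const uint32_t *hwsp_buf, const uint32_t *hwsp_ptr)
    {
            if (force_mmio) {               /* e.g. old GVT-g: must trap mmio */
                    csb->status = mmio_buf;
                    csb->write  = mmio_ptr;
            } else {                        /* normal path: cacheable HWSP mirror */
                    csb->status = hwsp_buf;
                    csb->write  = hwsp_ptr;
            }
            /* start as if the last slot was consumed: next event is slot 0 */
            csb->head = CSB_ENTRIES - 1;
    }
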
@@ -2472,7 +2571,7 @@ static void execlists_init_reg_state(u32 *regs,
        struct drm_i915_private *dev_priv = engine->i915;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
        u32 base = engine->mmio_base;
-       bool rcs = engine->id == RCS;
+       bool rcs = engine->class == RENDER_CLASS;
 
        /* A context is actually a big batch buffer with several
         * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2540,7 +2639,7 @@ static void execlists_init_reg_state(u32 *regs,
        CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
        CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
 
-       if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
+       if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
@@ -2619,10 +2718,10 @@ err_unpin_ctx:
 }
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-                                           struct intel_engine_cs *engine)
+                                           struct intel_engine_cs *engine,
+                                           struct intel_context *ce)
 {
        struct drm_i915_gem_object *ctx_obj;
-       struct intel_context *ce = to_intel_context(ctx, engine);
        struct i915_vma *vma;
        uint32_t context_size;
        struct intel_ring *ring;
@@ -2641,12 +2740,10 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
        ctx_obj = i915_gem_object_create(ctx->i915, context_size);
-       if (IS_ERR(ctx_obj)) {
-               ret = PTR_ERR(ctx_obj);
-               goto error_deref_obj;
-       }
+       if (IS_ERR(ctx_obj))
+               return PTR_ERR(ctx_obj);
 
-       vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto error_deref_obj;
index 4ec7d8dd13c8013ab3aaa2446b453fe3f73451bc..4dfb78e3ec7e4dc6f8784ca26d48360c14bcfb51 100644 (file)
@@ -104,11 +104,6 @@ struct i915_gem_context;
 
 void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
-                           struct intel_engine_cs *engine)
-{
-       return to_intel_context(ctx, engine)->lrc_desc;
-}
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
index 8ae8f42f430a1a1e0fdc7e91b2ca3b9a1f0dd92f..5dae16ccd9f1015fe80abc2c41d98937f3aaef89 100644 (file)
@@ -116,7 +116,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
 
 static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
 {
-       uint8_t rev;
+       u8 rev;
 
        if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
                              &rev) != 1) {
index d278f24ba6ae58bf4a704dc57610a53e9b042d25..f9f3b0885ba595be9dad319ee78060c0ec731a0c 100644 (file)
@@ -44,8 +44,6 @@
 /* Private structure for the integrated LVDS support */
 struct intel_lvds_connector {
        struct intel_connector base;
-
-       struct notifier_block lid_notifier;
 };
 
 struct intel_lvds_pps {
@@ -85,34 +83,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn
        return container_of(connector, struct intel_lvds_connector, base.base);
 }
 
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t lvds_reg, enum pipe *pipe)
+{
+       u32 val;
+
+       val = I915_READ(lvds_reg);
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (HAS_PCH_CPT(dev_priv))
+               *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT;
+       else
+               *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT;
+
+       return val & LVDS_PORT_EN;
+}
+
 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       u32 tmp;
        bool ret;
 
        if (!intel_display_power_get_if_enabled(dev_priv,
                                                encoder->power_domain))
                return false;
 
-       ret = false;
-
-       tmp = I915_READ(lvds_encoder->reg);
-
-       if (!(tmp & LVDS_PORT_EN))
-               goto out;
+       ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
 
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
-
-       ret = true;
-
-out:
        intel_display_power_put(dev_priv, encoder->power_domain);
 
        return ret;
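
intel_lvds_port_enabled() above centralizes the LVDS register decode: extract the pipe-select field, whose width and position differ between CPT and pre-PCH parts, and return the enable bit, so the encoder readout and the state asserts share one decoder. The underlying mask/shift idiom, with illustrative field positions:

    #include <stdint.h>
    #include <stdbool.h>

    #define PORT_EN            (1u << 31)
    #define PIPE_SEL_SHIFT     30           /* illustrative, pre-PCH style */
    #define PIPE_SEL_MASK      (1u << 30)

    static bool port_enabled(uint32_t val, unsigned int *pipe)
    {
            /* decode the pipe even when disabled: asserts want to know it */
            *pipe = (val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
            return val & PORT_EN;
    }
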
@@ -255,14 +254,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
        if (HAS_PCH_CPT(dev_priv)) {
-               temp &= ~PORT_TRANS_SEL_MASK;
-               temp |= PORT_TRANS_SEL_CPT(pipe);
+               temp &= ~LVDS_PIPE_SEL_MASK_CPT;
+               temp |= LVDS_PIPE_SEL_CPT(pipe);
        } else {
-               if (pipe == 1) {
-                       temp |= LVDS_PIPEB_SELECT;
-               } else {
-                       temp &= ~LVDS_PIPEB_SELECT;
-               }
+               temp &= ~LVDS_PIPE_SEL_MASK;
+               temp |= LVDS_PIPE_SEL(pipe);
        }
 
        /* set the corresponding LVDS_BORDER bit */
@@ -380,6 +376,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
        if (mode->hdisplay > fixed_mode->hdisplay)
                return MODE_PANEL;
        if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +427,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                               adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        if (HAS_PCH_SPLIT(dev_priv)) {
                pipe_config->has_pch_encoder = true;
 
@@ -449,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        return true;
 }
 
-/*
- * Detect the LVDS connection.
- *
- * Since LVDS doesn't have hotlug, we use the lid as a proxy.  Open means
- * connected and closed means disconnected.  We also send hotplug events as
- * needed, using lid status notification from the input layer.
- */
 static enum drm_connector_status
 intel_lvds_detect(struct drm_connector *connector, bool force)
 {
-       struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       enum drm_connector_status status;
-
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
-
-       status = intel_panel_detect(dev_priv);
-       if (status != connector_status_unknown)
-               return status;
-
        return connector_status_connected;
 }
 
@@ -493,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
        return 1;
 }
 
-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-{
-       DRM_INFO("Skipping forced modeset for %s\n", id->ident);
-       return 1;
-}
-
-/* The GPU hangs up on these systems if modeset is performed on LID open */
-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
-       {
-               .callback = intel_no_modeset_on_lid_dmi_callback,
-               .ident = "Toshiba Tecra A11",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
-               },
-       },
-
-       { }     /* terminating entry */
-};
-
-/*
- * Lid events. Note the use of 'modeset':
- *  - we set it to MODESET_ON_LID_OPEN on lid close,
- *    and set it to MODESET_DONE on open
- *  - we use it as a "only once" bit (ie we ignore
- *    duplicate events where it was already properly set)
- *  - the suspend/resume paths will set it to
- *    MODESET_SUSPENDED and ignore the lid open event,
- *    because they restore the mode ("lid open").
- */
-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
-                           void *unused)
-{
-       struct intel_lvds_connector *lvds_connector =
-               container_of(nb, struct intel_lvds_connector, lid_notifier);
-       struct drm_connector *connector = &lvds_connector->base.base;
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
-               return NOTIFY_OK;
-
-       mutex_lock(&dev_priv->modeset_restore_lock);
-       if (dev_priv->modeset_restore == MODESET_SUSPENDED)
-               goto exit;
-       /*
-        * check and update the status of LVDS connector after receiving
-        * the LID nofication event.
-        */
-       connector->status = connector->funcs->detect(connector, false);
-
-       /* Don't force modeset on machines where it causes a GPU lockup */
-       if (dmi_check_system(intel_no_modeset_on_lid))
-               goto exit;
-       if (!acpi_lid_open()) {
-               /* do modeset on next lid open event */
-               dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
-               goto exit;
-       }
-
-       if (dev_priv->modeset_restore == MODESET_DONE)
-               goto exit;
-
-       /*
-        * Some old platform's BIOS love to wreak havoc while the lid is closed.
-        * We try to detect this here and undo any damage. The split for PCH
-        * platforms is rather conservative and a bit arbitrary expect that on
-        * those platforms VGA disabling requires actual legacy VGA I/O access,
-        * and as part of the cleanup in the hw state restore we also redisable
-        * the vga plane.
-        */
-       if (!HAS_PCH_SPLIT(dev_priv))
-               intel_display_resume(dev);
-
-       dev_priv->modeset_restore = MODESET_DONE;
-
-exit:
-       mutex_unlock(&dev_priv->modeset_restore_lock);
-       return NOTIFY_OK;
-}
-
-static int
-intel_lvds_connector_register(struct drm_connector *connector)
-{
-       struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-       int ret;
-
-       ret = intel_connector_register(connector);
-       if (ret)
-               return ret;
-
-       lvds->lid_notifier.notifier_call = intel_lid_notify;
-       if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
-               DRM_DEBUG_KMS("lid notifier registration failed\n");
-               lvds->lid_notifier.notifier_call = NULL;
-       }
-
-       return 0;
-}
-
-static void
-intel_lvds_connector_unregister(struct drm_connector *connector)
-{
-       struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-
-       if (lvds->lid_notifier.notifier_call)
-               acpi_lid_notifier_unregister(&lvds->lid_notifier);
-
-       intel_connector_unregister(connector);
-}
-
 /**
  * intel_lvds_destroy - unregister and free LVDS structures
  * @connector: connector to free
@@ -636,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_digital_connector_atomic_get_property,
        .atomic_set_property = intel_digital_connector_atomic_set_property,
-       .late_register = intel_lvds_connector_register,
-       .early_unregister = intel_lvds_connector_unregister,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_lvds_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -943,7 +816,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
         * register is uninitialized.
         */
        val = I915_READ(lvds_encoder->reg);
-       if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
+       if (HAS_PCH_CPT(dev_priv))
+               val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
+       else
+               val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK);
+       if (val == 0)
                val = dev_priv->vbt.bios_lvds_val;
 
        return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
@@ -998,8 +875,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
                return;
 
        /* Skip init on machines we know falsely report LVDS */
-       if (dmi_check_system(intel_no_lvds))
+       if (dmi_check_system(intel_no_lvds)) {
+               WARN(!dev_priv->vbt.int_lvds_support,
+                    "Useless DMI match. Internal LVDS support disabled by VBT\n");
+               return;
+       }
+
+       if (!dev_priv->vbt.int_lvds_support) {
+               DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
                return;
+       }
 
        if (HAS_PCH_SPLIT(dev_priv))
                lvds_reg = PCH_LVDS;
@@ -1011,10 +896,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
        if (HAS_PCH_SPLIT(dev_priv)) {
                if ((lvds & LVDS_DETECTED) == 0)
                        return;
-               if (dev_priv->vbt.edp.support) {
-                       DRM_DEBUG_KMS("disable LVDS for eDP support\n");
-                       return;
-               }
        }
 
        pin = GMBUS_PIN_PANEL;
@@ -1103,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
         * 2) check for VBT data
         * 3) check to see if LVDS is already on
         *    if none of the above, no panel
-        * 4) make sure lid is open
-        *    if closed, act like it's not there for now
         */
 
        /*
@@ -1120,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
                                    intel_gmbus_get_adapter(dev_priv, pin));
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
-                       drm_mode_connector_update_edid_property(connector,
+                       drm_connector_update_edid_property(connector,
                                                                edid);
                } else {
                        kfree(edid);
index b39846613e3c78f558ffc3c65233a1bad2e4dd7e..ca44bf368e2428050ae45c17f2fa34bd7d7ac9b5 100644 (file)
@@ -40,7 +40,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
 {
        int ret;
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
 
        return ret;
index c58e5f53bab0d43da4623091db110aba0c10b993..e034b4166d322f8182a9f31bcfb1cd224c686214 100644 (file)
@@ -608,16 +608,16 @@ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 #define ACPI_EV_LID            (1<<1)
 #define ACPI_EV_DOCK           (1<<2)
 
-static struct intel_opregion *system_opregion;
-
+/*
+ * The only video events relevant to opregion are 0x80. These indicate either a
+ * docking event, lid switch or display switch request. In Linux, these are
+ * handled by the dock, button and video drivers.
+ */
 static int intel_opregion_video_event(struct notifier_block *nb,
                                      unsigned long val, void *data)
 {
-       /* The only video events relevant to opregion are 0x80. These indicate
-          either a docking event, lid switch or display switch request. In
-          Linux, these are handled by the dock, button and video drivers.
-       */
-
+       struct intel_opregion *opregion = container_of(nb, struct intel_opregion,
+                                                      acpi_notifier);
        struct acpi_bus_event *event = data;
        struct opregion_acpi *acpi;
        int ret = NOTIFY_OK;
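
Dropping the system_opregion global works because the notifier_block is now embedded in struct intel_opregion, so the callback can recover its device context with container_of() instead of going through a singleton. A self-contained sketch of the idiom:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct notifier_blk { int (*call)(struct notifier_blk *, unsigned long); };

    struct opregion {
            int lid_state;
            struct notifier_blk nb;         /* embedded, one per device */
    };

    static int video_event(struct notifier_blk *nb, unsigned long val)
    {
            /* No global needed: walk back from the member to the container. */
            struct opregion *op = container_of(nb, struct opregion, nb);
            printf("event %#lx, lid=%d\n", val, op->lid_state);
            return 0;
    }

    int main(void)
    {
            struct opregion op = { .lid_state = 1, .nb = { .call = video_event } };
            return op.nb.call(&op.nb, 0x80);
    }
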
@@ -625,10 +625,7 @@ static int intel_opregion_video_event(struct notifier_block *nb,
        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
                return NOTIFY_DONE;
 
-       if (!system_opregion)
-               return NOTIFY_DONE;
-
-       acpi = system_opregion->acpi;
+       acpi = opregion->acpi;
 
        if (event->type == 0x80 && ((acpi->cevt & 1) == 0))
                ret = NOTIFY_BAD;
@@ -638,10 +635,6 @@ static int intel_opregion_video_event(struct notifier_block *nb,
        return ret;
 }
 
-static struct notifier_block intel_opregion_notifier = {
-       .notifier_call = intel_opregion_video_event,
-};
-
 /*
  * Initialise the DIDL field in opregion. This passes a list of devices to
  * the firmware. Values are defined by section B.4.2 of the ACPI specification
@@ -797,8 +790,8 @@ void intel_opregion_register(struct drm_i915_private *dev_priv)
                opregion->acpi->csts = 0;
                opregion->acpi->drdy = 1;
 
-               system_opregion = opregion;
-               register_acpi_notifier(&intel_opregion_notifier);
+               opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
+               register_acpi_notifier(&opregion->acpi_notifier);
        }
 
        if (opregion->asle) {
@@ -822,8 +815,8 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv)
        if (opregion->acpi) {
                opregion->acpi->drdy = 0;
 
-               system_opregion = NULL;
-               unregister_acpi_notifier(&intel_opregion_notifier);
+               unregister_acpi_notifier(&opregion->acpi_notifier);
+               opregion->acpi_notifier.notifier_call = NULL;
        }
 
        /* just clear all opregion memory pointers now */
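
The opregion refactor above is the standard kernel idiom for de-globalizing a notifier: embed the notifier_block in the per-device structure and recover the owner with container_of() in the callback, instead of stashing a file-scope pointer like the old system_opregion. A self-contained sketch of the pattern (generic names, not i915 code):

    #include <linux/kernel.h>
    #include <linux/notifier.h>

    struct my_dev {
            int events_seen;
            struct notifier_block nb;       /* one notifier per instance */
    };

    static int my_dev_event(struct notifier_block *nb,
                            unsigned long action, void *data)
    {
            /* Recover the enclosing structure from the embedded member. */
            struct my_dev *md = container_of(nb, struct my_dev, nb);

            md->events_seen++;
            return NOTIFY_OK;
    }

    /* setup:    md->nb.notifier_call = my_dev_event;
     *           register_acpi_notifier(&md->nb);
     * teardown: unregister_acpi_notifier(&md->nb);
     */
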
index e0e437ba9e516449ccdd0b45895747422769759c..e8498a8cda3d1c9850f386bb00bfef90622fa12c 100644 (file)
@@ -49,6 +49,7 @@ struct intel_opregion {
        u32 vbt_size;
        u32 *lid_state;
        struct work_struct asle_work;
+       struct notifier_block acpi_notifier;
 };
 
 #define OPREGION_SIZE            (8 * 1024)
index b443278e569cedb1f9d7fbe36188eb6a9f67fd70..4a9f139e7b7383c8f244bac833679c3b5302214b 100644 (file)
@@ -375,26 +375,6 @@ out:
        pipe_config->gmch_pfit.lvds_border_bits = border;
 }
 
-enum drm_connector_status
-intel_panel_detect(struct drm_i915_private *dev_priv)
-{
-       /* Assume that the BIOS does not lie through the OpRegion... */
-       if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
-               return *dev_priv->opregion.lid_state & 0x1 ?
-                       connector_status_connected :
-                       connector_status_disconnected;
-       }
-
-       switch (i915_modparams.panel_ignore_lid) {
-       case -2:
-               return connector_status_connected;
-       case -1:
-               return connector_status_disconnected;
-       default:
-               return connector_status_unknown;
-       }
-}
-
 /**
  * scale - scale values from one range to another
  * @source_val: value in range [@source_min..@source_max]
@@ -406,11 +386,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv)
  * Return @source_val in range [@source_min..@source_max] scaled to range
  * [@target_min..@target_max].
  */
-static uint32_t scale(uint32_t source_val,
-                     uint32_t source_min, uint32_t source_max,
-                     uint32_t target_min, uint32_t target_max)
+static u32 scale(u32 source_val,
+                u32 source_min, u32 source_max,
+                u32 target_min, u32 target_max)
 {
-       uint64_t target_val;
+       u64 target_val;
 
        WARN_ON(source_min > source_max);
        WARN_ON(target_min > target_max);
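
The uint32_t to u32/u64 churn in scale() is not purely cosmetic: the 64-bit intermediate is what keeps the cross-multiplication from overflowing. A sketch of the arithmetic (not the exact in-tree rounding), assuming source_max > source_min as the WARN_ONs imply:

    #include <linux/math64.h>
    #include <linux/types.h>

    static u32 scale_sketch(u32 source_val,
                            u32 source_min, u32 source_max,
                            u32 target_min, u32 target_max)
    {
            /* Both factors may approach U32_MAX, so widen before
             * multiplying; the quotient fits back into 32 bits. */
            u64 span = (u64)(source_val - source_min) *
                       (target_max - target_min);

            return target_min + div_u64(span, source_max - source_min);
    }
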
index 39a4e4edda07052a31ec762cc5190b706d4fe713..849e1b69ba739e9fd61407313582cde5461820a9 100644 (file)
 #include <linux/debugfs.h>
 #include "intel_drv.h"
 
-struct pipe_crc_info {
-       const char *name;
-       struct drm_i915_private *dev_priv;
-       enum pipe pipe;
-};
-
-static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
-{
-       struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = info->dev_priv;
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
-       if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
-               return -ENODEV;
-
-       spin_lock_irq(&pipe_crc->lock);
-
-       if (pipe_crc->opened) {
-               spin_unlock_irq(&pipe_crc->lock);
-               return -EBUSY; /* already open */
-       }
-
-       pipe_crc->opened = true;
-       filep->private_data = inode->i_private;
-
-       spin_unlock_irq(&pipe_crc->lock);
-
-       return 0;
-}
-
-static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
-{
-       struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = info->dev_priv;
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
-       spin_lock_irq(&pipe_crc->lock);
-       pipe_crc->opened = false;
-       spin_unlock_irq(&pipe_crc->lock);
-
-       return 0;
-}
-
-/* (6 fields, 8 chars each, space separated (5) + '\n') */
-#define PIPE_CRC_LINE_LEN      (6 * 8 + 5 + 1)
-/* account for \'0' */
-#define PIPE_CRC_BUFFER_LEN    (PIPE_CRC_LINE_LEN + 1)
-
-static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
-{
-       lockdep_assert_held(&pipe_crc->lock);
-       return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
-                       INTEL_PIPE_CRC_ENTRIES_NR);
-}
-
-static ssize_t
-i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
-                  loff_t *pos)
-{
-       struct pipe_crc_info *info = filep->private_data;
-       struct drm_i915_private *dev_priv = info->dev_priv;
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-       char buf[PIPE_CRC_BUFFER_LEN];
-       int n_entries;
-       ssize_t bytes_read;
-
-       /*
-        * Don't allow user space to provide buffers not big enough to hold
-        * a line of data.
-        */
-       if (count < PIPE_CRC_LINE_LEN)
-               return -EINVAL;
-
-       if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
-               return 0;
-
-       /* nothing to read */
-       spin_lock_irq(&pipe_crc->lock);
-       while (pipe_crc_data_count(pipe_crc) == 0) {
-               int ret;
-
-               if (filep->f_flags & O_NONBLOCK) {
-                       spin_unlock_irq(&pipe_crc->lock);
-                       return -EAGAIN;
-               }
-
-               ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
-                               pipe_crc_data_count(pipe_crc), pipe_crc->lock);
-               if (ret) {
-                       spin_unlock_irq(&pipe_crc->lock);
-                       return ret;
-               }
-       }
-
-       /* We now have one or more entries to read */
-       n_entries = count / PIPE_CRC_LINE_LEN;
-
-       bytes_read = 0;
-       while (n_entries > 0) {
-               struct intel_pipe_crc_entry *entry =
-                       &pipe_crc->entries[pipe_crc->tail];
-
-               if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
-                            INTEL_PIPE_CRC_ENTRIES_NR) < 1)
-                       break;
-
-               BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
-               pipe_crc->tail = (pipe_crc->tail + 1) &
-                                (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
-               bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
-                                      "%8u %8x %8x %8x %8x %8x\n",
-                                      entry->frame, entry->crc[0],
-                                      entry->crc[1], entry->crc[2],
-                                      entry->crc[3], entry->crc[4]);
-
-               spin_unlock_irq(&pipe_crc->lock);
-
-               if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
-                       return -EFAULT;
-
-               user_buf += PIPE_CRC_LINE_LEN;
-               n_entries--;
-
-               spin_lock_irq(&pipe_crc->lock);
-       }
-
-       spin_unlock_irq(&pipe_crc->lock);
-
-       return bytes_read;
-}
-
-static const struct file_operations i915_pipe_crc_fops = {
-       .owner = THIS_MODULE,
-       .open = i915_pipe_crc_open,
-       .read = i915_pipe_crc_read,
-       .release = i915_pipe_crc_release,
-};
-
-static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
-       {
-               .name = "i915_pipe_A_crc",
-               .pipe = PIPE_A,
-       },
-       {
-               .name = "i915_pipe_B_crc",
-               .pipe = PIPE_B,
-       },
-       {
-               .name = "i915_pipe_C_crc",
-               .pipe = PIPE_C,
-       },
-};
-
 static const char * const pipe_crc_sources[] = {
        "none",
        "plane1",
@@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = {
        "auto",
 };
 
-static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
-{
-       BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
-       return pipe_crc_sources[source];
-}
-
-static int display_crc_ctl_show(struct seq_file *m, void *data)
-{
-       struct drm_i915_private *dev_priv = m->private;
-       enum pipe pipe;
-
-       for_each_pipe(dev_priv, pipe)
-               seq_printf(m, "%c %s\n", pipe_name(pipe),
-                          pipe_crc_source_name(dev_priv->pipe_crc[pipe].source));
-
-       return 0;
-}
-
-static int display_crc_ctl_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, display_crc_ctl_show, inode->i_private);
-}
-
 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
                                 uint32_t *val)
 {
@@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
                return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
 }
 
-static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
-                              enum pipe pipe,
-                              enum intel_pipe_crc_source source)
-{
-       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-       enum intel_display_power_domain power_domain;
-       u32 val = 0; /* shut up gcc */
-       int ret;
-
-       if (pipe_crc->source == source)
-               return 0;
-
-       /* forbid changing the source without going back to 'none' */
-       if (pipe_crc->source && source)
-               return -EINVAL;
-
-       power_domain = POWER_DOMAIN_PIPE(pipe);
-       if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
-               DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
-               return -EIO;
-       }
-
-       ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val, true);
-       if (ret != 0)
-               goto out;
-
-       /* none -> real source transition */
-       if (source) {
-               struct intel_pipe_crc_entry *entries;
-
-               DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
-                                pipe_name(pipe), pipe_crc_source_name(source));
-
-               entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
-                                 sizeof(pipe_crc->entries[0]),
-                                 GFP_KERNEL);
-               if (!entries) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               spin_lock_irq(&pipe_crc->lock);
-               kfree(pipe_crc->entries);
-               pipe_crc->entries = entries;
-               pipe_crc->head = 0;
-               pipe_crc->tail = 0;
-               spin_unlock_irq(&pipe_crc->lock);
-       }
-
-       pipe_crc->source = source;
-
-       I915_WRITE(PIPE_CRC_CTL(pipe), val);
-       POSTING_READ(PIPE_CRC_CTL(pipe));
-
-       /* real source -> none transition */
-       if (!source) {
-               struct intel_pipe_crc_entry *entries;
-               struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
-                                                                 pipe);
-
-               DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
-                                pipe_name(pipe));
-
-               drm_modeset_lock(&crtc->base.mutex, NULL);
-               if (crtc->base.state->active)
-                       intel_wait_for_vblank(dev_priv, pipe);
-               drm_modeset_unlock(&crtc->base.mutex);
-
-               spin_lock_irq(&pipe_crc->lock);
-               entries = pipe_crc->entries;
-               pipe_crc->entries = NULL;
-               pipe_crc->head = 0;
-               pipe_crc->tail = 0;
-               spin_unlock_irq(&pipe_crc->lock);
-
-               kfree(entries);
-
-               if (IS_G4X(dev_priv))
-                       g4x_undo_pipe_scramble_reset(dev_priv, pipe);
-               else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-                       vlv_undo_pipe_scramble_reset(dev_priv, pipe);
-               else if ((IS_HASWELL(dev_priv) ||
-                         IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
-                       hsw_pipe_A_crc_wa(dev_priv, false);
-       }
-
-       ret = 0;
-
-out:
-       intel_display_power_put(dev_priv, power_domain);
-
-       return ret;
-}
-
-/*
- * Parse pipe CRC command strings:
- *   command: wsp* object wsp+ name wsp+ source wsp*
- *   object: 'pipe'
- *   name: (A | B | C)
- *   source: (none | plane1 | plane2 | pf)
- *   wsp: (#0x20 | #0x9 | #0xA)+
- *
- * eg.:
- *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
- *  "pipe A none"    ->  Stop CRC
- */
-static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
-{
-       int n_words = 0;
-
-       while (*buf) {
-               char *end;
-
-               /* skip leading white space */
-               buf = skip_spaces(buf);
-               if (!*buf)
-                       break;  /* end of buffer */
-
-               /* find end of word */
-               for (end = buf; *end && !isspace(*end); end++)
-                       ;
-
-               if (n_words == max_words) {
-                       DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
-                                        max_words);
-                       return -EINVAL; /* ran out of words[] before bytes */
-               }
-
-               if (*end)
-                       *end++ = '\0';
-               words[n_words++] = buf;
-               buf = end;
-       }
-
-       return n_words;
-}
-
-enum intel_pipe_crc_object {
-       PIPE_CRC_OBJECT_PIPE,
-};
-
-static const char * const pipe_crc_objects[] = {
-       "pipe",
-};
-
-static int
-display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
-{
-       int i;
-
-       i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf);
-       if (i < 0)
-               return i;
-
-       *o = i;
-       return 0;
-}
-
-static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv,
-                                     const char *buf, enum pipe *pipe)
-{
-       const char name = buf[0];
-
-       if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes))
-               return -EINVAL;
-
-       *pipe = name - 'A';
-
-       return 0;
-}
-
 static int
 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
 {
@@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
        return 0;
 }
 
-static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
-                                char *buf, size_t len)
-{
-#define N_WORDS 3
-       int n_words;
-       char *words[N_WORDS];
-       enum pipe pipe;
-       enum intel_pipe_crc_object object;
-       enum intel_pipe_crc_source source;
-
-       n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
-       if (n_words != N_WORDS) {
-               DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
-                                N_WORDS);
-               return -EINVAL;
-       }
-
-       if (display_crc_ctl_parse_object(words[0], &object) < 0) {
-               DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
-               return -EINVAL;
-       }
-
-       if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) {
-               DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
-               return -EINVAL;
-       }
-
-       if (display_crc_ctl_parse_source(words[2], &source) < 0) {
-               DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
-               return -EINVAL;
-       }
-
-       return pipe_crc_set_source(dev_priv, pipe, source);
-}
-
-static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
-                                    size_t len, loff_t *offp)
-{
-       struct seq_file *m = file->private_data;
-       struct drm_i915_private *dev_priv = m->private;
-       char *tmpbuf;
-       int ret;
-
-       if (len == 0)
-               return 0;
-
-       if (len > PAGE_SIZE - 1) {
-               DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
-                                PAGE_SIZE);
-               return -E2BIG;
-       }
-
-       tmpbuf = memdup_user_nul(ubuf, len);
-       if (IS_ERR(tmpbuf))
-               return PTR_ERR(tmpbuf);
-
-       ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-
-       kfree(tmpbuf);
-       if (ret < 0)
-               return ret;
-
-       *offp += len;
-       return len;
-}
-
-const struct file_operations i915_display_crc_ctl_fops = {
-       .owner = THIS_MODULE,
-       .open = display_crc_ctl_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .write = display_crc_ctl_write
-};
-
 void intel_display_crc_init(struct drm_i915_private *dev_priv)
 {
        enum pipe pipe;
@@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv)
        for_each_pipe(dev_priv, pipe) {
                struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 
-               pipe_crc->opened = false;
                spin_lock_init(&pipe_crc->lock);
-               init_waitqueue_head(&pipe_crc->wq);
-       }
-}
-
-int intel_pipe_crc_create(struct drm_minor *minor)
-{
-       struct drm_i915_private *dev_priv = to_i915(minor->dev);
-       struct dentry *ent;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
-               struct pipe_crc_info *info = &i915_pipe_crc_data[i];
-
-               info->dev_priv = dev_priv;
-               ent = debugfs_create_file(info->name, S_IRUGO,
-                                         minor->debugfs_root, info,
-                                         &i915_pipe_crc_fops);
-               if (!ent)
-                       return -ENOMEM;
        }
-
-       return 0;
 }
 
 int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
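
Everything deleted above is the old i915-private debugfs CRC interface (the i915_pipe_<A|B|C>_crc files plus the i915_display_crc_ctl parser). Pipe CRCs remain available through the generic DRM CRC ABI that intel_crtc_set_crc_source() below plugs into, exposed as crtc-<n>/crc/control and crtc-<n>/crc/data under the device's debugfs directory. A hedged userspace sketch (assumes debugfs mounted at /sys/kernel/debug, card 0, CRTC 0, an active display, and a source name the driver accepts, e.g. "auto"):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char line[128];
            ssize_t n;
            int fd;

            /* Select the CRC source; the driver validates the string. */
            fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
            if (fd < 0 || write(fd, "auto", 4) != 4)
                    return 1;
            close(fd);

            /* Opening the data file arms CRC capture; each read yields
             * "frame crc0 crc1 ..." entries, one per vblank. */
            fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
            if (fd < 0)
                    return 1;
            n = read(fd, line, sizeof(line) - 1);
            if (n > 0) {
                    line[n] = '\0';
                    printf("%s", line);
            }
            close(fd);
            return 0;
    }
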
index 53aaaa3e6886d9eb472a1eb5b4f61955e12edc4a..43ae9de12ba3eb821c12d63e9c7b1ec923eace48 100644 (file)
@@ -6264,42 +6264,15 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
        return limits;
 }
 
-static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
 {
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
-       int new_power;
        u32 threshold_up = 0, threshold_down = 0; /* in % */
        u32 ei_up = 0, ei_down = 0;
 
-       new_power = rps->power;
-       switch (rps->power) {
-       case LOW_POWER:
-               if (val > rps->efficient_freq + 1 &&
-                   val > rps->cur_freq)
-                       new_power = BETWEEN;
-               break;
-
-       case BETWEEN:
-               if (val <= rps->efficient_freq &&
-                   val < rps->cur_freq)
-                       new_power = LOW_POWER;
-               else if (val >= rps->rp0_freq &&
-                        val > rps->cur_freq)
-                       new_power = HIGH_POWER;
-               break;
+       lockdep_assert_held(&rps->power.mutex);
 
-       case HIGH_POWER:
-               if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
-                   val < rps->cur_freq)
-                       new_power = BETWEEN;
-               break;
-       }
-       /* Max/min bins are special */
-       if (val <= rps->min_freq_softlimit)
-               new_power = LOW_POWER;
-       if (val >= rps->max_freq_softlimit)
-               new_power = HIGH_POWER;
-       if (new_power == rps->power)
+       if (new_power == rps->power.mode)
                return;
 
        /* Note the units here are not exactly 1us, but 1280ns. */
@@ -6362,12 +6335,71 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
                   GEN6_RP_DOWN_IDLE_AVG);
 
 skip_hw_write:
-       rps->power = new_power;
-       rps->up_threshold = threshold_up;
-       rps->down_threshold = threshold_down;
+       rps->power.mode = new_power;
+       rps->power.up_threshold = threshold_up;
+       rps->power.down_threshold = threshold_down;
+}
+
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+       struct intel_rps *rps = &dev_priv->gt_pm.rps;
+       int new_power;
+
+       new_power = rps->power.mode;
+       switch (rps->power.mode) {
+       case LOW_POWER:
+               if (val > rps->efficient_freq + 1 &&
+                   val > rps->cur_freq)
+                       new_power = BETWEEN;
+               break;
+
+       case BETWEEN:
+               if (val <= rps->efficient_freq &&
+                   val < rps->cur_freq)
+                       new_power = LOW_POWER;
+               else if (val >= rps->rp0_freq &&
+                        val > rps->cur_freq)
+                       new_power = HIGH_POWER;
+               break;
+
+       case HIGH_POWER:
+               if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+                   val < rps->cur_freq)
+                       new_power = BETWEEN;
+               break;
+       }
+       /* Max/min bins are special */
+       if (val <= rps->min_freq_softlimit)
+               new_power = LOW_POWER;
+       if (val >= rps->max_freq_softlimit)
+               new_power = HIGH_POWER;
+
+       mutex_lock(&rps->power.mutex);
+       if (rps->power.interactive)
+               new_power = HIGH_POWER;
+       rps_set_power(dev_priv, new_power);
+       mutex_unlock(&rps->power.mutex);
        rps->last_adj = 0;
 }
 
+void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive)
+{
+       struct intel_rps *rps = &i915->gt_pm.rps;
+
+       if (INTEL_GEN(i915) < 6)
+               return;
+
+       mutex_lock(&rps->power.mutex);
+       if (interactive) {
+               if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake))
+                       rps_set_power(i915, HIGH_POWER);
+       } else {
+               GEM_BUG_ON(!rps->power.interactive);
+               rps->power.interactive--;
+       }
+       mutex_unlock(&rps->power.mutex);
+}
+
 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
        struct intel_rps *rps = &dev_priv->gt_pm.rps;
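
intel_rps_mark_interactive() above is a nesting counter under rps->power.mutex: the 0 to 1 transition forces HIGH_POWER immediately (if the GT is awake), and while the count stays non-zero gen6_set_rps_thresholds() keeps overriding its computed mode with HIGH_POWER. The bookkeeping, reduced to a generic sketch (not i915 code):

    #include <linux/bug.h>
    #include <linux/mutex.h>

    enum power_mode { LOW_POWER, BETWEEN, HIGH_POWER };

    struct power_ctl {
            struct mutex mutex;
            int interactive;        /* nesting count of boost holders */
            enum power_mode mode;
    };

    static void mark_interactive(struct power_ctl *pc, bool on)
    {
            mutex_lock(&pc->mutex);
            if (on) {
                    if (!pc->interactive++)         /* 0 -> 1: boost now */
                            pc->mode = HIGH_POWER;
            } else {
                    WARN_ON(!pc->interactive);      /* unbalanced release */
                    pc->interactive--;
                    /* mode relaxes on the next threshold update */
            }
            mutex_unlock(&pc->mutex);
    }
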
@@ -6780,7 +6812,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
        u8 freq = rps->cur_freq;
 
        /* force a reset */
-       rps->power = -1;
+       rps->power.mode = -1;
        rps->cur_freq = -1;
 
        if (set(dev_priv, freq))
@@ -7347,11 +7379,11 @@ out:
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 {
-       if (WARN_ON(!dev_priv->vlv_pctx))
-               return;
+       struct drm_i915_gem_object *pctx;
 
-       i915_gem_object_put(dev_priv->vlv_pctx);
-       dev_priv->vlv_pctx = NULL;
+       pctx = fetch_and_zero(&dev_priv->vlv_pctx);
+       if (pctx)
+               i915_gem_object_put(pctx);
 }
 
 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
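
fetch_and_zero() in the cleanup path above is i915's take-ownership idiom: read the pointer, clear the field, and only then release, which also turns a second call into a silent no-op instead of the old WARN. It is roughly the following macro (a sketch of the helper from i915_utils.h; note it is not atomic, callers still rely on their own locking):

    #define fetch_and_zero(ptr) ({                          \
            typeof(*ptr) __T = *(ptr);                      \
            *(ptr) = (typeof(*ptr))0;                       \
            __T;                                            \
    })
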
@@ -9604,6 +9636,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 void intel_pm_setup(struct drm_i915_private *dev_priv)
 {
        mutex_init(&dev_priv->pcu_lock);
+       mutex_init(&dev_priv->gt_pm.rps.power.mutex);
 
        atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
 
index db27f2faa1dec9034c75e88c2ce2683368615cb6..4bd5768731ee26b282b5ad92f8d446af1f21e7a0 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
-static inline enum intel_display_power_domain
-psr_aux_domain(struct intel_dp *intel_dp)
-{
-       /* CNL HW requires corresponding AUX IOs to be powered up for PSR.
-        * However, for non-A AUX ports the corresponding non-EDP transcoders
-        * would have already enabled power well 2 and DC_OFF. This means we can
-        * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a
-        * specific AUX_IO reference without powering up any extra wells.
-        * Note that PSR is enabled only on Port A even though this function
-        * returns the correct domain for other ports too.
-        */
-       return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
-                                             intel_dp->aux_power_domain;
-}
-
-static void psr_aux_io_power_get(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
-       if (INTEL_GEN(dev_priv) < 10)
-               return;
-
-       intel_display_power_get(dev_priv, psr_aux_domain(intel_dp));
-}
-
-static void psr_aux_io_power_put(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
-
-       if (INTEL_GEN(dev_priv) < 10)
-               return;
-
-       intel_display_power_put(dev_priv, psr_aux_domain(intel_dp));
-}
-
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
 {
        u32 debug_mask, mask;
 
-       /* No PSR interrupts on VLV/CHV */
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return;
-
        mask = EDP_PSR_ERROR(TRANSCODER_EDP);
        debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
                     EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
@@ -201,15 +160,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
        }
 }
 
-static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp)
-{
-       uint8_t psr_caps = 0;
-
-       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1)
-               return false;
-       return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
 {
        uint8_t dprx = 0;
@@ -232,13 +182,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
 
 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
 {
-       u8 val = 0;
+       u8 val = 8; /* assume the worst if we can't read the value */
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
                val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
        else
-               DRM_ERROR("Unable to get sink synchronization latency\n");
+               DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
        return val;
 }
 
@@ -250,13 +200,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
        drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
                         sizeof(intel_dp->psr_dpcd));
 
-       if (intel_dp->psr_dpcd[0]) {
-               dev_priv->psr.sink_support = true;
-               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+       if (!intel_dp->psr_dpcd[0])
+               return;
+       DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+                     intel_dp->psr_dpcd[0]);
+
+       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+               DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+               return;
        }
+       dev_priv->psr.sink_support = true;
+       dev_priv->psr.sink_sync_latency =
+               intel_dp_get_sink_sync_latency(intel_dp);
 
        if (INTEL_GEN(dev_priv) >= 9 &&
            (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+               bool y_req = intel_dp->psr_dpcd[1] &
+                            DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+               bool alpm = intel_dp_get_alpm_status(intel_dp);
+
                /*
                 * All panels that support PSR version 03h (PSR2 +
                 * Y-coordinate) can handle Y-coordinates in VSC but we are
@@ -268,49 +230,19 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
                 * Y-coordinate requirement panels we would need to enable
                 * GTC first.
                 */
-               dev_priv->psr.sink_psr2_support =
-                               intel_dp_get_y_coord_required(intel_dp);
-               DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support
-                             ? "supported" : "not supported");
+               dev_priv->psr.sink_psr2_support = y_req && alpm;
+               DRM_DEBUG_KMS("PSR2 %ssupported\n",
+                             dev_priv->psr.sink_psr2_support ? "" : "not ");
 
                if (dev_priv->psr.sink_psr2_support) {
                        dev_priv->psr.colorimetry_support =
                                intel_dp_get_colorimetry_status(intel_dp);
-                       dev_priv->psr.alpm =
-                               intel_dp_get_alpm_status(intel_dp);
-                       dev_priv->psr.sink_sync_latency =
-                               intel_dp_get_sink_sync_latency(intel_dp);
                }
        }
 }
 
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t val;
-
-       val = I915_READ(VLV_PSRSTAT(pipe)) &
-             VLV_EDP_PSR_CURR_STATE_MASK;
-       return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
-              (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
-}
-
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *crtc_state)
-{
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-       uint32_t val;
-
-       /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
-       val  = I915_READ(VLV_VSCSDP(crtc->pipe));
-       val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
-       val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
-       I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
-}
-
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
-                             const struct intel_crtc_state *crtc_state)
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
+                               const struct intel_crtc_state *crtc_state)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
@@ -341,12 +273,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
                                        DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
 }
 
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-                          DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -373,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
        /* Start with bits set for DDI_AUX_CTL register */
-       aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+       aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
                                             aux_clock_divider);
 
        /* Select only valid bits for SRD_AUX_CTL */
@@ -381,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
        I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
 }
 
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
@@ -389,95 +315,64 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
        u8 dpcd_val = DP_PSR_ENABLE;
 
        /* Enable ALPM at sink for psr2 */
-       if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm)
-               drm_dp_dpcd_writeb(&intel_dp->aux,
-                               DP_RECEIVER_ALPM_CONFIG,
-                               DP_ALPM_ENABLE);
-
-       if (dev_priv->psr.psr2_enabled)
+       if (dev_priv->psr.psr2_enabled) {
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+                                  DP_ALPM_ENABLE);
                dpcd_val |= DP_PSR_ENABLE_PSR2;
+       }
+
        if (dev_priv->psr.link_standby)
                dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+       if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
+               dpcd_val |= DP_PSR_CRC_VERIFICATION;
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
 
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
-static void vlv_psr_enable_source(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *crtc_state)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-
-       /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */
-       I915_WRITE(VLV_PSRCTL(crtc->pipe),
-                  VLV_EDP_PSR_MODE_SW_TIMER |
-                  VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
-                  VLV_EDP_PSR_ENABLE);
-}
-
-static void vlv_psr_activate(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *crtc = dig_port->base.base.crtc;
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-       /*
-        * Let's do the transition from PSR_state 1 (inactive) to
-        * PSR_state 2 (transition to active - static frame transmission).
-        * Then Hardware is responsible for the transition to
-        * PSR_state 3 (active - no Remote Frame Buffer (RFB) update).
-        */
-       I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
-                  VLV_EDP_PSR_ACTIVE_ENTRY);
-}
-
 static void hsw_activate_psr1(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
+       u32 max_sleep_time = 0x1f;
+       u32 val = EDP_PSR_ENABLE;
 
-       uint32_t max_sleep_time = 0x1f;
-       /*
-        * Let's respect VBT in case VBT asks a higher idle_frame value.
-        * Let's use 6 as the minimum to cover all known cases including
-        * the off-by-one issue that HW has in some cases. Also there are
-        * cases where sink should be able to train
-        * with the 5 or 6 idle patterns.
+       /* Let's use 6 as the minimum to cover all known cases including the
+        * off-by-one issue that HW has in some cases.
         */
-       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-       uint32_t val = EDP_PSR_ENABLE;
+       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
 
-       val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+       /* sink_sync_latency of 8 means the source has to wait for more than
+        * 8 frames; we'll go with 9 frames for now.
+        */
+       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
        val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 
+       val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
        if (IS_HASWELL(dev_priv))
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
        if (dev_priv->psr.link_standby)
                val |= EDP_PSR_LINK_STANDBY;
 
-       if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
-               val |= EDP_PSR_TP1_TIME_2500us;
-       else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
-               val |= EDP_PSR_TP1_TIME_500us;
-       else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+       if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
+               val |=  EDP_PSR_TP1_TIME_0us;
+       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
                val |= EDP_PSR_TP1_TIME_100us;
+       else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
+               val |= EDP_PSR_TP1_TIME_500us;
        else
-               val |= EDP_PSR_TP1_TIME_0us;
+               val |= EDP_PSR_TP1_TIME_2500us;
 
-       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
-               val |= EDP_PSR_TP2_TP3_TIME_2500us;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
-               val |= EDP_PSR_TP2_TP3_TIME_500us;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
+               val |=  EDP_PSR_TP2_TP3_TIME_0us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
                val |= EDP_PSR_TP2_TP3_TIME_100us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+               val |= EDP_PSR_TP2_TP3_TIME_500us;
        else
-               val |= EDP_PSR_TP2_TP3_TIME_0us;
+               val |= EDP_PSR_TP2_TP3_TIME_2500us;
 
        if (intel_dp_source_supports_hbr2(intel_dp) &&
            drm_dp_tps3_supported(intel_dp->dpcd))
@@ -485,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
        else
                val |= EDP_PSR_TP1_TP2_SEL;
 
+       if (INTEL_GEN(dev_priv) >= 8)
+               val |= EDP_PSR_CRC_ENABLE;
+
        val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
        I915_WRITE(EDP_PSR_CTL, val);
 }
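
The hsw_activate_psr1() hunk also reflects the VBT change from an opaque encoded scale to plain microseconds (tp1_wakeup_time_us, tp2_tp3_wakeup_time_us): the comparison chain now rounds the requested wakeup time up to the smallest bucket the register field can express. Factored out, the mapping is simply (a hypothetical helper, not part of the patch; the EDP_PSR_TP1_TIME_* values are the register field encodings from i915_reg.h):

    /* Round a VBT wakeup time in microseconds up to the nearest
     * value the TP1 field supports: 0, 100, 500 or 2500 us. */
    static u32 psr_tp1_time_bits(int wakeup_time_us)
    {
            if (wakeup_time_us == 0)
                    return EDP_PSR_TP1_TIME_0us;
            else if (wakeup_time_us <= 100)
                    return EDP_PSR_TP1_TIME_100us;
            else if (wakeup_time_us <= 500)
                    return EDP_PSR_TP1_TIME_500us;
            else
                    return EDP_PSR_TP1_TIME_2500us;
    }
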
@@ -494,15 +392,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       /*
-        * Let's respect VBT in case VBT asks a higher idle_frame value.
-        * Let's use 6 as the minimum to cover all known cases including
-        * the off-by-one issue that HW has in some cases. Also there are
-        * cases where sink should be able to train
-        * with the 5 or 6 idle patterns.
+       u32 val;
+
+       /* Let's use 6 as the minimum to cover all known cases including the
+        * off-by-one issue that HW has in some cases.
         */
-       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-       u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+       int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+       idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+       val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
 
        /* FIXME: selective update is probably totally broken because it doesn't
         * mesh at all with our frontbuffer tracking. And the hw alone isn't
@@ -513,36 +411,19 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 
        val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
 
-       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
-               val |= EDP_PSR2_TP2_TIME_2500;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
-               val |= EDP_PSR2_TP2_TIME_500;
-       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
-               val |= EDP_PSR2_TP2_TIME_100;
+       if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
+           dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+               val |= EDP_PSR2_TP2_TIME_50us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+               val |= EDP_PSR2_TP2_TIME_100us;
+       else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+               val |= EDP_PSR2_TP2_TIME_500us;
        else
-               val |= EDP_PSR2_TP2_TIME_50;
+               val |= EDP_PSR2_TP2_TIME_2500us;
 
        I915_WRITE(EDP_PSR2_CTL, val);
 }
 
-static void hsw_psr_activate(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       /* On HSW+ after we enable PSR on source it will activate it
-        * as soon as it match configure idle_frame count. So
-        * we just actually enable it here on activation time.
-        */
-
-       /* psr1 and psr2 are mutually exclusive.*/
-       if (dev_priv->psr.psr2_enabled)
-               hsw_activate_psr2(intel_dp);
-       else
-               hsw_activate_psr1(intel_dp);
-}
-
 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                                    struct intel_crtc_state *crtc_state)
 {
@@ -602,17 +483,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
         * ones. Since by Display design transcoder EDP is tied to port A
         * we can safely escape based on the port A.
         */
-       if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) {
+       if (dig_port->base.port != PORT_A) {
                DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
                return;
        }
 
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
-           !dev_priv->psr.link_standby) {
-               DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
-               return;
-       }
-
        if (IS_HASWELL(dev_priv) &&
            I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
                      S3D_ENABLE) {
@@ -640,11 +515,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
                return;
        }
 
-       if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
-               DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n");
-               return;
-       }
-
        crtc_state->has_psr = true;
        crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
        DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
@@ -656,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       if (dev_priv->psr.psr2_enabled)
+       if (INTEL_GEN(dev_priv) >= 9)
                WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
-       else
-               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);
 
-       dev_priv->psr.activate(intel_dp);
+       /* psr1 and psr2 are mutually exclusive. */
+       if (dev_priv->psr.psr2_enabled)
+               hsw_activate_psr2(intel_dp);
+       else
+               hsw_activate_psr1(intel_dp);
+
        dev_priv->psr.active = true;
 }
 
-static void hsw_psr_enable_source(struct intel_dp *intel_dp,
-                                 const struct intel_crtc_state *crtc_state)
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+                                   const struct intel_crtc_state *crtc_state)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 
-       psr_aux_io_power_get(intel_dp);
-
        /* Only HSW and BDW have PSR AUX registers that need to be set up.
         * SKL+ use hardcoded values for PSR AUX transactions.
         */
@@ -712,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
                           EDP_PSR_DEBUG_MASK_MEMUP |
                           EDP_PSR_DEBUG_MASK_HPD |
                           EDP_PSR_DEBUG_MASK_LPSP |
-                          EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+                          EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
+                          EDP_PSR_DEBUG_MASK_MAX_SLEEP);
        }
 }
 
@@ -746,64 +619,19 @@ void intel_psr_enable(struct intel_dp *intel_dp,
        dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
        dev_priv->psr.busy_frontbuffer_bits = 0;
 
-       dev_priv->psr.setup_vsc(intel_dp, crtc_state);
-       dev_priv->psr.enable_sink(intel_dp);
-       dev_priv->psr.enable_source(intel_dp, crtc_state);
+       intel_psr_setup_vsc(intel_dp, crtc_state);
+       intel_psr_enable_sink(intel_dp);
+       intel_psr_enable_source(intel_dp, crtc_state);
        dev_priv->psr.enabled = intel_dp;
 
-       if (INTEL_GEN(dev_priv) >= 9) {
-               intel_psr_activate(intel_dp);
-       } else {
-               /*
-                * FIXME: Activation should happen immediately since this
-                * function is just called after pipe is fully trained and
-                * enabled.
-                * However on some platforms we face issues when first
-                * activation follows a modeset so quickly.
-                *     - On VLV/CHV we get bank screen on first activation
-                *     - On HSW/BDW we get a recoverable frozen screen until
-                *       next exit-activate sequence.
-                */
-               schedule_delayed_work(&dev_priv->psr.work,
-                                     msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-       }
+       intel_psr_activate(intel_dp);
 
 unlock:
        mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void vlv_psr_disable(struct intel_dp *intel_dp,
-                           const struct intel_crtc_state *old_crtc_state)
-{
-       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
-       uint32_t val;
-
-       if (dev_priv->psr.active) {
-               /* Put VLV PSR back to PSR_state 0 (disabled). */
-               if (intel_wait_for_register(dev_priv,
-                                           VLV_PSRSTAT(crtc->pipe),
-                                           VLV_EDP_PSR_IN_TRANS,
-                                           0,
-                                           1))
-                       WARN(1, "PSR transition took longer than expected\n");
-
-               val = I915_READ(VLV_PSRCTL(crtc->pipe));
-               val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
-               val &= ~VLV_EDP_PSR_ENABLE;
-               val &= ~VLV_EDP_PSR_MODE_MASK;
-               I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
-
-               dev_priv->psr.active = false;
-       } else {
-               WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
-       }
-}
-
-static void hsw_psr_disable(struct intel_dp *intel_dp,
-                           const struct intel_crtc_state *old_crtc_state)
+static void
+intel_psr_disable_source(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -842,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
                else
                        WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        }
+}
+
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       lockdep_assert_held(&dev_priv->psr.lock);
+
+       if (!dev_priv->psr.enabled)
+               return;
+
+       intel_psr_disable_source(intel_dp);
 
-       psr_aux_io_power_put(intel_dp);
+       /* Disable PSR on Sink */
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+       dev_priv->psr.enabled = NULL;
 }
 
 /**
@@ -867,23 +712,49 @@ void intel_psr_disable(struct intel_dp *intel_dp,
                return;
 
        mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
+       intel_psr_disable_locked(intel_dp);
+       mutex_unlock(&dev_priv->psr.lock);
+       cancel_work_sync(&dev_priv->psr.work);
+}
 
-       dev_priv->psr.disable_source(intel_dp, old_crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       i915_reg_t reg;
+       u32 mask;
 
-       /* Disable PSR on Sink */
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+       if (!new_crtc_state->has_psr)
+               return 0;
 
-       dev_priv->psr.enabled = NULL;
-       mutex_unlock(&dev_priv->psr.lock);
+       /*
+        * The sole caller right now is intel_pipe_update_start(),
+        * which cannot race with psr_enable/disable, the only
+        * places where psr2_enabled is written. So we don't need
+        * to acquire psr.lock here. More importantly, we want the
+        * latency inside intel_pipe_update_start() to be as low
+        * as possible; taking psr.lock when it isn't required
+        * would only add latency to the atomic update path.
+        */
+       if (dev_priv->psr.psr2_enabled) {
+               reg = EDP_PSR2_STATUS;
+               mask = EDP_PSR2_STATUS_STATE_MASK;
+       } else {
+               reg = EDP_PSR_STATUS;
+               mask = EDP_PSR_STATUS_STATE_MASK;
+       }
 
-       cancel_delayed_work_sync(&dev_priv->psr.work);
+       /*
+        * Max time for PSR to idle = Inverse of the refresh rate +
+        * 6 ms of exit training time + 1.5 ms of aux channel
+        * handshake. 50 msec is defensive enough to cover everything.
+        */
+       return intel_wait_for_register(dev_priv, reg, mask,
+                                      EDP_PSR_STATUS_STATE_IDLE, 50);
 }
 
-static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
 {
        struct intel_dp *intel_dp;
        i915_reg_t reg;
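
To put numbers on the 50 ms budget in intel_psr_wait_for_idle() above: at 60 Hz one refresh takes about 16.7 ms, so the worst case is roughly 16.7 + 6 + 1.5 = 24.2 ms; even on a 30 Hz panel it is 33.3 + 6 + 1.5 = 40.8 ms, still comfortably inside the timeout.
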
@@ -894,21 +765,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
        if (!intel_dp)
                return false;
 
-       if (HAS_DDI(dev_priv)) {
-               if (dev_priv->psr.psr2_enabled) {
-                       reg = EDP_PSR2_STATUS;
-                       mask = EDP_PSR2_STATUS_STATE_MASK;
-               } else {
-                       reg = EDP_PSR_STATUS;
-                       mask = EDP_PSR_STATUS_STATE_MASK;
-               }
+       if (dev_priv->psr.psr2_enabled) {
+               reg = EDP_PSR2_STATUS;
+               mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
-               struct drm_crtc *crtc =
-                       dp_to_dig_port(intel_dp)->base.base.crtc;
-               enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
-               reg = VLV_PSRSTAT(pipe);
-               mask = VLV_EDP_PSR_IN_TRANS;
+               reg = EDP_PSR_STATUS;
+               mask = EDP_PSR_STATUS_STATE_MASK;
        }
 
        mutex_unlock(&dev_priv->psr.lock);
@@ -925,17 +787,20 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
 static void intel_psr_work(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), psr.work.work);
+               container_of(work, typeof(*dev_priv), psr.work);
 
        mutex_lock(&dev_priv->psr.lock);
 
+       if (!dev_priv->psr.enabled)
+               goto unlock;
+
        /*
         * We have to make sure PSR is ready for re-enable
         * otherwise it keeps disabled until next full enable/disable cycle.
         * PSR might take some time to get fully disabled
         * and be ready for re-enable.
         */
-       if (!psr_wait_for_idle(dev_priv))
+       if (!__psr_wait_for_idle_locked(dev_priv))
                goto unlock;
 
        /*
@@ -943,7 +808,7 @@ static void intel_psr_work(struct work_struct *work)
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
-       if (dev_priv->psr.busy_frontbuffer_bits)
+       if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
                goto unlock;
 
        intel_psr_activate(dev_priv->psr.enabled);
@@ -953,102 +818,23 @@ unlock:
 
 static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-       struct intel_dp *intel_dp = dev_priv->psr.enabled;
-       struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
-       enum pipe pipe = to_intel_crtc(crtc)->pipe;
        u32 val;
 
        if (!dev_priv->psr.active)
                return;
 
-       if (HAS_DDI(dev_priv)) {
-               if (dev_priv->psr.psr2_enabled) {
-                       val = I915_READ(EDP_PSR2_CTL);
-                       WARN_ON(!(val & EDP_PSR2_ENABLE));
-                       I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
-               } else {
-                       val = I915_READ(EDP_PSR_CTL);
-                       WARN_ON(!(val & EDP_PSR_ENABLE));
-                       I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
-               }
+       if (dev_priv->psr.psr2_enabled) {
+               val = I915_READ(EDP_PSR2_CTL);
+               WARN_ON(!(val & EDP_PSR2_ENABLE));
+               I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
        } else {
-               val = I915_READ(VLV_PSRCTL(pipe));
-
-               /*
-                * Here we do the transition drirectly from
-                * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to
-                * PSR_state 5 (exit).
-                * PSR State 4 (active with single frame update) can be skipped.
-                * On PSR_state 5 (exit) Hardware is responsible to transition
-                * back to PSR_state 1 (inactive).
-                * Now we are at Same state after vlv_psr_enable_source.
-                */
-               val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
-               I915_WRITE(VLV_PSRCTL(pipe), val);
-
-               /*
-                * Send AUX wake up - Spec says after transitioning to PSR
-                * active we have to send AUX wake up by writing 01h in DPCD
-                * 600h of sink device.
-                * XXX: This might slow down the transition, but without this
-                * HW doesn't complete the transition to PSR_state 1 and we
-                * never get the screen updated.
-                */
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
-                                  DP_SET_POWER_D0);
+               val = I915_READ(EDP_PSR_CTL);
+               WARN_ON(!(val & EDP_PSR_ENABLE));
+               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
        }
-
        dev_priv->psr.active = false;
 }
 
-/**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
-                                  unsigned frontbuffer_bits)
-{
-       struct drm_crtc *crtc;
-       enum pipe pipe;
-       u32 val;
-
-       if (!CAN_PSR(dev_priv))
-               return;
-
-       /*
-        * Single frame update is already supported on BDW+ but it requires
-        * many W/A and it isn't really needed.
-        */
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
-               return;
-
-       mutex_lock(&dev_priv->psr.lock);
-       if (!dev_priv->psr.enabled) {
-               mutex_unlock(&dev_priv->psr.lock);
-               return;
-       }
-
-       crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
-       pipe = to_intel_crtc(crtc)->pipe;
-
-       if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
-               val = I915_READ(VLV_PSRCTL(pipe));
-
-               /*
-                * We need to set this bit before writing registers for a flip.
-                * This bit will self-clear once the hardware reaches the PSR active state.
-                */
-               I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
-       }
-       mutex_unlock(&dev_priv->psr.lock);
-}
-
 /**
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
@@ -1071,7 +857,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
        if (!CAN_PSR(dev_priv))
                return;
 
-       if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+       if (origin == ORIGIN_FLIP)
                return;
 
        mutex_lock(&dev_priv->psr.lock);
@@ -1114,7 +900,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
        if (!CAN_PSR(dev_priv))
                return;
 
-       if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP)
+       if (origin == ORIGIN_FLIP)
                return;
 
        mutex_lock(&dev_priv->psr.lock);
@@ -1131,8 +917,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 
        /* By definition flush = invalidate + flush */
        if (frontbuffer_bits) {
-               if (dev_priv->psr.psr2_enabled ||
-                   IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               if (dev_priv->psr.psr2_enabled) {
                        intel_psr_exit(dev_priv);
                } else {
                        /*
@@ -1149,9 +934,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
        }
 
        if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-               if (!work_busy(&dev_priv->psr.work.work))
-                       schedule_delayed_work(&dev_priv->psr.work,
-                                             msecs_to_jiffies(100));
+               schedule_work(&dev_priv->psr.work);
        mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -1184,38 +967,64 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                /* HSW and BDW require workarounds that we don't implement. */
                dev_priv->psr.link_standby = false;
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               /* On VLV and CHV only standby mode is supported. */
-               dev_priv->psr.link_standby = true;
        else
                /* For new platforms let's respect VBT back again */
                dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
 
-       /* Override link_standby x link_off defaults */
-       if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
-               DRM_DEBUG_KMS("PSR: Forcing link standby\n");
-               dev_priv->psr.link_standby = true;
-       }
-       if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
-               DRM_DEBUG_KMS("PSR: Forcing main link off\n");
-               dev_priv->psr.link_standby = false;
+       INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+       mutex_init(&dev_priv->psr.lock);
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_psr *psr = &dev_priv->psr;
+       u8 val;
+       const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+                         DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+                         DP_PSR_LINK_CRC_ERROR;
+
+       if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+               return;
+
+       mutex_lock(&psr->lock);
+
+       if (psr->enabled != intel_dp)
+               goto exit;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
+               DRM_ERROR("PSR_STATUS dpcd read failed\n");
+               goto exit;
        }
 
-       INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
-       mutex_init(&dev_priv->psr.lock);
+       if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
+               DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+               intel_psr_disable_locked(intel_dp);
+       }
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->psr.enable_source = vlv_psr_enable_source;
-               dev_priv->psr.disable_source = vlv_psr_disable;
-               dev_priv->psr.enable_sink = vlv_psr_enable_sink;
-               dev_priv->psr.activate = vlv_psr_activate;
-               dev_priv->psr.setup_vsc = vlv_psr_setup_vsc;
-       } else {
-               dev_priv->psr.has_hw_tracking = true;
-               dev_priv->psr.enable_source = hsw_psr_enable_source;
-               dev_priv->psr.disable_source = hsw_psr_disable;
-               dev_priv->psr.enable_sink = hsw_psr_enable_sink;
-               dev_priv->psr.activate = hsw_psr_activate;
-               dev_priv->psr.setup_vsc = hsw_psr_setup_vsc;
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
+               DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
+               goto exit;
        }
+
+       if (val & DP_PSR_RFB_STORAGE_ERROR)
+               DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+       if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+               DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+       if (val & DP_PSR_LINK_CRC_ERROR)
+               DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+
+       if (val & ~errors)
+               DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
+                         val & ~errors);
+       if (val & errors)
+               intel_psr_disable_locked(intel_dp);
+       /* clear status register */
+       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+
+       /* TODO: handle PSR2 errors */
+exit:
+       mutex_unlock(&psr->lock);
 }
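
The new intel_psr_short_pulse() follows a common DPCD error-handling pattern:
read the sink's error-status byte, log each understood bit, complain about
anything outside the known mask, disable the feature on any real error, and
write the value back to clear the latched bits. A standalone sketch of that
pattern (the bit values and messages below are stand-ins; only the three
DP_PSR_* error bits come from the patch itself):

#include <stdint.h>
#include <stdio.h>

#define RFB_STORAGE_ERROR    (1u << 0)  /* stand-ins for the DP_PSR_* bits */
#define VSC_SDP_UNCORR_ERROR (1u << 1)
#define LINK_CRC_ERROR       (1u << 2)

/* Returns nonzero if the feature should be disabled. */
static int decode_sink_errors(uint8_t status)
{
        const uint8_t known = RFB_STORAGE_ERROR |
                              VSC_SDP_UNCORR_ERROR |
                              LINK_CRC_ERROR;

        if (status & RFB_STORAGE_ERROR)
                printf("RFB storage error\n");
        if (status & VSC_SDP_UNCORR_ERROR)
                printf("VSC SDP uncorrectable error\n");
        if (status & LINK_CRC_ERROR)
                printf("link CRC error\n");
        if (status & ~known)
                printf("unhandled error bits %#x\n", status & ~known);

        /* the caller also writes status back to clear the latched bits */
        return status & known;
}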
index 8f19349a6055e9b84ee9949244fb448ae06f95c5..33faad3197feea61430e5eb2ed7c3ed34160c4da 100644 (file)
@@ -496,6 +496,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
                DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
                                 engine->name, I915_READ_HEAD(engine));
 
+       /* Check that the ring offsets point within the ring! */
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
+
        intel_ring_update_space(ring);
        I915_WRITE_HEAD(engine, ring->head);
        I915_WRITE_TAIL(engine, ring->tail);
@@ -520,8 +524,6 @@ static int init_ring_common(struct intel_engine_cs *engine)
                goto out;
        }
 
-       intel_engine_init_hangcheck(engine);
-
        if (INTEL_GEN(dev_priv) > 2)
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 
@@ -531,16 +533,33 @@ out:
        return ret;
 }
 
-static void reset_ring_common(struct intel_engine_cs *engine,
-                             struct i915_request *request)
+static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
 {
-       /*
-        * RC6 must be prevented until the reset is complete and the engine
-        * reinitialised. If it occurs in the middle of this sequence, the
-        * state written to/loaded from the power context is ill-defined (e.g.
-        * the PP_BASE_DIR may be lost).
-        */
-       assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);
+       intel_engine_stop_cs(engine);
+
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
+
+       return i915_gem_find_active_request(engine);
+}
+
+static void skip_request(struct i915_request *rq)
+{
+       void *vaddr = rq->ring->vaddr;
+       u32 head;
+
+       head = rq->infix;
+       if (rq->postfix < head) {
+               memset32(vaddr + head, MI_NOOP,
+                        (rq->ring->size - head) / sizeof(u32));
+               head = 0;
+       }
+       memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
+}
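
skip_request() has to cope with the span from rq->infix to rq->postfix
wrapping past the end of the ring, which is why the fill may be split in two.
The same wrap-aware fill in self-contained form, using byte offsets into a
power-of-two buffer as the driver does (ring_fill_noops and fill32 are
illustrative stand-ins for the kernel's memset32()):

#include <stddef.h>
#include <stdint.h>

static void fill32(void *p, uint32_t v, size_t count)
{
        uint32_t *dw = p;

        while (count--)
                *dw++ = v;      /* stand-in for memset32() */
}

#define NOOP 0u                 /* MI_NOOP encodes as all zeroes */

static void ring_fill_noops(uint8_t *vaddr, uint32_t size,
                            uint32_t head, uint32_t tail)
{
        if (tail < head) {      /* the span wraps past the end... */
                fill32(vaddr + head, NOOP, (size - head) / sizeof(uint32_t));
                head = 0;       /* ...so restart from offset 0 */
        }
        fill32(vaddr + head, NOOP, (tail - head) / sizeof(uint32_t));
}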
+
+static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+       GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
 
        /*
         * Try to restore the logical GPU state to match the continuation
@@ -556,47 +575,18 @@ static void reset_ring_common(struct intel_engine_cs *engine,
         * If the request was innocent, we try to replay the request with
         * the restored context.
         */
-       if (request) {
-               struct drm_i915_private *dev_priv = request->i915;
-               struct intel_context *ce = to_intel_context(request->ctx,
-                                                           engine);
-               struct i915_hw_ppgtt *ppgtt;
-
-               if (ce->state) {
-                       I915_WRITE(CCID,
-                                  i915_ggtt_offset(ce->state) |
-                                  BIT(8) /* must be set! */ |
-                                  CCID_EXTENDED_STATE_SAVE |
-                                  CCID_EXTENDED_STATE_RESTORE |
-                                  CCID_EN);
-               }
-
-               ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
-               if (ppgtt) {
-                       u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
-
-                       I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
-                       I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
-
-                       /* Wait for the PD reload to complete */
-                       if (intel_wait_for_register(dev_priv,
-                                                   RING_PP_DIR_BASE(engine),
-                                                   BIT(0), 0,
-                                                   10))
-                               DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
-
-                       ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-               }
-
+       if (rq) {
                /* If the rq hung, jump to its breadcrumb and skip the batch */
-               if (request->fence.error == -EIO)
-                       request->ring->head = request->postfix;
-       } else {
-               engine->legacy_active_context = NULL;
-               engine->legacy_active_ppgtt = NULL;
+               rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
+               if (rq->fence.error == -EIO)
+                       skip_request(rq);
        }
 }
 
+static void reset_finish(struct intel_engine_cs *engine)
+{
+}
+
 static int intel_rcs_ctx_init(struct i915_request *rq)
 {
        int ret;
@@ -1033,6 +1023,8 @@ int intel_ring_pin(struct intel_ring *ring,
                flags |= PIN_OFFSET_BIAS | offset_bias;
        if (vma->obj->stolen)
                flags |= PIN_MAPPABLE;
+       else
+               flags |= PIN_HIGH;
 
        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
                if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
@@ -1066,6 +1058,8 @@ err:
 
 void intel_ring_reset(struct intel_ring *ring, u32 tail)
 {
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
+
        ring->tail = tail;
        ring->head = tail;
        ring->emit = tail;
@@ -1093,6 +1087,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 static struct i915_vma *
 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 {
+       struct i915_address_space *vm = &dev_priv->ggtt.vm;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
 
@@ -1102,10 +1097,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       /* mark ring buffers as read-only from GPU side by default */
-       obj->gt_ro = 1;
+       /*
+        * Mark ring buffers as read-only from the GPU side (so no stray overwrites)
+        * if supported by the platform's GGTT.
+        */
+       if (vm->has_read_only)
+               i915_gem_object_set_readonly(obj);
 
-       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma))
                goto err;
 
@@ -1169,10 +1168,46 @@ intel_ring_free(struct intel_ring *ring)
        kfree(ring);
 }
 
-static int context_pin(struct intel_context *ce)
+static void intel_ring_context_destroy(struct intel_context *ce)
 {
-       struct i915_vma *vma = ce->state;
-       int ret;
+       GEM_BUG_ON(ce->pin_count);
+
+       if (!ce->state)
+               return;
+
+       GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
+       i915_gem_object_put(ce->state->obj);
+}
+
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt;
+       int err = 0;
+
+       ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+       if (ppgtt)
+               err = gen6_ppgtt_pin(ppgtt);
+
+       return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+       struct i915_hw_ppgtt *ppgtt;
+
+       ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+       if (ppgtt)
+               gen6_ppgtt_unpin(ppgtt);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+       struct i915_vma *vma;
+       int err;
+
+       vma = ce->state;
+       if (!vma)
+               return 0;
 
        /*
         * Clear this page out of any CPU caches for coherent swap-in/out.
@@ -1180,13 +1215,43 @@ static int context_pin(struct intel_context *ce)
         * on an active context (which by nature is already on the GPU).
         */
        if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-               ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
-               if (ret)
-                       return ret;
+               err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+               if (err)
+                       return err;
        }
 
-       return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
-                           PIN_GLOBAL | PIN_HIGH);
+       err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+                          PIN_GLOBAL | PIN_HIGH);
+       if (err)
+               return err;
+
+       /*
+        * And mark it as a globally pinned object to let the shrinker know
+        * it cannot reclaim the object until we release it.
+        */
+       vma->obj->pin_global++;
+
+       return 0;
+}
+
+static void __context_unpin(struct intel_context *ce)
+{
+       struct i915_vma *vma;
+
+       vma = ce->state;
+       if (!vma)
+               return;
+
+       vma->obj->pin_global--;
+       i915_vma_unpin(vma);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+       __context_unpin_ppgtt(ce->gem_context);
+       __context_unpin(ce);
+
+       i915_gem_context_put(ce->gem_context);
 }
 
 static struct i915_vma *
@@ -1243,7 +1308,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
                i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
        }
 
-       vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
@@ -1258,81 +1323,79 @@ err_obj:
        return ERR_PTR(err);
 }
 
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+                  struct i915_gem_context *ctx,
+                  struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
-       int ret;
-
-       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-       if (likely(ce->pin_count++))
-               goto out;
-       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+       int err;
 
        if (!ce->state && engine->context_size) {
                struct i915_vma *vma;
 
                vma = alloc_context_vma(engine);
                if (IS_ERR(vma)) {
-                       ret = PTR_ERR(vma);
+                       err = PTR_ERR(vma);
                        goto err;
                }
 
                ce->state = vma;
        }
 
-       if (ce->state) {
-               ret = context_pin(ce);
-               if (ret)
-                       goto err;
+       err = __context_pin(ce);
+       if (err)
+               goto err;
 
-               ce->state->obj->pin_global++;
-       }
+       err = __context_pin_ppgtt(ce->gem_context);
+       if (err)
+               goto err_unpin;
 
        i915_gem_context_get(ctx);
 
-out:
        /* One ringbuffer to rule them all */
-       return engine->buffer;
+       GEM_BUG_ON(!engine->buffer);
+       ce->ring = engine->buffer;
+
+       return ce;
 
+err_unpin:
+       __context_unpin(ce);
 err:
        ce->pin_count = 0;
-       return ERR_PTR(ret);
+       return ERR_PTR(err);
 }
 
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
-                                    struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+       .unpin = intel_ring_context_unpin,
+       .destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+                      struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
 
        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-       GEM_BUG_ON(ce->pin_count == 0);
 
-       if (--ce->pin_count)
-               return;
+       if (likely(ce->pin_count++))
+               return ce;
+       GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-       if (ce->state) {
-               ce->state->obj->pin_global--;
-               i915_vma_unpin(ce->state);
-       }
+       ce->ops = &ring_context_ops;
 
-       i915_gem_context_put(ctx);
+       return __ring_context_pin(engine, ctx, ce);
 }
 
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
-       struct intel_ring *ring;
        struct i915_timeline *timeline;
+       struct intel_ring *ring;
+       unsigned int size;
        int err;
 
        intel_engine_setup_common(engine);
 
-       err = intel_engine_init_common(engine);
-       if (err)
-               goto err;
-
        timeline = i915_timeline_create(engine->i915, engine->name);
        if (IS_ERR(timeline)) {
                err = PTR_ERR(timeline);
@@ -1354,8 +1417,23 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        GEM_BUG_ON(engine->buffer);
        engine->buffer = ring;
 
+       size = PAGE_SIZE;
+       if (HAS_BROKEN_CS_TLB(engine->i915))
+               size = I830_WA_SIZE;
+       err = intel_engine_create_scratch(engine, size);
+       if (err)
+               goto err_unpin;
+
+       err = intel_engine_init_common(engine);
+       if (err)
+               goto err_scratch;
+
        return 0;
 
+err_scratch:
+       intel_engine_cleanup_scratch(engine);
+err_unpin:
+       intel_ring_unpin(ring);
 err_ring:
        intel_ring_free(ring);
 err:
@@ -1392,6 +1470,48 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
                intel_ring_reset(engine->buffer, 0);
 }
 
+static int load_pd_dir(struct i915_request *rq,
+                      const struct i915_hw_ppgtt *ppgtt)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+       *cs++ = PP_DIR_DCLV_2G;
+
+       *cs++ = MI_LOAD_REGISTER_IMM(1);
+       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+       *cs++ = ppgtt->pd.base.ggtt_offset << 10;
+
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
+static int flush_pd_dir(struct i915_request *rq)
+{
+       const struct intel_engine_cs * const engine = rq->engine;
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 4);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Stall until the page table load is complete */
+       *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+       *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+       *cs++ = i915_ggtt_offset(engine->scratch);
+       *cs++ = MI_NOOP;
+
+       intel_ring_advance(rq, cs);
+       return 0;
+}
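
load_pd_dir() reloads the page-directory registers from within the request
stream via MI_LOAD_REGISTER_IMM, and flush_pd_dir() then issues
MI_STORE_REGISTER_MEM into the scratch page as a posting read, stalling until
the load has landed. Both follow the standard emission pattern:
intel_ring_begin() reserves an exact dword count, the caller fills it, and
intel_ring_advance() publishes the new tail. A hedged sketch of a generic
one-register LRI helper built the same way (emit_lri is illustrative, not an
existing function):

static int emit_lri(struct i915_request *rq, i915_reg_t reg, u32 value)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 4);   /* 3 dwords of LRI + 1 NOOP of padding */
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_LOAD_REGISTER_IMM(1);
        *cs++ = i915_mmio_reg_offset(reg);
        *cs++ = value;
        *cs++ = MI_NOOP;

        intel_ring_advance(rq, cs);
        return 0;
}

load_pd_dir() above is then just two such writes, RING_PP_DIR_DCLV followed
by RING_PP_DIR_BASE, fused into a single six-dword reservation.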
+
 static inline int mi_set_context(struct i915_request *rq, u32 flags)
 {
        struct drm_i915_private *i915 = rq->i915;
@@ -1402,6 +1522,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
                INTEL_INFO(i915)->num_rings - 1 :
                0;
+       bool force_restore = false;
        int len;
        u32 *cs;
 
@@ -1415,6 +1536,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        len = 4;
        if (IS_GEN7(i915))
                len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+       if (flags & MI_FORCE_RESTORE) {
+               GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
+               flags &= ~MI_FORCE_RESTORE;
+               force_restore = true;
+               len += 2;
+       }
 
        cs = intel_ring_begin(rq, len);
        if (IS_ERR(cs))
@@ -1439,9 +1566,29 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                }
        }
 
+       if (force_restore) {
+               /*
+                * The HW doesn't handle being told to restore the current
+                * context very well. Quite often it likes to go off and
+                * sulk, especially when it is meant to be reloading PP_DIR.
+                * A very simple fix to force the reload is to simply switch
+                * away from the current context and back again.
+                *
+                * Note that the kernel_context will contain random state
+                * following MI_RESTORE_INHIBIT. We accept this since we
+                * never use the kernel_context state; it is merely a
+                * placeholder we use to flush other contexts.
+                */
+               *cs++ = MI_SET_CONTEXT;
+               *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
+                                                         engine)->state) |
+                       MI_MM_SPACE_GTT |
+                       MI_RESTORE_INHIBIT;
+       }
+
        *cs++ = MI_NOOP;
        *cs++ = MI_SET_CONTEXT;
-       *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+       *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1509,31 +1656,28 @@ static int remap_l3(struct i915_request *rq, int slice)
 static int switch_context(struct i915_request *rq)
 {
        struct intel_engine_cs *engine = rq->engine;
-       struct i915_gem_context *to_ctx = rq->ctx;
-       struct i915_hw_ppgtt *to_mm =
-               to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
-       struct i915_gem_context *from_ctx = engine->legacy_active_context;
-       struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+       struct i915_gem_context *ctx = rq->gem_context;
+       struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+       unsigned int unwind_mm = 0;
        u32 hw_flags = 0;
        int ret, i;
 
        lockdep_assert_held(&rq->i915->drm.struct_mutex);
        GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
-       if (to_mm != from_mm ||
-           (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
-               trace_switch_mm(engine, to_ctx);
-               ret = to_mm->switch_mm(to_mm, rq);
+       if (ppgtt) {
+               ret = load_pd_dir(rq, ppgtt);
                if (ret)
                        goto err;
 
-               to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
-               engine->legacy_active_ppgtt = to_mm;
-               hw_flags = MI_FORCE_RESTORE;
+               if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
+                       unwind_mm = intel_engine_flag(engine);
+                       ppgtt->pd_dirty_rings &= ~unwind_mm;
+                       hw_flags = MI_FORCE_RESTORE;
+               }
        }
 
-       if (to_intel_context(to_ctx, engine)->state &&
-           (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+       if (rq->hw_context->state) {
                GEM_BUG_ON(engine->id != RCS);
 
                /*
@@ -1543,35 +1687,38 @@ static int switch_context(struct i915_request *rq)
                 * as nothing actually executes using the kernel context; it
                 * is purely used for flushing user contexts.
                 */
-               if (i915_gem_context_is_kernel(to_ctx))
+               if (i915_gem_context_is_kernel(ctx))
                        hw_flags = MI_RESTORE_INHIBIT;
 
                ret = mi_set_context(rq, hw_flags);
                if (ret)
                        goto err_mm;
+       }
 
-               engine->legacy_active_context = to_ctx;
+       if (ppgtt) {
+               ret = flush_pd_dir(rq);
+               if (ret)
+                       goto err_mm;
        }
 
-       if (to_ctx->remap_slice) {
+       if (ctx->remap_slice) {
                for (i = 0; i < MAX_L3_SLICES; i++) {
-                       if (!(to_ctx->remap_slice & BIT(i)))
+                       if (!(ctx->remap_slice & BIT(i)))
                                continue;
 
                        ret = remap_l3(rq, i);
                        if (ret)
-                               goto err_ctx;
+                               goto err_mm;
                }
 
-               to_ctx->remap_slice = 0;
+               ctx->remap_slice = 0;
        }
 
        return 0;
 
-err_ctx:
-       engine->legacy_active_context = from_ctx;
 err_mm:
-       engine->legacy_active_ppgtt = from_mm;
+       if (unwind_mm)
+               ppgtt->pd_dirty_rings |= unwind_mm;
 err:
        return ret;
 }
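
The unwind_mm handling above is an optimistic-clear idiom: this engine's bit
in pd_dirty_rings is cleared before the reload is emitted, and restored on
any failure so that a later request retries the MI_FORCE_RESTORE. Distilled
into a standalone sketch, with the emission steps abstracted behind a
callback (all names here are illustrative):

typedef int (*emit_fn)(void *ctx);

static int reload_with_unwind(unsigned int *pd_dirty_rings,
                              unsigned int engine_bit,
                              emit_fn emit, void *ctx)
{
        unsigned int unwind = 0;
        int err;

        if (*pd_dirty_rings & engine_bit) {
                unwind = engine_bit;            /* remember what we cleared */
                *pd_dirty_rings &= ~engine_bit; /* assume the reload will land */
        }

        err = emit(ctx);                /* the load/set-context/flush steps */
        if (err && unwind)
                *pd_dirty_rings |= unwind;      /* put the bit back for a retry */

        return err;
}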
@@ -1580,7 +1727,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
        int ret;
 
-       GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+       GEM_BUG_ON(!request->hw_context->pin_count);
 
        /* Flush enough space to reduce the likelihood of waiting after
         * we start building the request - in which case we will just
@@ -2006,11 +2153,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
        intel_ring_init_semaphores(dev_priv, engine);
 
        engine->init_hw = init_ring_common;
-       engine->reset_hw = reset_ring_common;
+       engine->reset.prepare = reset_prepare;
+       engine->reset.reset = reset_ring;
+       engine->reset.finish = reset_finish;
 
        engine->context_pin = intel_ring_context_pin;
-       engine->context_unpin = intel_ring_context_unpin;
-
        engine->request_alloc = ring_request_alloc;
 
        engine->emit_breadcrumb = i9xx_emit_breadcrumb;
@@ -2074,16 +2221,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
        if (ret)
                return ret;
 
-       if (INTEL_GEN(dev_priv) >= 6) {
-               ret = intel_engine_create_scratch(engine, PAGE_SIZE);
-               if (ret)
-                       return ret;
-       } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
-               ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
 
index 010750e8ee447aa36e6872fd6289c0ce4fc02096..f5ffa6d31e82c3d19a4ceb8e1b0e78956d0eae3d 100644 (file)
@@ -122,7 +122,8 @@ struct intel_engine_hangcheck {
        int deadlock;
        struct intel_instdone instdone;
        struct i915_request *active_request;
-       bool stalled;
+       bool stalled:1;
+       bool wedged:1;
 };
 
 struct intel_ring {
@@ -192,6 +193,11 @@ struct i915_priolist {
        int priority;
 };
 
+struct st_preempt_hang {
+       struct completion completion;
+       bool inject_hang;
+};
+
 /**
  * struct intel_engine_execlists - execlist submission queue and port state
  *
@@ -291,32 +297,49 @@ struct intel_engine_execlists {
        /**
         * @queue: queue of requests, in priority lists
         */
-       struct rb_root queue;
+       struct rb_root_cached queue;
 
        /**
-        * @first: leftmost level in priority @queue
+        * @csb_read: control register for Context Switch buffer
+        *
+        * Note this register is always in mmio.
         */
-       struct rb_node *first;
+       u32 __iomem *csb_read;
 
        /**
-        * @fw_domains: forcewake domains for irq tasklet
+        * @csb_write: control register for Context Switch buffer
+        *
+        * Note this register may be either mmio or HWSP shadow.
         */
-       unsigned int fw_domains;
+       u32 *csb_write;
 
        /**
-        * @csb_head: context status buffer head
+        * @csb_status: status array for Context Switch buffer
+        *
+        * Note these registers may be either mmio or HWSP shadow.
         */
-       unsigned int csb_head;
+       u32 *csb_status;
 
        /**
-        * @csb_use_mmio: access csb through mmio, instead of hwsp
+        * @preempt_complete_status: expected CSB upon completing preemption
         */
-       bool csb_use_mmio;
+       u32 preempt_complete_status;
 
        /**
-        * @preempt_complete_status: expected CSB upon completing preemption
+        * @csb_write_reset: reset value for CSB write pointer
+        *
+        * As the CSB write pointer may be either in the HWSP or a field
+        * inside an mmio register, we want to reprogram it slightly
+        * differently to avoid later confusion.
         */
-       u32 preempt_complete_status;
+       u32 csb_write_reset;
+
+       /**
+        * @csb_head: context status buffer head
+        */
+       u8 csb_head;
+
+       I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
 };
 
 #define INTEL_ENGINE_CS_MAX_NAME 8
@@ -342,11 +365,10 @@ struct intel_engine_cs {
        struct i915_timeline timeline;
 
        struct drm_i915_gem_object *default_state;
+       void *pinned_default_state;
 
-       atomic_t irq_count;
        unsigned long irq_posted;
 #define ENGINE_IRQ_BREADCRUMB 0
-#define ENGINE_IRQ_EXECLIST 1
 
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
@@ -378,6 +400,7 @@ struct intel_engine_cs {
 
                unsigned int hangcheck_interrupts;
                unsigned int irq_enabled;
+               unsigned int irq_count;
 
                bool irq_armed : 1;
                I915_SELFTEST_DECLARE(bool mock : 1);
@@ -423,18 +446,22 @@ struct intel_engine_cs {
        void            (*irq_disable)(struct intel_engine_cs *engine);
 
        int             (*init_hw)(struct intel_engine_cs *engine);
-       void            (*reset_hw)(struct intel_engine_cs *engine,
-                                   struct i915_request *rq);
+
+       struct {
+               struct i915_request *(*prepare)(struct intel_engine_cs *engine);
+               void (*reset)(struct intel_engine_cs *engine,
+                             struct i915_request *rq);
+               void (*finish)(struct intel_engine_cs *engine);
+       } reset;
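
The old reset_hw() hook becomes a three-phase protocol: prepare() stops the
command streamer and returns the request to blame, reset() fixes up ring
state (see reset_ring() above), and finish() undoes whatever prepare() did.
A hedged sketch of the call order a reset path is expected to follow
(engine_reset_sketch is illustrative; the real sequence lives in the i915
GPU-reset code):

static void engine_reset_sketch(struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        rq = engine->reset.prepare(engine); /* stop the CS, find the victim */

        /* ...the actual hardware reset is performed here... */

        engine->reset.reset(engine, rq);    /* replay, or skip a guilty rq */
        engine->reset.finish(engine);       /* re-enable submission */
}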
 
        void            (*park)(struct intel_engine_cs *engine);
        void            (*unpark)(struct intel_engine_cs *engine);
 
        void            (*set_default_submission)(struct intel_engine_cs *engine);
 
-       struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
-                                         struct i915_gem_context *ctx);
-       void            (*context_unpin)(struct intel_engine_cs *engine,
-                                        struct i915_gem_context *ctx);
+       struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+                                            struct i915_gem_context *ctx);
+
        int             (*request_alloc)(struct i915_request *rq);
        int             (*init_context)(struct i915_request *rq);
 
@@ -550,16 +577,7 @@ struct intel_engine_cs {
         * to the kernel context and trash it as the save may not happen
         * before the hardware is powered down.
         */
-       struct i915_gem_context *last_retired_context;
-
-       /* We track the current MI_SET_CONTEXT in order to eliminate
-        * redundant context switches. This presumes that requests are not
-        * reordered! Or when they are the tracking is updated along with
-        * the emission of individual requests into the legacy command
-        * stream (ring).
-        */
-       struct i915_gem_context *legacy_active_context;
-       struct i915_hw_ppgtt *legacy_active_ppgtt;
+       struct intel_context *last_retired_context;
 
        /* status_notifier: list of callbacks for context-switch changes */
        struct atomic_notifier_head context_status_notifier;
@@ -672,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists,
        __clear_bit(bit, (unsigned long *)&execlists->active);
 }
 
+static inline void
+execlists_clear_all_active(struct intel_engine_execlists *execlists)
+{
+       execlists->active = 0;
+}
+
 static inline bool
 execlists_is_active(const struct intel_engine_execlists *execlists,
                    unsigned int bit)
@@ -809,6 +833,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
        return pos & (ring->size - 1);
 }
 
+static inline bool
+intel_ring_offset_valid(const struct intel_ring *ring,
+                       unsigned int pos)
+{
+       if (pos & -ring->size) /* must be strictly within the ring */
+               return false;
+
+       if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
+               return false;
+
+       return true;
+}
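
The pos & -ring->size test above relies on ring sizes being powers of two:
in two's complement, -size has a one in every bit at or above log2(size), so
any offset with one of those bits set lies outside [0, size). The same trick
in standalone form (offset_in_ring is illustrative):

#include <assert.h>
#include <stdint.h>

static int offset_in_ring(uint32_t pos, uint32_t size)
{
        assert(size && (size & (size - 1)) == 0); /* power of two, as in i915 */

        return (pos & -size) == 0;      /* equivalent to pos < size */
}

For a 4 KiB ring, offset_in_ring(4088, 4096) holds while
offset_in_ring(4096, 4096) does not; the qword-alignment test above is a
separate check.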
+
 static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
@@ -820,12 +857,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
 static inline void
 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
 {
-       /* We could combine these into a single tail operation, but keeping
-        * them as separate tests will help identify the cause should one
-        * ever fire.
-        */
-       GEM_BUG_ON(!IS_ALIGNED(tail, 8));
-       GEM_BUG_ON(tail >= ring->size);
+       GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
 
        /*
         * "Ring Buffer Use"
@@ -865,14 +897,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
 
 void intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
 
+int intel_engine_create_scratch(struct intel_engine_cs *engine,
+                               unsigned int size);
+void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
+
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
+int intel_engine_stop_cs(struct intel_engine_cs *engine);
+
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
@@ -918,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 
-static inline void intel_wait_init(struct intel_wait *wait,
-                                  struct i915_request *rq)
+static inline void intel_wait_init(struct intel_wait *wait)
 {
        wait->tsk = current;
-       wait->request = rq;
+       wait->request = NULL;
 }
 
 static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
@@ -1042,10 +1078,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
        return cs;
 }
 
+void intel_engines_sanitize(struct drm_i915_private *i915);
+
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
 
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
@@ -1123,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
 
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
 
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+       if (!execlists->preempt_hang.inject_hang)
+               return false;
+
+       complete(&execlists->preempt_hang.completion);
+       return true;
+}
+
+#else
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+       return false;
+}
+
+#endif
+
 #endif /* _INTEL_RINGBUFFER_H_ */
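
inject_preempt_hang() compiles down to a constant false unless
CONFIG_DRM_I915_SELFTEST is set, so production builds pay nothing for the
hook. A hedged sketch of how a selftest might drive it, arming inject_hang
and then waiting on the completion with a timeout (the function below
assumes a selftest build and is illustrative, not the actual live test):

static int preempt_hang_selftest_sketch(struct intel_engine_cs *engine)
{
        struct intel_engine_execlists *el = &engine->execlists;
        int err = 0;

        init_completion(&el->preempt_hang.completion);
        el->preempt_hang.inject_hang = true;

        /* ...submit a high-priority request to force a preemption... */

        if (!wait_for_completion_timeout(&el->preempt_hang.completion,
                                         msecs_to_jiffies(100)))
                err = -ETIME;

        el->preempt_hang.inject_hang = false;
        return err;
}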
index 53a6eaa9671abb5afcf90136e1ece0c5014d74d1..6b5aa3b074ecc8ffb11ae61ead04bc2ea0ee7bb2 100644 (file)
@@ -128,10 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
+       case POWER_DOMAIN_AUX_E:
+               return "AUX_E";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
        case POWER_DOMAIN_AUX_IO_A:
                return "AUX_IO_A";
+       case POWER_DOMAIN_AUX_TBT1:
+               return "AUX_TBT1";
+       case POWER_DOMAIN_AUX_TBT2:
+               return "AUX_TBT2";
+       case POWER_DOMAIN_AUX_TBT3:
+               return "AUX_TBT3";
+       case POWER_DOMAIN_AUX_TBT4:
+               return "AUX_TBT4";
        case POWER_DOMAIN_GMBUS:
                return "GMBUS";
        case POWER_DOMAIN_INIT:
@@ -382,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
        u32 val;
 
        if (wait_fuses) {
-               pg = SKL_PW_TO_PG(id);
+               pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) :
+                                                SKL_PW_TO_PG(id);
                /*
                 * For PW1 we have to wait both for the PW0/PG0 fuse state
                 * before enabling the power well and PW1/PG1's own fuse
@@ -428,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
        hsw_wait_for_power_well_disable(dev_priv, power_well);
 }
 
+#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A)
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+                                   struct i915_power_well *power_well)
+{
+       enum i915_power_well_id id = power_well->id;
+       enum port port = ICL_AUX_PW_TO_PORT(id);
+       u32 val;
+
+       val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id));
+
+       val = I915_READ(ICL_PORT_CL_DW12(port));
+       I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+
+       hsw_wait_for_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+                                    struct i915_power_well *power_well)
+{
+       enum i915_power_well_id id = power_well->id;
+       enum port port = ICL_AUX_PW_TO_PORT(id);
+       u32 val;
+
+       val = I915_READ(ICL_PORT_CL_DW12(port));
+       I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+
+       val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id));
+       I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id),
+                  val & ~HSW_PWR_WELL_CTL_REQ(id));
+
+       hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -1822,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT_ULL(POWER_DOMAIN_INIT))
 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS (              \
        BIT_ULL(POWER_DOMAIN_AUX_A) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
        BIT_ULL(POWER_DOMAIN_INIT))
 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS (              \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
@@ -1894,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
 
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS (                       \
+       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
+       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_INIT))
+       /* VDSC/joining */
+#define ICL_PW_3_POWER_DOMAINS (                       \
+       ICL_PW_4_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |           \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |        \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_E) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT1) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT2) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT3) |                \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT4) |                \
+       BIT_ULL(POWER_DOMAIN_VGA) |                     \
+       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+       /*
+        * - transcoder WD
+        * - KVMR (HW control)
+        */
+#define ICL_PW_2_POWER_DOMAINS (                       \
+       ICL_PW_3_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_INIT))
+       /*
+        * - eDP/DSI VDSC
+        * - KVMR (HW control)
+        */
+#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       ICL_PW_2_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define ICL_DDI_IO_A_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define ICL_DDI_IO_B_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define ICL_DDI_IO_C_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
+#define ICL_DDI_IO_D_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
+#define ICL_DDI_IO_E_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
+#define ICL_DDI_IO_F_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
+
+#define ICL_AUX_A_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_A))
+#define ICL_AUX_B_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_B))
+#define ICL_AUX_C_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_C))
+#define ICL_AUX_D_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_D))
+#define ICL_AUX_E_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_E))
+#define ICL_AUX_F_IO_POWER_DOMAINS (                   \
+       BIT_ULL(POWER_DOMAIN_AUX_F))
+#define ICL_AUX_TBT1_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT1))
+#define ICL_AUX_TBT2_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT2))
+#define ICL_AUX_TBT3_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT3))
+#define ICL_AUX_TBT4_IO_POWER_DOMAINS (                        \
+       BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+
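
The ICL_PW_* masks above nest (PW_4 inside PW_3 inside PW_2), mirroring the
hardware's power-gating hierarchy: enabling an outer well implies every
domain of the wells beneath it. Querying such a mask is a single bit test; a
minimal sketch (domain_covered is illustrative, though the driver itself
tests well->domains the same way while iterating its power wells):

static bool domain_covered(u64 well_domains,
                           enum intel_display_power_domain domain)
{
        return (well_domains & BIT_ULL(domain)) != 0;
}

Through the nesting, domain_covered(ICL_PW_2_POWER_DOMAINS,
POWER_DOMAIN_PIPE_C) is true even though PIPE_C is declared only in
ICL_PW_4_POWER_DOMAINS.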
 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
        .sync_hw = i9xx_power_well_sync_hw_noop,
        .enable = i9xx_always_on_power_well_noop,
@@ -2451,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = {
        },
 };
 
+static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
+       .sync_hw = hsw_power_well_sync_hw,
+       .enable = icl_combo_phy_aux_power_well_enable,
+       .disable = icl_combo_phy_aux_power_well_disable,
+       .is_enabled = hsw_power_well_enabled,
+};
+
+static struct i915_power_well icl_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = 1,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = I915_DISP_PW_ALWAYS_ON,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_1,
+               .hsw.has_fuses = true,
+       },
+       {
+               .name = "power well 2",
+               .domains = ICL_PW_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_2,
+               .hsw.has_fuses = true,
+       },
+       {
+               .name = "DC off",
+               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = SKL_DISP_PW_DC_OFF,
+       },
+       {
+               .name = "power well 3",
+               .domains = ICL_PW_3_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_3,
+               .hsw.irq_pipe_mask = BIT(PIPE_B),
+               .hsw.has_vga = true,
+               .hsw.has_fuses = true,
+       },
+       {
+               .name = "DDI A IO",
+               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_DDI_A,
+       },
+       {
+               .name = "DDI B IO",
+               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_DDI_B,
+       },
+       {
+               .name = "DDI C IO",
+               .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_DDI_C,
+       },
+       {
+               .name = "DDI D IO",
+               .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_DDI_D,
+       },
+       {
+               .name = "DDI E IO",
+               .domains = ICL_DDI_IO_E_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_DDI_E,
+       },
+       {
+               .name = "DDI F IO",
+               .domains = ICL_DDI_IO_F_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_DDI_F,
+       },
+       {
+               .name = "AUX A",
+               .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+               .ops = &icl_combo_phy_aux_power_well_ops,
+               .id = ICL_DISP_PW_AUX_A,
+       },
+       {
+               .name = "AUX B",
+               .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+               .ops = &icl_combo_phy_aux_power_well_ops,
+               .id = ICL_DISP_PW_AUX_B,
+       },
+       {
+               .name = "AUX C",
+               .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_C,
+       },
+       {
+               .name = "AUX D",
+               .domains = ICL_AUX_D_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_D,
+       },
+       {
+               .name = "AUX E",
+               .domains = ICL_AUX_E_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_E,
+       },
+       {
+               .name = "AUX F",
+               .domains = ICL_AUX_F_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_F,
+       },
+       {
+               .name = "AUX TBT1",
+               .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_TBT1,
+       },
+       {
+               .name = "AUX TBT2",
+               .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_TBT2,
+       },
+       {
+               .name = "AUX TBT3",
+               .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_TBT3,
+       },
+       {
+               .name = "AUX TBT4",
+               .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_AUX_TBT4,
+       },
+       {
+               .name = "power well 4",
+               .domains = ICL_PW_4_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_4,
+               .hsw.has_fuses = true,
+               .hsw.irq_pipe_mask = BIT(PIPE_C),
+       },
+};
+
 static int
 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
                                   int disable_power_well)
@@ -2468,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
        int requested_dc;
        int max_dc;
 
-       if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+       if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
                max_dc = 2;
                mask = 0;
        } else if (IS_GEN9_LP(dev_priv)) {
@@ -2556,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
         * The enabling order will be from lower to higher indexed wells,
         * the disabling order is reversed.
         */
-       if (IS_HASWELL(dev_priv)) {
+       if (IS_ICELAKE(dev_priv)) {
+               set_power_wells(power_domains, icl_power_wells);
+       } else if (IS_HASWELL(dev_priv)) {
                set_power_wells(power_domains, hsw_power_wells);
        } else if (IS_BROADWELL(dev_priv)) {
                set_power_wells(power_domains, bdw_power_wells);
@@ -2911,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
        switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
        default:
                MISSING_CASE(val);
+               /* fall through */
        case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
                procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
                break;
@@ -3023,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
 static void icl_display_core_init(struct drm_i915_private *dev_priv,
                                  bool resume)
 {
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
        enum port port;
        u32 val;
 
@@ -3051,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
                I915_WRITE(ICL_PORT_CL_DW5(port), val);
        }
 
-       /* 4. Enable power well 1 (PG1) and aux IO power. */
-       /* FIXME: ICL power wells code not here yet. */
+       /*
+        * 4. Enable Power Well 1 (PG1).
+        *    The AUX IO power wells will be enabled on demand.
+        */
+       mutex_lock(&power_domains->lock);
+       well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+       intel_power_well_enable(dev_priv, well);
+       mutex_unlock(&power_domains->lock);
 
        /* 5. Enable CDCLK. */
        icl_init_cdclk(dev_priv);
@@ -3070,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
 
 static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *well;
        enum port port;
        u32 val;
 
@@ -3083,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
        /* 3. Disable CD clock */
        icl_uninit_cdclk(dev_priv);
 
-       /* 4. Disable Power Well 1 (PG1) and Aux IO Power */
-       /* FIXME: ICL power wells code not here yet. */
+       /*
+        * 4. Disable Power Well 1 (PG1).
+        *    The AUX IO power wells are toggled on demand, so they are already
+        *    disabled at this point.
+        */
+       mutex_lock(&power_domains->lock);
+       well = lookup_power_well(dev_priv, ICL_DISP_PW_1);
+       intel_power_well_disable(dev_priv, well);
+       mutex_unlock(&power_domains->lock);
 
        /* 5. Disable Comp */
        for (port = PORT_A; port <= PORT_B; port++) {
index 25005023c243cb0526baf01f9c90d90dd3df4da2..812fe7b06f87389414c4c0cf0ae159de17fbc03d 100644 (file)
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
                                                           adjusted_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /*
         * Make the CRTC code factor in the SDVO pixel multiplier.  The
         * SDVO device will factor out the multiplier during mode_set.
@@ -1337,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
        switch (crtc_state->pixel_multiplier) {
        default:
                WARN(1, "unknown pixel multiplier specified\n");
+               /* fall through */
        case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
        case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
        case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
@@ -1397,33 +1401,40 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
 
        intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
 
-       if (active_outputs & intel_sdvo_connector->output_flag)
-               return true;
+       return active_outputs & intel_sdvo_connector->output_flag;
+}
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+                            i915_reg_t sdvo_reg, enum pipe *pipe)
+{
+       u32 val;
+
+       val = I915_READ(sdvo_reg);
+
+       /* asserts want to know the pipe even if the port is disabled */
+       if (HAS_PCH_CPT(dev_priv))
+               *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT;
+       else if (IS_CHERRYVIEW(dev_priv))
+               *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV;
        else
-               return false;
+               *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT;
+
+       return val & SDVO_ENABLE;
 }
 
 static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        u16 active_outputs = 0;
-       u32 tmp;
+       bool ret;
 
-       tmp = I915_READ(intel_sdvo->sdvo_reg);
        intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
 
-       if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
-               return false;
-
-       if (HAS_PCH_CPT(dev_priv))
-               *pipe = PORT_TO_PIPE_CPT(tmp);
-       else
-               *pipe = PORT_TO_PIPE(tmp);
+       ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe);
 
-       return true;
+       return ret || active_outputs;
 }
 
 static void intel_sdvo_get_config(struct intel_encoder *encoder,
@@ -1550,8 +1561,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
 
-               temp &= ~SDVO_PIPE_B_SELECT;
-               temp |= SDVO_ENABLE;
+               temp &= ~SDVO_PIPE_SEL_MASK;
+               temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A);
                intel_sdvo_write_sdvox(intel_sdvo, temp);
 
                temp &= ~SDVO_ENABLE;
@@ -1621,6 +1632,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (intel_sdvo->pixel_clock_min > mode->clock)
                return MODE_CLOCK_LOW;
 
@@ -1897,7 +1911,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
        if (edid != NULL) {
                if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
                                                      edid)) {
-                       drm_mode_connector_update_edid_property(connector, edid);
+                       drm_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
 
@@ -2300,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
        switch (sdvo->controlled_output) {
        case SDVO_OUTPUT_LVDS1:
                mask |= SDVO_OUTPUT_LVDS1;
+               /* fall through */
        case SDVO_OUTPUT_LVDS0:
                mask |= SDVO_OUTPUT_LVDS0;
+               /* fall through */
        case SDVO_OUTPUT_TMDS1:
                mask |= SDVO_OUTPUT_TMDS1;
+               /* fall through */
        case SDVO_OUTPUT_TMDS0:
                mask |= SDVO_OUTPUT_TMDS0;
+               /* fall through */
        case SDVO_OUTPUT_RGB1:
                mask |= SDVO_OUTPUT_RGB1;
+               /* fall through */
        case SDVO_OUTPUT_RGB0:
                mask |= SDVO_OUTPUT_RGB0;
                break;
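This switch is a deliberate cascade: each case ORs in its own output bit and falls through, so starting from the controlled output the mask accumulates that output plus every one listed after it. A standalone sketch showing the accumulation (output bit values are illustrative):

#include <stdio.h>

enum { OUT_RGB0 = 1 << 0, OUT_RGB1 = 1 << 1,
       OUT_TMDS0 = 1 << 2, OUT_TMDS1 = 1 << 3 };

static unsigned int guess_mask(unsigned int controlled)
{
	unsigned int mask = 0;

	switch (controlled) {
	case OUT_TMDS1:
		mask |= OUT_TMDS1;
		/* fall through */
	case OUT_TMDS0:
		mask |= OUT_TMDS0;
		/* fall through */
	case OUT_RGB1:
		mask |= OUT_RGB1;
		/* fall through */
	case OUT_RGB0:
		mask |= OUT_RGB0;
		break;
	}
	return mask;
}

int main(void)
{
	/* TMDS1 accumulates TMDS1|TMDS0|RGB1|RGB0 = 0xf */
	printf("0x%x\n", guess_mask(OUT_TMDS1));
	return 0;
}
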
index ee23613f9fd4fcf4723a5bfaaa05ca27a6e2ebf0..f7026e887fa9bd65d996c1afa851a92be28e6e6a 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-bool intel_format_is_yuv(u32 format)
-{
-       switch (format) {
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_NV12:
-               return true;
-       default:
-               return false;
-       }
-}
-
 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
                             int usecs)
 {
@@ -107,13 +93,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
                                                      VBLANK_EVASION_TIME_US);
        max = vblank_start - 1;
 
-       local_irq_disable();
-
        if (min <= 0 || max <= 0)
-               return;
+               goto irq_disable;
 
        if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
-               return;
+               goto irq_disable;
+
+       /*
+        * Wait for PSR to idle out after enabling the VBL interrupts.
+        * VBL interrupts will start the PSR exit and prevent a PSR
+        * re-entry as well.
+        */
+       if (intel_psr_wait_for_idle(new_crtc_state))
+               DRM_ERROR("PSR idle timed out, atomic update may fail\n");
+
+       local_irq_disable();
 
        crtc->debug.min_vbl = min;
        crtc->debug.max_vbl = max;
@@ -171,6 +165,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
        crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
 
        trace_i915_pipe_update_vblank_evaded(crtc);
+       return;
+
+irq_disable:
+       local_irq_disable();
 }
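The rework preserves the function's contract (callers are always left with interrupts disabled) while letting the PSR idle wait sleep before IRQs go off; every early-exit path now funnels through the irq_disable label. A minimal sketch of the control flow, with stand-in helpers in place of the real primitives:

#include <stdbool.h>
#include <stdio.h>

static void irq_disable(void) { puts("irq off"); }
static bool vblank_get(void)  { return false; } /* false = success */

static void update_start(int min, int max)
{
	if (min <= 0 || max <= 0)
		goto out_irq_disable;

	if (vblank_get())
		goto out_irq_disable;

	/* ... wait for PSR idle, then run the evasion loop with IRQs off */
	irq_disable();
	puts("evading vblank");
	return;

out_irq_disable:
	/* Early exits must still leave IRQs disabled for the caller. */
	irq_disable();
}

int main(void)
{
	update_start(0, 10); /* early exit path */
	update_start(5, 10); /* normal path */
	return 0;
}
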
 
 /**
@@ -284,13 +282,35 @@ skl_update_plane(struct intel_plane *plane,
        /* program plane scaler */
        if (plane_state->scaler_id >= 0) {
                int scaler_id = plane_state->scaler_id;
-               const struct intel_scaler *scaler;
+               const struct intel_scaler *scaler =
+                       &crtc_state->scaler_state.scalers[scaler_id];
+               u16 y_hphase, uv_rgb_hphase;
+               u16 y_vphase, uv_rgb_vphase;
+
+               /* TODO: handle sub-pixel coordinates */
+               if (fb->format->format == DRM_FORMAT_NV12) {
+                       y_hphase = skl_scaler_calc_phase(1, false);
+                       y_vphase = skl_scaler_calc_phase(1, false);
+
+                       /* MPEG2 chroma siting convention */
+                       uv_rgb_hphase = skl_scaler_calc_phase(2, true);
+                       uv_rgb_vphase = skl_scaler_calc_phase(2, false);
+               } else {
+                       /* not used */
+                       y_hphase = 0;
+                       y_vphase = 0;
 
-               scaler = &crtc_state->scaler_state.scalers[scaler_id];
+                       uv_rgb_hphase = skl_scaler_calc_phase(1, false);
+                       uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+               }
 
                I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
                              PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
                I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+               I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+                             PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+               I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+                             PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
                I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
                              ((crtc_w + 1) << 16) | (crtc_h + 1));
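skl_scaler_calc_phase() itself is outside this hunk, but the call sites encode the intent: NV12 chroma is subsampled by two in each direction, and the MPEG2 siting convention has chroma co-sited horizontally (hence the 'true' centering argument) and interstitial vertically. A hedged sketch of how such an initial phase can be derived in fixed point; the real helper's hardware register encoding is not reproduced here:

#include <stdio.h>

/*
 * Initial sampling phase for a plane subsampled by 'sub', in s15.16-ish
 * fixed point where 0x8000 is half a sample. Sampling from sample
 * centers starts half a sample early; co-sited chroma is additionally
 * shifted by (sub - 1) / (2 * sub) of a chroma sample to line up with
 * the luma grid. Illustration of the math only.
 */
static int calc_phase(int sub, int cosited)
{
	int phase = -0x8000; /* -0.5 sample */

	if (cosited)
		phase += (sub - 1) * 0x8000 / sub;

	return phase;
}

int main(void)
{
	printf("luma:            %d\n", calc_phase(1, 0)); /* -0x8000 */
	printf("chroma cosited:  %d\n", calc_phase(2, 1)); /* -0x4000 */
	printf("chroma centered: %d\n", calc_phase(2, 0)); /* -0x8000 */
	return 0;
}
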
@@ -327,19 +347,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 bool
-skl_plane_get_hw_state(struct intel_plane *plane)
+skl_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum plane_id plane_id = plane->id;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(PLANE_CTL(pipe, plane_id)) & PLANE_CTL_ENABLE;
+       ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
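All four get_hw_state() hooks in this file are reshaped the same way: the pipe travels through a new out-parameter that is filled in even when the plane is disabled, while the return value only reflects the enable bit. A condensed sketch of the shared shape, with stand-in helpers for the power-domain and register accessors:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B };

static bool power_get_if_enabled(enum pipe p) { (void)p; return true; }
static void power_put(enum pipe p)            { (void)p; }
static uint32_t read_plane_ctl(enum pipe p)   { (void)p; return 0x80000001; }

#define PLANE_CTL_ENABLE (1u << 31)

static bool plane_get_hw_state(enum pipe plane_pipe, enum pipe *pipe)
{
	bool ret;

	if (!power_get_if_enabled(plane_pipe))
		return false; /* power well off: state reads as disabled */

	ret = read_plane_ctl(plane_pipe) & PLANE_CTL_ENABLE;
	*pipe = plane_pipe; /* reported even when the plane is off */

	power_put(plane_pipe);
	return ret;
}

int main(void)
{
	enum pipe pipe;

	printf("enabled=%d pipe=%d\n", plane_get_hw_state(PIPE_A, &pipe), pipe);
	return 0;
}
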
 
@@ -380,7 +402,7 @@ chv_update_csc(const struct intel_plane_state *plane_state)
        const s16 *csc = csc_matrix[plane_state->base.color_encoding];
 
        /* RGB data seems to always bypass the CSC */
-       if (!intel_format_is_yuv(fb->format->format))
+       if (!fb->format->is_yuv)
                return;
 
        I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
@@ -415,7 +437,7 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
        enum plane_id plane_id = plane->id;
        int contrast, brightness, sh_scale, sh_sin, sh_cos;
 
-       if (intel_format_is_yuv(fb->format->format) &&
+       if (fb->format->is_yuv &&
            plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) {
                /*
                 * Expand limited range to full range:
@@ -588,19 +610,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 static bool
-vlv_plane_get_hw_state(struct intel_plane *plane)
+vlv_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
        enum plane_id plane_id = plane->id;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE;
+       ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -754,18 +778,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 static bool
-ivb_plane_get_hw_state(struct intel_plane *plane)
+ivb_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret =  I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE;
+       ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -910,18 +936,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
 }
 
 static bool
-g4x_plane_get_hw_state(struct intel_plane *plane)
+g4x_plane_get_hw_state(struct intel_plane *plane,
+                      enum pipe *pipe)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        enum intel_display_power_domain power_domain;
-       enum pipe pipe = plane->pipe;
        bool ret;
 
-       power_domain = POWER_DOMAIN_PIPE(pipe);
+       power_domain = POWER_DOMAIN_PIPE(plane->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;
 
-       ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE;
+       ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+
+       *pipe = plane->pipe;
 
        intel_display_power_put(dev_priv, power_domain);
 
@@ -1010,7 +1038,7 @@ intel_check_sprite_plane(struct intel_plane *plane,
                src->y1 = src_y << 16;
                src->y2 = (src_y + src_h) << 16;
 
-               if (intel_format_is_yuv(fb->format->format) &&
+               if (fb->format->is_yuv &&
                    fb->format->format != DRM_FORMAT_NV12 &&
                    (src_x % 2 || src_w % 2)) {
                        DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
@@ -1071,6 +1099,37 @@ intel_check_sprite_plane(struct intel_plane *plane,
        return 0;
 }
 
+static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv)
+{
+       return INTEL_GEN(dev_priv) >= 9;
+}
+
+static void intel_plane_set_ckey(struct intel_plane_state *plane_state,
+                                const struct drm_intel_sprite_colorkey *set)
+{
+       struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+
+       *key = *set;
+
+       /*
+        * We want src key enabled on the
+        * sprite and not on the primary.
+        */
+       if (plane->id == PLANE_PRIMARY &&
+           set->flags & I915_SET_COLORKEY_SOURCE)
+               key->flags = 0;
+
+       /*
+        * On SKL+ we want dst key enabled on
+        * the primary and not on the sprite.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               key->flags = 0;
+}
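intel_plane_set_ckey() copies the user's key into each affected plane's state and then strips the flags from whichever plane must not act on them: source keying stays off the primary, and on SKL+ destination keying stays off the sprites. A standalone sketch of just that flag-routing decision:

#include <stdio.h>

#define KEY_SOURCE	(1 << 0)
#define KEY_DEST	(1 << 1)

/* Which flags survive on a given plane; primary != 0 means PLANE_PRIMARY. */
static int effective_flags(int flags, int primary, int gen)
{
	/* Source keying belongs on the sprite, not the primary. */
	if (primary && (flags & KEY_SOURCE))
		return 0;
	/* On SKL+ (gen >= 9) destination keying belongs on the primary. */
	if (gen >= 9 && !primary && (flags & KEY_DEST))
		return 0;
	return flags;
}

int main(void)
{
	printf("src key, sprite:  %d\n", effective_flags(KEY_SOURCE, 0, 9));
	printf("src key, primary: %d\n", effective_flags(KEY_SOURCE, 1, 9));
	printf("dst key, sprite:  %d\n", effective_flags(KEY_DEST, 0, 9));
	printf("dst key, primary: %d\n", effective_flags(KEY_DEST, 1, 9));
	return 0;
}
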
+
 int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
 {
@@ -1100,6 +1159,16 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
        if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY)
                return -ENOENT;
 
+       /*
+        * On SKL+, only plane 2 can do destination keying against plane 1.
+        * Also, multiple planes can't do destination keying on the same
+        * pipe simultaneously.
+        */
+       if (INTEL_GEN(dev_priv) >= 9 &&
+           to_intel_plane(plane)->id >= PLANE_SPRITE1 &&
+           set->flags & I915_SET_COLORKEY_DESTINATION)
+               return -EINVAL;
+
        drm_modeset_acquire_init(&ctx, 0);
 
        state = drm_atomic_state_alloc(plane->dev);
@@ -1112,11 +1181,28 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
        while (1) {
                plane_state = drm_atomic_get_plane_state(state, plane);
                ret = PTR_ERR_OR_ZERO(plane_state);
-               if (!ret) {
-                       to_intel_plane_state(plane_state)->ckey = *set;
-                       ret = drm_atomic_commit(state);
+               if (!ret)
+                       intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
+
+               /*
+                * On some platforms we have to configure
+                * the dst colorkey on the primary plane.
+                */
+               if (!ret && has_dst_key_in_primary_plane(dev_priv)) {
+                       struct intel_crtc *crtc =
+                               intel_get_crtc_for_pipe(dev_priv,
+                                                       to_intel_plane(plane)->pipe);
+
+                       plane_state = drm_atomic_get_plane_state(state,
+                                                                crtc->base.primary);
+                       ret = PTR_ERR_OR_ZERO(plane_state);
+                       if (!ret)
+                               intel_plane_set_ckey(to_intel_plane_state(plane_state), set);
                }
 
+               if (!ret)
+                       ret = drm_atomic_commit(state);
+
                if (ret != -EDEADLK)
                        break;
 
@@ -1211,8 +1297,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = {
        DRM_FORMAT_MOD_INVALID
 };
 
-static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
+static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_YUYV:
@@ -1228,8 +1323,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool snb_mod_supported(uint32_t format, uint64_t modifier)
+static bool snb_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
@@ -1246,8 +1350,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
+static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane,
+                                           u32 format, u64 modifier)
 {
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_ABGR8888:
@@ -1269,8 +1382,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
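Each replacement hook validates in two stages, first the framebuffer modifier against what the hardware generation supports, then the pixel format, instead of the old single dispatcher that only filtered on the modifier's vendor bits. A condensed sketch of the two-stage gate with illustrative format and modifier codes:

#include <stdbool.h>
#include <stdio.h>

enum mod { MOD_LINEAR, MOD_X_TILED, MOD_Y_TILED };
enum fmt { FMT_XRGB8888, FMT_YUYV, FMT_C8 };

static bool sprite_format_mod_supported(enum fmt format, enum mod modifier)
{
	/* Stage 1: modifier gate. */
	switch (modifier) {
	case MOD_LINEAR:
	case MOD_X_TILED:
		break;
	default:
		return false;
	}

	/* Stage 2: format gate. */
	switch (format) {
	case FMT_XRGB8888:
	case FMT_YUYV:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", sprite_format_mod_supported(FMT_YUYV, MOD_X_TILED)); /* 1 */
	printf("%d\n", sprite_format_mod_supported(FMT_YUYV, MOD_Y_TILED)); /* 0 */
	printf("%d\n", sprite_format_mod_supported(FMT_C8, MOD_LINEAR));    /* 0 */
	return 0;
}
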
 
-static bool skl_mod_supported(uint32_t format, uint64_t modifier)
+static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
+                                          u32 format, u64 modifier)
 {
+       struct intel_plane *plane = to_intel_plane(_plane);
+
+       switch (modifier) {
+       case DRM_FORMAT_MOD_LINEAR:
+       case I915_FORMAT_MOD_X_TILED:
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               break;
+       case I915_FORMAT_MOD_Y_TILED_CCS:
+       case I915_FORMAT_MOD_Yf_TILED_CCS:
+               if (!plane->has_ccs)
+                       return false;
+               break;
+       default:
+               return false;
+       }
+
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
@@ -1302,38 +1433,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier)
        }
 }
 
-static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane,
-                                                    uint32_t format,
-                                                    uint64_t modifier)
-{
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-
-       if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
-               return false;
+static const struct drm_plane_funcs g4x_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = g4x_sprite_format_mod_supported,
+};
 
-       if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL &&
-           modifier != DRM_FORMAT_MOD_LINEAR)
-               return false;
+static const struct drm_plane_funcs snb_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = snb_sprite_format_mod_supported,
+};
 
-       if (INTEL_GEN(dev_priv) >= 9)
-               return skl_mod_supported(format, modifier);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return vlv_mod_supported(format, modifier);
-       else if (INTEL_GEN(dev_priv) >= 6)
-               return snb_mod_supported(format, modifier);
-       else
-               return g4x_mod_supported(format, modifier);
-}
+static const struct drm_plane_funcs vlv_sprite_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = vlv_sprite_format_mod_supported,
+};
 
-static const struct drm_plane_funcs intel_sprite_plane_funcs = {
-        .update_plane = drm_atomic_helper_update_plane,
-        .disable_plane = drm_atomic_helper_disable_plane,
-        .destroy = intel_plane_destroy,
-        .atomic_get_property = intel_plane_atomic_get_property,
-        .atomic_set_property = intel_plane_atomic_set_property,
-        .atomic_duplicate_state = intel_plane_duplicate_state,
-        .atomic_destroy_state = intel_plane_destroy_state,
-        .format_mod_supported = intel_sprite_plane_format_mod_supported,
+static const struct drm_plane_funcs skl_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = intel_plane_destroy,
+       .atomic_get_property = intel_plane_atomic_get_property,
+       .atomic_set_property = intel_plane_atomic_set_property,
+       .atomic_duplicate_state = intel_plane_duplicate_state,
+       .atomic_destroy_state = intel_plane_destroy_state,
+       .format_mod_supported = skl_plane_format_mod_supported,
 };
 
 bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
@@ -1359,6 +1500,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 {
        struct intel_plane *intel_plane = NULL;
        struct intel_plane_state *state = NULL;
+       const struct drm_plane_funcs *plane_funcs;
        unsigned long possible_crtcs;
        const uint32_t *plane_formats;
        const uint64_t *modifiers;
@@ -1383,6 +1525,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                intel_plane->can_scale = true;
                state->scaler_id = -1;
 
+               intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
+                                                        PLANE_SPRITE0 + plane);
+
                intel_plane->update_plane = skl_update_plane;
                intel_plane->disable_plane = skl_disable_plane;
                intel_plane->get_hw_state = skl_plane_get_hw_state;
@@ -1396,10 +1541,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                        num_plane_formats = ARRAY_SIZE(skl_plane_formats);
                }
 
-               if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane))
+               if (intel_plane->has_ccs)
                        modifiers = skl_plane_format_modifiers_ccs;
                else
                        modifiers = skl_plane_format_modifiers_noccs;
+
+               plane_funcs = &skl_plane_funcs;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_plane->can_scale = false;
                intel_plane->max_downscale = 1;
@@ -1411,6 +1558,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                plane_formats = vlv_plane_formats;
                num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
+
+               plane_funcs = &vlv_sprite_funcs;
        } else if (INTEL_GEN(dev_priv) >= 7) {
                if (IS_IVYBRIDGE(dev_priv)) {
                        intel_plane->can_scale = true;
@@ -1427,6 +1576,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                plane_formats = snb_plane_formats;
                num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                modifiers = i9xx_plane_format_modifiers;
+
+               plane_funcs = &snb_sprite_funcs;
        } else {
                intel_plane->can_scale = true;
                intel_plane->max_downscale = 16;
@@ -1439,9 +1590,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
                if (IS_GEN6(dev_priv)) {
                        plane_formats = snb_plane_formats;
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+
+                       plane_funcs = &snb_sprite_funcs;
                } else {
                        plane_formats = g4x_plane_formats;
                        num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+
+                       plane_funcs = &g4x_sprite_funcs;
                }
        }
 
@@ -1468,14 +1623,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
 
        if (INTEL_GEN(dev_priv) >= 9)
                ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-                                              possible_crtcs, &intel_sprite_plane_funcs,
+                                              possible_crtcs, plane_funcs,
                                               plane_formats, num_plane_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_OVERLAY,
                                               "plane %d%c", plane + 2, pipe_name(pipe));
        else
                ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
-                                              possible_crtcs, &intel_sprite_plane_funcs,
+                                              possible_crtcs, plane_funcs,
                                               plane_formats, num_plane_formats,
                                               modifiers,
                                               DRM_PLANE_TYPE_OVERLAY,
index 885fc3809f7f904e8bc82e0eeee9cddd3dcef3f4..b5b04cb892e945b747f20702b5740b4af572644f 100644 (file)
@@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
 static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 tmp = I915_READ(TV_CTL);
 
-       if (!(tmp & TV_ENC_ENABLE))
-               return false;
-
-       *pipe = PORT_TO_PIPE(tmp);
+       *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
 
-       return true;
+       return tmp & TV_ENC_ENABLE;
 }
 
 static void
@@ -850,6 +846,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
@@ -877,16 +876,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
                        struct drm_connector_state *conn_state)
 {
        const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
 
        if (!tv_mode)
                return false;
 
-       pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       adjusted_mode->crtc_clock = tv_mode->clock;
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
        /* TV has its own notion of sync and other mode flags, so clear them. */
-       pipe_config->base.adjusted_mode.flags = 0;
+       adjusted_mode->flags = 0;
 
        /*
         * FIXME: We don't check whether the input mode is actually what we want
@@ -1024,8 +1028,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
                break;
        }
 
-       if (intel_crtc->pipe == 1)
-               tv_ctl |= TV_ENC_PIPEB_SELECT;
+       tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
        tv_ctl |= tv_mode->oversample;
 
        if (tv_mode->progressive)
@@ -1149,12 +1152,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
 
        /* Poll for TV detection */
-       tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
+       tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
        tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
-       if (intel_crtc->pipe == 1)
-               tv_ctl |= TV_ENC_PIPEB_SELECT;
-       else
-               tv_ctl &= ~TV_ENC_PIPEB_SELECT;
+       tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe);
 
        tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
        tv_dac |= (TVDAC_STATE_CHG_EN |
@@ -1347,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector)
                mode_ptr = drm_mode_create(connector->dev);
                if (!mode_ptr)
                        continue;
-               strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
-               mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
+               strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
 
                mode_ptr->hdisplay = hactive_s;
                mode_ptr->hsync_start = hactive_s + 1;
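strlcpy() subsumes the strncpy()-plus-manual-NUL pair: it always terminates within the given size and returns the full source length, so truncation is detectable. It is a kernel/BSD helper rather than standard C; a hedged userspace stand-in for illustration:

#include <stdio.h>
#include <string.h>

/*
 * Minimal userspace stand-in for the kernel's strlcpy(): copy at most
 * size - 1 bytes and always NUL-terminate (when size > 0). Returns the
 * length of src, as the kernel version does, so the caller can detect
 * truncation.
 */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char name[8];

	if (my_strlcpy(name, "1920x1080i", sizeof(name)) >= sizeof(name))
		puts("truncated");
	printf("%s\n", name); /* "1920x10" */
	return 0;
}
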
index 1cffaf7b5dbef228f194131f258f893ef296415c..7c95697e1a358d90a56509fa7d5e66ca2e6cb747 100644 (file)
@@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
        return ret;
 }
 
-static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *i915)
 {
-       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       struct intel_uc_fw *guc_fw = &i915->guc.fw;
+       struct intel_uc_fw *huc_fw = &i915->huc.fw;
        int enable_guc = 0;
 
        /* Default is to enable GuC/HuC if we know their firmwares */
@@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
        return enable_guc;
 }
 
-static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
+static int __get_default_guc_log_level(struct drm_i915_private *i915)
 {
        int guc_log_level;
 
-       if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc())
+       if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
                guc_log_level = GUC_LOG_LEVEL_DISABLED;
        else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
                 IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
 
 /**
  * sanitize_options_early - sanitize uC related modparam options
- * @dev_priv: device private
+ * @i915: device private
  *
  * In case of "enable_guc" option this function will attempt to modify
  * it only if it was initially set to "auto(-1)". Default value for this
@@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv)
  * unless GuC is enabled on the given platform and the driver is compiled
  * with a debug config, in which case this modparam will default to
  * "enable(1..4)".
  */
-static void sanitize_options_early(struct drm_i915_private *dev_priv)
+static void sanitize_options_early(struct drm_i915_private *i915)
 {
-       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       struct intel_uc_fw *guc_fw = &i915->guc.fw;
+       struct intel_uc_fw *huc_fw = &i915->huc.fw;
 
        /* A negative value means "use platform default" */
        if (i915_modparams.enable_guc < 0)
-               i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+               i915_modparams.enable_guc = __get_platform_enable_guc(i915);
 
        DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
                         i915_modparams.enable_guc,
@@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv)
        if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "enable_guc", i915_modparams.enable_guc,
-                        !HAS_GUC(dev_priv) ? "no GuC hardware" :
-                                             "no GuC firmware");
+                        !HAS_GUC(i915) ? "no GuC hardware" :
+                                         "no GuC firmware");
        }
 
        /* Verify HuC firmware availability */
        if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "enable_guc", i915_modparams.enable_guc,
-                        !HAS_HUC(dev_priv) ? "no HuC hardware" :
-                                             "no HuC firmware");
+                        !HAS_HUC(i915) ? "no HuC hardware" :
+                                         "no HuC firmware");
        }
 
        /* A negative value means "use platform/config default" */
        if (i915_modparams.guc_log_level < 0)
                i915_modparams.guc_log_level =
-                       __get_default_guc_log_level(dev_priv);
+                       __get_default_guc_log_level(i915);
 
        if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
                DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
                         "guc_log_level", i915_modparams.guc_log_level,
-                        !HAS_GUC(dev_priv) ? "no GuC hardware" :
-                                             "GuC not enabled");
+                        !HAS_GUC(i915) ? "no GuC hardware" :
+                                         "GuC not enabled");
                i915_modparams.guc_log_level = 0;
        }
 
@@ -171,44 +171,30 @@ void intel_uc_init_early(struct drm_i915_private *i915)
        intel_huc_init_early(huc);
 
        sanitize_options_early(i915);
-
-       if (USES_GUC(i915))
-               intel_uc_fw_fetch(i915, &guc->fw);
-
-       if (USES_HUC(i915))
-               intel_uc_fw_fetch(i915, &huc->fw);
 }
 
 void intel_uc_cleanup_early(struct drm_i915_private *i915)
 {
        struct intel_guc *guc = &i915->guc;
-       struct intel_huc *huc = &i915->huc;
-
-       if (USES_HUC(i915))
-               intel_uc_fw_fini(&huc->fw);
-
-       if (USES_GUC(i915))
-               intel_uc_fw_fini(&guc->fw);
 
        guc_free_load_err_log(guc);
 }
 
 /**
  * intel_uc_init_mmio - setup uC MMIO access
- *
- * @dev_priv: device private
+ * @i915: device private
  *
  * Setup minimal state necessary for MMIO accesses later in the
  * initialization sequence.
  */
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv)
+void intel_uc_init_mmio(struct drm_i915_private *i915)
 {
-       intel_guc_init_send_regs(&dev_priv->guc);
+       intel_guc_init_send_regs(&i915->guc);
 }
 
 static void guc_capture_load_err_log(struct intel_guc *guc)
 {
-       if (!guc->log.vma || !i915_modparams.guc_log_level)
+       if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
                return;
 
        if (!guc->load_err_log)
@@ -225,11 +211,11 @@ static void guc_free_load_err_log(struct intel_guc *guc)
 
 static int guc_enable_communication(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_private *i915 = guc_to_i915(guc);
 
-       gen9_enable_guc_interrupts(dev_priv);
+       gen9_enable_guc_interrupts(i915);
 
-       if (HAS_GUC_CT(dev_priv))
+       if (HAS_GUC_CT(i915))
                return intel_guc_ct_enable(&guc->ct);
 
        guc->send = intel_guc_send_mmio;
@@ -239,60 +225,73 @@ static int guc_enable_communication(struct intel_guc *guc)
 
 static void guc_disable_communication(struct intel_guc *guc)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_private *i915 = guc_to_i915(guc);
 
-       if (HAS_GUC_CT(dev_priv))
+       if (HAS_GUC_CT(i915))
                intel_guc_ct_disable(&guc->ct);
 
-       gen9_disable_guc_interrupts(dev_priv);
+       gen9_disable_guc_interrupts(i915);
 
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
 }
 
-int intel_uc_init_misc(struct drm_i915_private *dev_priv)
+int intel_uc_init_misc(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
+       struct intel_huc *huc = &i915->huc;
        int ret;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
-       intel_guc_init_ggtt_pin_bias(guc);
-
-       ret = intel_guc_init_wq(guc);
+       ret = intel_guc_init_misc(guc);
        if (ret)
                return ret;
 
+       if (USES_HUC(i915)) {
+               ret = intel_huc_init_misc(huc);
+               if (ret)
+                       goto err_guc;
+       }
+
        return 0;
+
+err_guc:
+       intel_guc_fini_misc(guc);
+       return ret;
 }
 
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv)
+void intel_uc_fini_misc(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
+       struct intel_huc *huc = &i915->huc;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return;
 
-       intel_guc_fini_wq(guc);
+       if (USES_HUC(i915))
+               intel_huc_fini_misc(huc);
+
+       intel_guc_fini_misc(guc);
 }
 
-int intel_uc_init(struct drm_i915_private *dev_priv)
+int intel_uc_init(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
        int ret;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
-       if (!HAS_GUC(dev_priv))
+       if (!HAS_GUC(i915))
                return -ENODEV;
 
        ret = intel_guc_init(guc);
        if (ret)
                return ret;
 
-       if (USES_GUC_SUBMISSION(dev_priv)) {
+       if (USES_GUC_SUBMISSION(i915)) {
                /*
                 * This is stuff we need to have available at fw load time
                 * if we are planning to enable submission later
@@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv)
        return 0;
 }
 
-void intel_uc_fini(struct drm_i915_private *dev_priv)
+void intel_uc_fini(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return;
 
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
+       GEM_BUG_ON(!HAS_GUC(i915));
 
-       if (USES_GUC_SUBMISSION(dev_priv))
+       if (USES_GUC_SUBMISSION(i915))
                intel_guc_submission_fini(guc);
 
        intel_guc_fini(guc);
@@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
        __intel_uc_reset_hw(i915);
 }
 
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_hw(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
-       struct intel_huc *huc = &dev_priv->huc;
+       struct intel_guc *guc = &i915->guc;
+       struct intel_huc *huc = &i915->huc;
        int ret, attempts;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return 0;
 
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
+       GEM_BUG_ON(!HAS_GUC(i915));
 
-       gen9_reset_guc_interrupts(dev_priv);
+       gen9_reset_guc_interrupts(i915);
 
        /* WaEnableuKernelHeaderValidFix:skl */
        /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
-       if (IS_GEN9(dev_priv))
+       if (IS_GEN9(i915))
                attempts = 3;
        else
                attempts = 1;
@@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
                 * Always reset the GuC just before (re)loading, so
                 * that the state and timing are fairly predictable
                 */
-               ret = __intel_uc_reset_hw(dev_priv);
+               ret = __intel_uc_reset_hw(i915);
                if (ret)
                        goto err_out;
 
-               if (USES_HUC(dev_priv)) {
+               if (USES_HUC(i915)) {
                        ret = intel_huc_fw_upload(huc);
                        if (ret)
                                goto err_out;
@@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
        if (ret)
                goto err_log_capture;
 
-       if (USES_HUC(dev_priv)) {
+       if (USES_HUC(i915)) {
                ret = intel_huc_auth(huc);
                if (ret)
                        goto err_communication;
        }
 
-       if (USES_GUC_SUBMISSION(dev_priv)) {
+       if (USES_GUC_SUBMISSION(i915)) {
                ret = intel_guc_submission_enable(guc);
                if (ret)
                        goto err_communication;
        }
 
-       dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
+       dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
                 guc->fw.major_ver_found, guc->fw.minor_ver_found);
-       dev_info(dev_priv->drm.dev, "GuC submission %s\n",
-                enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
-       dev_info(dev_priv->drm.dev, "HuC %s\n",
-                enableddisabled(USES_HUC(dev_priv)));
+       dev_info(i915->drm.dev, "GuC submission %s\n",
+                enableddisabled(USES_GUC_SUBMISSION(i915)));
+       dev_info(i915->drm.dev, "HuC %s\n",
+                enableddisabled(USES_HUC(i915)));
 
        return 0;
 
@@ -428,20 +427,20 @@ err_out:
        if (GEM_WARN_ON(ret == -EIO))
                ret = -EINVAL;
 
-       dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
+       dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
        return ret;
 }
 
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+void intel_uc_fini_hw(struct drm_i915_private *i915)
 {
-       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_guc *guc = &i915->guc;
 
-       if (!USES_GUC(dev_priv))
+       if (!USES_GUC(i915))
                return;
 
-       GEM_BUG_ON(!HAS_GUC(dev_priv));
+       GEM_BUG_ON(!HAS_GUC(i915));
 
-       if (USES_GUC_SUBMISSION(dev_priv))
+       if (USES_GUC_SUBMISSION(i915))
                intel_guc_submission_disable(guc);
 
        guc_disable_communication(guc);
index 448293eb638d3ce7ef765c8a026cf41e89ff76e5..b892ca8396e8778d0e4670813412c4a8a0b669c9 100644 (file)
@@ -1702,15 +1702,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        const u32 base = engine->mmio_base;
-       const i915_reg_t mode = RING_MI_MODE(base);
-
-       I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
-       if (__intel_wait_for_register_fw(dev_priv,
-                                        mode, MODE_IDLE, MODE_IDLE,
-                                        500, 0,
-                                        NULL))
-               DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
-                                engine->name);
+
+       if (intel_engine_stop_cs(engine))
+               DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
 
        I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
        POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
@@ -2099,21 +2093,25 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
 {
        struct intel_engine_cs *engine;
        unsigned int tmp;
+       int ret;
 
-       for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-               if (gen8_reset_engine_start(engine))
+       for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+               if (gen8_reset_engine_start(engine)) {
+                       ret = -EIO;
                        goto not_ready;
+               }
+       }
 
        if (INTEL_GEN(dev_priv) >= 11)
-               return gen11_reset_engines(dev_priv, engine_mask);
+               ret = gen11_reset_engines(dev_priv, engine_mask);
        else
-               return gen6_reset_engines(dev_priv, engine_mask);
+               ret = gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
        for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
                gen8_reset_engine_cancel(engine);
 
-       return -EIO;
+       return ret;
 }
 
 typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
@@ -2176,6 +2174,8 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
                 * Thus assume it is best to stop engines on all gens
                 * where we have a gpu reset.
                 *
+                * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+                *
                 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
                 *
                 * FIXME: Wa for more modern gens needs to be validated
index 47478d6096308f4d60877f2173685b623fa4ae3d..2fbe93178fb2a3bb8eace212b040cb31dbc379cb 100644 (file)
@@ -67,21 +67,21 @@ struct intel_uncore_funcs {
        void (*force_wake_put)(struct drm_i915_private *dev_priv,
                               enum forcewake_domains domains);
 
-       uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
-       uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
-       uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
-       uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
-                              i915_reg_t r, bool trace);
+       u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+                        i915_reg_t r, bool trace);
+       u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+                         i915_reg_t r, bool trace);
+       u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+                         i915_reg_t r, bool trace);
+       u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+                         i915_reg_t r, bool trace);
 
        void (*mmio_writeb)(struct drm_i915_private *dev_priv,
-                           i915_reg_t r, uint8_t val, bool trace);
+                           i915_reg_t r, u8 val, bool trace);
        void (*mmio_writew)(struct drm_i915_private *dev_priv,
-                           i915_reg_t r, uint16_t val, bool trace);
+                           i915_reg_t r, u16 val, bool trace);
        void (*mmio_writel)(struct drm_i915_private *dev_priv,
-                           i915_reg_t r, uint32_t val, bool trace);
+                           i915_reg_t r, u32 val, bool trace);
 };
 
 struct intel_forcewake_range {
index 458468237b5f94d4572e421194459c7ea150fb66..bba98cf83cbd9d1c0348e42acb5f8fba98a106dc 100644 (file)
@@ -318,6 +318,12 @@ enum vbt_gmbus_ddi {
        DDC_BUS_DDI_C,
        DDC_BUS_DDI_D,
        DDC_BUS_DDI_F,
+       ICL_DDC_BUS_DDI_A = 0x1,
+       ICL_DDC_BUS_DDI_B,
+       ICL_DDC_BUS_PORT_1 = 0x4,
+       ICL_DDC_BUS_PORT_2,
+       ICL_DDC_BUS_PORT_3,
+       ICL_DDC_BUS_PORT_4,
 };
 
 #define VBT_DP_MAX_LINK_RATE_HBR3      0
@@ -414,7 +420,9 @@ struct child_device_config {
        u16 extended_type;
        u8 dvo_function;
        u8 dp_usb_type_c:1;                                     /* 195 */
-       u8 flags2_reserved:7;                                   /* 195 */
+       u8 tbt:1;                                               /* 209 */
+       u8 flags2_reserved:2;                                   /* 195 */
+       u8 dp_port_trace_length:4;                              /* 209 */
        u8 dp_gpio_index;                                       /* 195 */
        u16 dp_gpio_pin_num;                                    /* 195 */
        u8 dp_iboost_level:4;                                   /* 196 */
@@ -448,7 +456,7 @@ struct bdb_general_definitions {
         * number = (block_size - sizeof(bdb_general_definitions))/
         *           defs->child_dev_size;
         */
-       uint8_t devices[0];
+       u8 devices[0];
 } __packed;
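The devices[0] flexible array is sized at parse time from the block size, exactly as the comment above spells out, because child_dev_size varies between VBT versions. A standalone sketch of that computation with an illustrative header layout, not the real VBT structure:

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for the VBT general definitions block. */
struct defs_block {
	unsigned char crt_ddc_gmbus_pin;
	unsigned char dpms_bits;
	unsigned short boot_display;
	unsigned char child_dev_size;	/* per-entry size, varies by version */
	unsigned char devices[];	/* child_dev_size bytes per child */
};

static size_t child_device_count(size_t block_size,
				 const struct defs_block *defs)
{
	/* number = (block_size - header size) / per-child size */
	return (block_size - sizeof(*defs)) / defs->child_dev_size;
}

int main(void)
{
	struct defs_block defs = { .child_dev_size = 38 };

	/* a block holding the header plus four 38-byte children */
	printf("%zu children\n",
	       child_device_count(sizeof(defs) + 4 * 38, &defs));
	return 0;
}
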
 
 /* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
@@ -635,7 +643,7 @@ struct bdb_sdvo_lvds_options {
 #define BDB_DRIVER_FEATURE_NO_LVDS             0
 #define BDB_DRIVER_FEATURE_INT_LVDS            1
 #define BDB_DRIVER_FEATURE_SDVO_LVDS           2
-#define BDB_DRIVER_FEATURE_EDP                 3
+#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS       3
 
 struct bdb_driver_features {
        u8 boot_dev_algorithm:1;
index 2df3538ceba546e8169d7bac9bbe03e23bfdf3eb..4bcdeaf8d98fa3de5aec7790971098905b5a688b 100644 (file)
  * - Public functions to init or apply the given workaround type.
  */
 
-static int wa_add(struct drm_i915_private *dev_priv,
-                 i915_reg_t addr,
-                 const u32 mask, const u32 val)
+static void wa_add(struct drm_i915_private *i915,
+                  i915_reg_t reg, const u32 mask, const u32 val)
 {
-       const unsigned int idx = dev_priv->workarounds.count;
+       struct i915_workarounds *wa = &i915->workarounds;
+       unsigned int start = 0, end = wa->count;
+       unsigned int addr = i915_mmio_reg_offset(reg);
+       struct i915_wa_reg *r;
+
+       while (start < end) {
+               unsigned int mid = start + (end - start) / 2;
+
+               if (wa->reg[mid].addr < addr) {
+                       start = mid + 1;
+               } else if (wa->reg[mid].addr > addr) {
+                       end = mid;
+               } else {
+                       r = &wa->reg[mid];
+
+                       if ((mask & ~r->mask) == 0) {
+                               DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+                                         addr, r->mask, r->value);
+
+                               r->value &= ~mask;
+                       }
+
+                       r->value |= val;
+                       r->mask  |= mask;
+                       return;
+               }
+       }
 
-       if (WARN_ON(idx >= I915_MAX_WA_REGS))
-               return -ENOSPC;
+       if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
+               DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
+                         addr, mask, val);
+               return;
+       }
 
-       dev_priv->workarounds.reg[idx].addr = addr;
-       dev_priv->workarounds.reg[idx].value = val;
-       dev_priv->workarounds.reg[idx].mask = mask;
+       r = &wa->reg[wa->count++];
+       r->addr  = addr;
+       r->value = val;
+       r->mask  = mask;
 
-       dev_priv->workarounds.count++;
+       while (r-- > wa->reg) {
+               GEM_BUG_ON(r[0].addr == r[1].addr);
+               if (r[1].addr > r[0].addr)
+                       break;
 
-       return 0;
+               swap(r[1], r[0]);
+       }
 }
 
-#define WA_REG(addr, mask, val) do { \
-               const int r = wa_add(dev_priv, (addr), (mask), (val)); \
-               if (r) \
-                       return r; \
-       } while (0)
+#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
 
 #define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
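wa_add() now keeps the table sorted by register offset: a binary search either merges the new mask/value into an existing entry for the same register or, on a miss, the entry is appended and bubbled backwards into place, which is why WA_REG no longer needs to propagate -ENOSPC. A self-contained sketch of the insert-or-merge scheme (the kernel version additionally reports conflicting overwrites):

#include <stdio.h>

#define MAX_WA 16

struct wa { unsigned int addr, mask, value; };

static struct wa regs[MAX_WA];
static unsigned int count;

static void wa_add(unsigned int addr, unsigned int mask, unsigned int val)
{
	unsigned int start = 0, end = count;

	/* Binary search: merge into an existing entry if one matches. */
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (regs[mid].addr < addr) {
			start = mid + 1;
		} else if (regs[mid].addr > addr) {
			end = mid;
		} else {
			regs[mid].value |= val;
			regs[mid].mask |= mask;
			return;
		}
	}

	if (count >= MAX_WA)
		return; /* table full; the kernel warns and drops here */

	/* Append, then bubble backwards until the array is sorted again. */
	regs[count++] = (struct wa){ addr, mask, val };
	for (unsigned int i = count - 1;
	     i > 0 && regs[i - 1].addr > regs[i].addr; i--) {
		struct wa tmp = regs[i - 1];

		regs[i - 1] = regs[i];
		regs[i] = tmp;
	}
}

int main(void)
{
	wa_add(0xb100, 0xf, 0x1);
	wa_add(0x7004, 0x3, 0x2);
	wa_add(0xb100, 0x10, 0x10); /* merges with the first entry */

	for (unsigned int i = 0; i < count; i++)
		printf("%04x mask=%02x value=%02x\n",
		       regs[i].addr, regs[i].mask, regs[i].value);
	return 0;
}
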
@@ -463,6 +492,22 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
         */
        WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
 
+       /* Wa_2006611047:icl (pre-prod)
+        * Formerly known as WaDisableImprovedTdlClkGating
+        */
+       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+               WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+                                 GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
+
+       /* WaEnableStateCacheRedirectToCS:icl */
+       WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
+                         GEN11_STATE_CACHE_REDIRECT_TO_CS);
+
+       /* Wa_2006665173:icl (pre-prod) */
+       if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+               WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
+                                 GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
+
        return 0;
 }
 
@@ -521,7 +566,7 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
 
        *cs++ = MI_LOAD_REGISTER_IMM(w->count);
        for (i = 0; i < w->count; i++) {
-               *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+               *cs++ = w->reg[i].addr;
                *cs++ = w->reg[i].value;
        }
        *cs++ = MI_NOOP;
@@ -647,6 +692,19 @@ static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
                   I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       /* WaKBLVECSSemaphoreWaitPoll:kbl */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
+               struct intel_engine_cs *engine;
+               unsigned int tmp;
+
+               for_each_engine(engine, dev_priv, tmp) {
+                       if (engine->id == RCS)
+                               continue;
+
+                       I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
+               }
+       }
 }
 
 static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
@@ -672,8 +730,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
+static void wa_init_mcr(struct drm_i915_private *dev_priv)
+{
+       const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+       u32 mcr;
+       u32 mcr_slice_subslice_mask;
+
+       /*
+        * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
+        * L3Banks could be fused off in single slice scenario. If that is
+        * the case, we might need to program MCR select to a valid L3Bank
+        * by default, to make sure we correctly read certain registers
+        * later on (in the range 0xB100 - 0xB3FF).
+        * This might be incompatible with
+        * WaProgramMgsrForCorrectSliceSpecificMmioReads.
+        * Fortunately, this should not happen in production hardware, so
+        * we only assert that this is the case (instead of implementing
+        * something more complex that requires checking the range of every
+        * MMIO read).
+        */
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           is_power_of_2(sseu->slice_mask)) {
+               /*
+                * Read FUSE3 for enabled L3 Bank IDs; if an L3 Bank matches
+                * an enabled subslice, there is no need to redirect the MCR
+                * packet.
+                */
+               u32 slice = fls(sseu->slice_mask);
+               u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+               u8 ss_mask = sseu->subslice_mask[slice];
+
+               u8 enabled_mask = (ss_mask | ss_mask >>
+                                  GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
+               u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
+
+               /*
+                * Production silicon should have matching L3Bank and
+                * subslice enable masks.
+                */
+               WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
+       }
+
+       mcr = I915_READ(GEN8_MCR_SELECTOR);
+
+       if (INTEL_GEN(dev_priv) >= 11)
+               mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
+                                         GEN11_MCR_SUBSLICE_MASK;
+       else
+               mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
+                                         GEN8_MCR_SUBSLICE_MASK;
+       /*
+        * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
+        * Before any MMIO read into slice/subslice specific registers, MCR
+        * packet control register needs to be programmed to point to any
+        * enabled s/ss pair. Otherwise, incorrect values will be returned.
+        * This means each subsequent MMIO read will be forwarded to a
+        * specific s/ss combination, but this is OK since these registers
+        * are consistent across s/ss in almost all cases. On the rare
+        * occasions, such as INSTDONE, where this value depends on the
+        * s/ss combo, the read should be done with read_subslice_reg.
+        */
+       mcr &= ~mcr_slice_subslice_mask;
+       mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
+       I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+}
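The fuse check folds the subslice mask down onto the L3 bank-pair bits before comparing against FUSE3, since each bank pair serves a pair of subslices. A sketch of the folding with illustrative constants standing in for the GEN10_* definitions:

#include <stdio.h>

/* Illustrative values; the real GEN10_* constants live in i915_reg.h. */
#define L3BANK_PAIR_COUNT	4
#define L3BANK_MASK		0x0f

int main(void)
{
	unsigned int ss_mask = 0x30;	/* subslices 4 and 5 enabled */

	/* Fold the 8-bit subslice mask onto the 4 bank-pair bits. */
	unsigned int enabled = (ss_mask | ss_mask >> L3BANK_PAIR_COUNT) &
			       L3BANK_MASK;

	unsigned int fuse_disabled = 0x4;	/* bank pair 2 fused off */

	/* Production parts: no enabled subslice maps to a fused-off bank. */
	printf("enabled=0x%x conflict=%s\n", enabled,
	       (enabled & fuse_disabled) ? "yes" : "no");
	return 0;
}
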
+
 static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 {
+       wa_init_mcr(dev_priv);
+
        /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
        if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
                I915_WRITE(GAMT_CHKN_BIT_REG,
@@ -692,6 +816,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 
 static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
 {
+       wa_init_mcr(dev_priv);
+
        /* This is not a Wa. Enable for better image quality */
        I915_WRITE(_3D_CHICKEN3,
                   _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
@@ -772,6 +898,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
                   PMFLUSHDONE_LNICRSDROP |
                   PMFLUSH_GAPL3UNBLOCK |
                   PMFLUSHDONE_LNEBLK);
+
+       /* Wa_1406463099:icl
+        * Formerly known as WaGamTlbPendError
+        */
+       I915_WRITE(GAMT_CHKN_BIT_REG,
+                  I915_READ(GAMT_CHKN_BIT_REG) |
+                  GAMT_CHKN_DISABLE_L3_COH_PIPE);
 }
 
 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
index 91c72911be3cb73b126e9925648746c7db214f93..7efb326badcd677e98ddfe5c6d1e869d201492ba 100644 (file)
@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 
 static int igt_check_page_sizes(struct i915_vma *vma)
 {
-       struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
+       struct drm_i915_private *i915 = vma->vm->i915;
        unsigned int supported = INTEL_INFO(i915)->page_sizes;
        struct drm_i915_gem_object *obj = vma->obj;
        int err = 0;
@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
 static int igt_mock_exhaust_device_supported_pages(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
        unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
                                goto out_put;
                        }
 
-                       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+                       vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                        if (IS_ERR(vma)) {
                                err = PTR_ERR(vma);
                                goto out_put;
@@ -458,7 +458,7 @@ out_device:
 static int igt_mock_ppgtt_misaligned_dma(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        struct drm_i915_gem_object *obj;
        int bit;
@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                /* Force the page size for this object */
                obj->mm.page_sizes.sg = page_size;
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out_unpin;
@@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                i915_vma_close(vma);
 
                i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                i915_gem_object_put(obj);
        }
 
@@ -591,12 +592,13 @@ static void close_object_list(struct list_head *objects,
        list_for_each_entry_safe(obj, on, objects, st_link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (!IS_ERR(vma))
                        i915_vma_close(vma);
 
                list_del(&obj->st_link);
                i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                i915_gem_object_put(obj);
        }
 }
@@ -604,8 +606,8 @@ static void close_object_list(struct list_head *objects,
 static int igt_mock_ppgtt_huge_fill(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
-       unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
+       unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
        unsigned long page_num;
        bool single = false;
        LIST_HEAD(objects);
@@ -641,7 +643,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        break;
@@ -725,7 +727,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 static int igt_mock_ppgtt_64K(void *arg)
 {
        struct i915_hw_ppgtt *ppgtt = arg;
-       struct drm_i915_private *i915 = ppgtt->base.i915;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
        struct drm_i915_gem_object *obj;
        const struct object_info {
                unsigned int size;
@@ -819,7 +821,7 @@ static int igt_mock_ppgtt_64K(void *arg)
                         */
                        obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
 
-                       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+                       vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                        if (IS_ERR(vma)) {
                                err = PTR_ERR(vma);
                                goto out_object_unpin;
@@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg)
                        i915_vma_close(vma);
 
                        i915_gem_object_unpin_pages(obj);
+                       __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                        i915_gem_object_put(obj);
                }
        }
@@ -887,8 +890,8 @@ out_object_put:
 static struct i915_vma *
 gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 {
-       struct drm_i915_private *i915 = to_i915(vma->obj->base.dev);
-       const int gen = INTEL_GEN(vma->vm->i915);
+       struct drm_i915_private *i915 = vma->vm->i915;
+       const int gen = INTEL_GEN(i915);
        unsigned int count = vma->size >> PAGE_SHIFT;
        struct drm_i915_gem_object *obj;
        struct i915_vma *batch;
@@ -919,12 +922,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
                        *cmd++ = val;
                } else if (gen >= 4) {
                        *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-                               (gen < 6 ? 1 << 22 : 0);
+                               (gen < 6 ? MI_USE_GGTT : 0);
                        *cmd++ = 0;
                        *cmd++ = offset;
                        *cmd++ = val;
                } else {
-                       *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+                       *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *cmd++ = offset;
                        *cmd++ = val;
                }
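
The magic "1 << 22" replaced here and in the hunks below is bit 22 of the MI_STORE_DWORD_IMM command, whose meaning is generation-dependent: on the older parts it selects virtual (per-process) addressing, on g4x and later it selects the global GTT. The named flags are expected to be the usual i915 definitions, both aliases for the same bit:

        #define MI_MEM_VIRTUAL  (1 << 22)       /* 945, g33, 965: virtual addressing */
        #define MI_USE_GGTT     (1 << 22)       /* g4x+: address via the global GTT */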
@@ -985,7 +988,10 @@ static int gpu_write(struct i915_vma *vma,
                goto err_request;
        }
 
-       i915_vma_move_to_active(batch, rq, 0);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto err_request;
+
        i915_gem_object_set_active_reference(batch->obj);
        i915_vma_unpin(batch);
        i915_vma_close(batch);
@@ -996,14 +1002,12 @@ static int gpu_write(struct i915_vma *vma,
        if (err)
                goto err_request;
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
-       reservation_object_lock(vma->resv, NULL);
-       reservation_object_add_excl_fence(vma->resv, &rq->fence);
-       reservation_object_unlock(vma->resv);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               i915_request_skip(rq, err);
 
 err_request:
-       __i915_request_add(rq, err == 0);
+       i915_request_add(rq);
 
        return err;
 }
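
The reworked error handling above follows the rule that an allocated request must always be added for retirement, never leaked; if populating it fails part-way, i915_request_skip() cancels the payload first. Stripped to its skeleton (a sketch of the pattern, not additional code):

        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                i915_request_skip(rq, err);     /* cancel the payload */

        i915_request_add(rq);                   /* always submit for retirement */
        return err;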
@@ -1047,7 +1051,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
                            u32 dword, u32 val)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
        struct i915_vma *vma;
        int err;
@@ -1100,7 +1105,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
                          struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        static struct intel_engine_cs *engines[I915_NUM_ENGINES];
        struct intel_engine_cs *engine;
        I915_RND_STATE(prng);
@@ -1262,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
                        }
 
                        i915_gem_object_unpin_pages(obj);
+                       __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                        i915_gem_object_put(obj);
                }
        }
@@ -1323,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg)
                }
 
                i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                i915_gem_object_put(obj);
        }
 
@@ -1391,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
                }
 
                i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
                i915_gem_object_put(obj);
        }
 
@@ -1439,7 +1448,7 @@ static int igt_ppgtt_pin_update(void *arg)
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
-               vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out_put;
@@ -1493,7 +1502,7 @@ static int igt_ppgtt_pin_update(void *arg)
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = i915_vma_instance(obj, &ppgtt->base, NULL);
+       vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto out_put;
@@ -1531,7 +1540,8 @@ static int igt_tmpfs_fallback(void *arg)
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *i915 = ctx->i915;
        struct vfsmount *gemfs = i915->mm.gemfs;
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *vaddr;
@@ -1587,7 +1597,8 @@ static int igt_shrink_thp(void *arg)
 {
        struct i915_gem_context *ctx = arg;
        struct drm_i915_private *i915 = ctx->i915;
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        unsigned int flags = PIN_USER;
@@ -1690,20 +1701,20 @@ int i915_gem_huge_page_mock_selftests(void)
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+       ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
        }
 
-       if (!i915_vm_is_48bit(&ppgtt->base)) {
+       if (!i915_vm_is_48bit(&ppgtt->vm)) {
                pr_err("failed to create 48b PPGTT\n");
                err = -EINVAL;
                goto out_close;
        }
 
        /* If we ever hit this then it's time to mock the 64K scratch */
-       if (!i915_vm_has_scratch_64K(&ppgtt->base)) {
+       if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
                pr_err("PPGTT missing 64K scratch page\n");
                err = -EINVAL;
                goto out_close;
@@ -1712,7 +1723,7 @@ int i915_gem_huge_page_mock_selftests(void)
        err = i915_subtests(tests, ppgtt);
 
 out_close:
-       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_close(&ppgtt->vm);
        i915_ppgtt_put(ppgtt);
 
 out_unlock:
@@ -1720,7 +1731,7 @@ out_unlock:
 
        i915_modparams.enable_ppgtt = saved_ppgtt;
 
-       drm_dev_unref(&dev_priv->drm);
+       drm_dev_put(&dev_priv->drm);
 
        return err;
 }
@@ -1744,6 +1755,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
                return 0;
        }
 
+       if (i915_terminally_wedged(&dev_priv->gpu_error))
+               return 0;
+
        file = mock_file(dev_priv);
        if (IS_ERR(file))
                return PTR_ERR(file);
@@ -1758,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
        }
 
        if (ctx->ppgtt)
-               ctx->ppgtt->base.scrub_64K = true;
+               ctx->ppgtt->vm.scrub_64K = true;
 
        err = i915_subtests(tests, ctx);
 
index 340a98c0c804a5c8203b4130a114ddf0b2ed7b20..3a095c37c1203ba7a5e2a72bd29d5d40395d9039 100644 (file)
@@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 
        page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
-       if (needs_clflush & CLFLUSH_BEFORE)
+
+       if (needs_clflush & CLFLUSH_BEFORE) {
+               mb();
                clflush(map+offset_in_page(offset) / sizeof(*map));
+               mb();
+       }
+
        map[offset_in_page(offset) / sizeof(*map)] = v;
-       if (needs_clflush & CLFLUSH_AFTER)
+
+       if (needs_clflush & CLFLUSH_AFTER) {
+               mb();
                clflush(map+offset_in_page(offset) / sizeof(*map));
+               mb();
+       }
+
        kunmap_atomic(map);
 
        i915_gem_obj_finish_shmem_access(obj);
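
The new mb() calls bracket each clflush() because the flush instruction is only weakly ordered against ordinary loads and stores; without fences the flush could be reordered around the very access it is meant to publish or observe. The pattern in miniature (illustrative only):

        map[idx] = v;           /* plain store */
        mb();                   /* make the store visible before flushing */
        clflush(&map[idx]);     /* write the cacheline back / invalidate it */
        mb();                   /* complete the flush before anything later */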
@@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 
        page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
        map = kmap_atomic(page);
-       if (needs_clflush & CLFLUSH_BEFORE)
+
+       if (needs_clflush & CLFLUSH_BEFORE) {
+               mb();
                clflush(map+offset_in_page(offset) / sizeof(*map));
+               mb();
+       }
+
        *v = map[offset_in_page(offset) / sizeof(*map)];
        kunmap_atomic(map);
 
@@ -199,7 +214,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 
        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
-               __i915_request_add(rq, false);
+               i915_request_add(rq);
                i915_vma_unpin(vma);
                return PTR_ERR(cs);
        }
@@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj,
                *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
                *cs++ = v;
        } else if (INTEL_GEN(i915) >= 4) {
-               *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+               *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = 0;
                *cs++ = i915_ggtt_offset(vma) + offset;
                *cs++ = v;
        } else {
-               *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+               *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *cs++ = i915_ggtt_offset(vma) + offset;
                *cs++ = v;
                *cs++ = MI_NOOP;
        }
        intel_ring_advance(rq, cs);
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unpin(vma);
 
-       reservation_object_lock(obj->resv, NULL);
-       reservation_object_add_excl_fence(obj->resv, &rq->fence);
-       reservation_object_unlock(obj->resv);
-
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
-       return 0;
+       return err;
 }
 
 static bool always_valid(struct drm_i915_private *i915)
@@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915)
        return true;
 }
 
+static bool needs_fence_registers(struct drm_i915_private *i915)
+{
+       return !i915_terminally_wedged(&i915->gpu_error);
+}
+
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
 {
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return false;
+
        return intel_engine_can_store_dword(i915->engine[RCS]);
 }
 
@@ -251,7 +270,7 @@ static const struct igt_coherency_mode {
        bool (*valid)(struct drm_i915_private *i915);
 } igt_coherency_mode[] = {
        { "cpu", cpu_set, cpu_get, always_valid },
-       { "gtt", gtt_set, gtt_get, always_valid },
+       { "gtt", gtt_set, gtt_get, needs_fence_registers },
        { "wc", wc_set, wc_get, always_valid },
        { "gpu", gpu_set, NULL, needs_mi_store_dword },
        { },
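
This table is consumed by the coherency selftest proper, which cross-multiplies every valid write method against every valid read method; with this change the "gtt" paths are simply skipped once the GPU is terminally wedged. Roughly (a sketch of the consuming loops, which sit outside this hunk):

        const struct igt_coherency_mode *read, *write;

        for (write = igt_coherency_mode; write->name; write++) {
                if (!write->set || !write->valid(i915))
                        continue;
                for (read = igt_coherency_mode; read->name; read++) {
                        if (!read->get || !read->valid(i915))
                                continue;
                        /* write via write->set(), then verify via read->get() */
                }
        }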
index ddb03f009232f2f05dee9b6bb6dd528e457d0114..1c92560d35da6553cea677aaec9d07ed11333a7e 100644 (file)
  */
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
 #include "igt_flush_test.h"
 
 #include "mock_drm.h"
+#include "mock_gem_device.h"
 #include "huge_gem_object.h"
 
 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
@@ -62,12 +64,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
                        *cmd++ = value;
                } else if (gen >= 4) {
                        *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-                               (gen < 6 ? 1 << 22 : 0);
+                               (gen < 6 ? MI_USE_GGTT : 0);
                        *cmd++ = 0;
                        *cmd++ = offset;
                        *cmd++ = value;
                } else {
-                       *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+                       *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *cmd++ = offset;
                        *cmd++ = value;
                }
@@ -114,7 +116,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_address_space *vm =
-               ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct i915_request *rq;
        struct i915_vma *vma;
        struct i915_vma *batch;
@@ -169,24 +171,28 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        if (err)
                goto err_request;
 
-       i915_vma_move_to_active(batch, rq, 0);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto skip_request;
+
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto skip_request;
+
        i915_gem_object_set_active_reference(batch->obj);
        i915_vma_unpin(batch);
        i915_vma_close(batch);
 
-       i915_vma_move_to_active(vma, rq, 0);
        i915_vma_unpin(vma);
 
-       reservation_object_lock(obj->resv, NULL);
-       reservation_object_add_excl_fence(obj->resv, &rq->fence);
-       reservation_object_unlock(obj->resv);
-
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        return 0;
 
+skip_request:
+       i915_request_skip(rq, err);
 err_request:
-       __i915_request_add(rq, false);
+       i915_request_add(rq);
 err_batch:
        i915_vma_unpin(batch);
 err_vma:
@@ -247,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
                }
 
                for (; m < DW_PER_PAGE; m++) {
-                       if (map[m] != 0xdeadbeef) {
+                       if (map[m] != STACK_MAGIC) {
                                pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
-                                      n, m, map[m], 0xdeadbeef);
+                                      n, m, map[m], STACK_MAGIC);
                                err = -EINVAL;
                                goto out_unmap;
                        }
@@ -289,7 +295,7 @@ create_test_object(struct i915_gem_context *ctx,
 {
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm =
-               ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+               ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
        u64 size;
        int err;
 
@@ -305,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx,
        if (err)
                return ERR_PTR(err);
 
-       err = cpu_fill(obj, 0xdeadbeef);
+       err = cpu_fill(obj, STACK_MAGIC);
        if (err) {
                pr_err("Failed to fill object with cpu, err=%d\n",
                       err);
@@ -335,11 +341,15 @@ static int igt_ctx_exec(void *arg)
        bool first_shared_gtt = true;
        int err = -ENODEV;
 
-       /* Create a few different contexts (with different mm) and write
+       /*
+        * Create a few different contexts (with different mm) and write
         * through each ctx/mm using the GPU, making sure those writes end
         * up in the expected pages of our obj.
         */
 
+       if (!DRIVER_CAPS(i915)->has_logical_contexts)
+               return 0;
+
        file = mock_file(i915);
        if (IS_ERR(file))
                return PTR_ERR(file);
@@ -366,6 +376,9 @@ static int igt_ctx_exec(void *arg)
                }
 
                for_each_engine(engine, i915, id) {
+                       if (!engine->context_size)
+                               continue; /* No logical context support in HW */
+
                        if (!intel_engine_can_store_dword(engine))
                                continue;
 
@@ -420,6 +433,237 @@ out_unlock:
        return err;
 }
 
+static int igt_ctx_readonly(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct drm_i915_gem_object *obj = NULL;
+       struct drm_file *file;
+       I915_RND_STATE(prng);
+       IGT_TIMEOUT(end_time);
+       LIST_HEAD(objects);
+       struct i915_gem_context *ctx;
+       struct i915_hw_ppgtt *ppgtt;
+       unsigned long ndwords, dw;
+       int err = -ENODEV;
+
+       /*
+        * Create a few read-only objects (with the occasional writable object)
+        * and try to write into these objects, checking that the GPU discards
+        * any write to a read-only object.
+        */
+
+       file = mock_file(i915);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       ctx = i915_gem_create_context(i915, file->driver_priv);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto out_unlock;
+       }
+
+       ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
+       if (!ppgtt || !ppgtt->vm.has_read_only) {
+               err = 0;
+               goto out_unlock;
+       }
+
+       ndwords = 0;
+       dw = 0;
+       while (!time_after(jiffies, end_time)) {
+               struct intel_engine_cs *engine;
+               unsigned int id;
+
+               for_each_engine(engine, i915, id) {
+                       if (!intel_engine_can_store_dword(engine))
+                               continue;
+
+                       if (!obj) {
+                               obj = create_test_object(ctx, file, &objects);
+                               if (IS_ERR(obj)) {
+                                       err = PTR_ERR(obj);
+                                       goto out_unlock;
+                               }
+
+                               if (prandom_u32_state(&prng) & 1)
+                                       i915_gem_object_set_readonly(obj);
+                       }
+
+                       intel_runtime_pm_get(i915);
+                       err = gpu_fill(obj, ctx, engine, dw);
+                       intel_runtime_pm_put(i915);
+                       if (err) {
+                               pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+                                      ndwords, dw, max_dwords(obj),
+                                      engine->name, ctx->hw_id,
+                                      yesno(!!ctx->ppgtt), err);
+                               goto out_unlock;
+                       }
+
+                       if (++dw == max_dwords(obj)) {
+                               obj = NULL;
+                               dw = 0;
+                       }
+                       ndwords++;
+               }
+       }
+       pr_info("Submitted %lu dwords (across %u engines)\n",
+               ndwords, INTEL_INFO(i915)->num_rings);
+
+       dw = 0;
+       list_for_each_entry(obj, &objects, st_link) {
+               unsigned int rem =
+                       min_t(unsigned int, ndwords - dw, max_dwords(obj));
+               unsigned int num_writes;
+
+               num_writes = rem;
+               if (i915_gem_object_is_readonly(obj))
+                       num_writes = 0;
+
+               err = cpu_check(obj, num_writes);
+               if (err)
+                       break;
+
+               dw += rem;
+       }
+
+out_unlock:
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       mock_file_free(i915, file);
+       return err;
+}
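
igt_ctx_readonly() relies on i915_gem_object_set_readonly()/is_readonly(), which are introduced elsewhere in this series; assuming that companion change, they are thin accessors over a per-object flag, roughly:

        /* Sketch, assuming the read-only object support from the same series */
        static inline void
        i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
        {
                obj->base.vma_node.readonly = true;
        }

        static inline bool
        i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
        {
                return obj->base.vma_node.readonly;
        }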
+
+static __maybe_unused const char *
+__engine_name(struct drm_i915_private *i915, unsigned int engines)
+{
+       struct intel_engine_cs *engine;
+       unsigned int tmp;
+
+       if (engines == ALL_ENGINES)
+               return "all";
+
+       for_each_engine_masked(engine, i915, engines, tmp)
+               return engine->name;
+
+       return "none";
+}
+
+static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
+                                         struct i915_gem_context *ctx,
+                                         unsigned int engines)
+{
+       struct intel_engine_cs *engine;
+       unsigned int tmp;
+       int err;
+
+       GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               struct i915_request *rq;
+
+               rq = i915_request_alloc(engine, ctx);
+               if (IS_ERR(rq))
+                       return PTR_ERR(rq);
+
+               i915_request_add(rq);
+       }
+
+       err = i915_gem_switch_to_kernel_context(i915);
+       if (err)
+               return err;
+
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               if (!engine_has_kernel_context_barrier(engine)) {
+                       pr_err("kernel context not last on engine %s!\n",
+                              engine->name);
+                       return -EINVAL;
+               }
+       }
+
+       err = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_LOCKED,
+                                    MAX_SCHEDULE_TIMEOUT);
+       if (err)
+               return err;
+
+       GEM_BUG_ON(i915->gt.active_requests);
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               if (engine->last_retired_context->gem_context != i915->kernel_context) {
+                       pr_err("engine %s not idling in kernel context!\n",
+                              engine->name);
+                       return -EINVAL;
+               }
+       }
+
+       err = i915_gem_switch_to_kernel_context(i915);
+       if (err)
+               return err;
+
+       if (i915->gt.active_requests) {
+               pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
+                      i915->gt.active_requests);
+               return -EINVAL;
+       }
+
+       for_each_engine_masked(engine, i915, engines, tmp) {
+               if (!intel_engine_has_kernel_context(engine)) {
+                       pr_err("kernel context not last on engine %s!\n",
+                              engine->name);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int igt_switch_to_kernel_context(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_engine_cs *engine;
+       struct i915_gem_context *ctx;
+       enum intel_engine_id id;
+       int err;
+
+       /*
+        * A core premise of switching to the kernel context is that
+        * if an engine is already idling in the kernel context, we
+        * do not emit another request to wake it up. The other premise is
+        * that we do indeed end up idling in the kernel context.
+        */
+
+       mutex_lock(&i915->drm.struct_mutex);
+       ctx = kernel_context(i915);
+       if (IS_ERR(ctx)) {
+               mutex_unlock(&i915->drm.struct_mutex);
+               return PTR_ERR(ctx);
+       }
+
+       /* First check idling each individual engine */
+       for_each_engine(engine, i915, id) {
+               err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
+               if (err)
+                       goto out_unlock;
+       }
+
+       /* Now en masse */
+       err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
+       if (err)
+               goto out_unlock;
+
+out_unlock:
+       GEM_TRACE_DUMP_ON(err);
+       if (igt_flush_test(i915, I915_WAIT_LOCKED))
+               err = -EIO;
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       kernel_context_close(ctx);
+       return err;
+}
+
 static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
 {
        struct drm_i915_gem_object *obj;
@@ -432,7 +676,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
        list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma))
                        continue;
 
@@ -447,14 +691,37 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
        i915_gem_fini_aliasing_ppgtt(i915);
 }
 
+int i915_gem_context_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_switch_to_kernel_context),
+       };
+       struct drm_i915_private *i915;
+       int err;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       err = i915_subtests(tests, i915);
+
+       drm_dev_put(&i915->drm);
+       return err;
+}
+
 int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 {
        static const struct i915_subtest tests[] = {
+               SUBTEST(igt_switch_to_kernel_context),
                SUBTEST(igt_ctx_exec),
+               SUBTEST(igt_ctx_readonly),
        };
        bool fake_alias = false;
        int err;
 
+       if (i915_terminally_wedged(&dev_priv->gpu_error))
+               return 0;
+
        /* Install a fake aliasing gtt for exercise */
        if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
                mutex_lock(&dev_priv->drm.struct_mutex);
index 89dc25a5a53b87182967d9603a842d5aa2300c3e..a7055b12e53ce140368837caf89fb29f046a9a34 100644 (file)
@@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void)
 
        err = i915_subtests(tests, i915);
 
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
        return err;
 }
 
index ab9d7bee0aae1f3fbe371dbe68274f4423b1826d..128ad1cf0647a0986d83625669062382fb55a7b0 100644 (file)
@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
        u64 size;
 
        for (size = 0;
-            size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             size += I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
                return -EINVAL;
        }
 
-       if (list_empty(&i915->ggtt.base.inactive_list)) {
+       if (list_empty(&i915->ggtt.vm.inactive_list)) {
                pr_err("No objects on the GGTT inactive list!\n");
                return -EINVAL;
        }
@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+       list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
                i915_vma_unpin(vma);
 }
 
@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
                goto cleanup;
 
        /* Everything is pinned, nothing should happen */
-       err = i915_gem_evict_something(&ggtt->base,
+       err = i915_gem_evict_something(&ggtt->vm,
                                       I915_GTT_PAGE_SIZE, 0, 0,
                                       0, U64_MAX,
                                       0);
@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
        unpin_ggtt(i915);
 
        /* Everything is unpinned, we should be able to evict something */
-       err = i915_gem_evict_something(&ggtt->base,
+       err = i915_gem_evict_something(&ggtt->vm,
                                       I915_GTT_PAGE_SIZE, 0, 0,
                                       0, U64_MAX,
                                       0);
@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
                goto cleanup;
 
        /* Everything is pinned, nothing should happen */
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (err != -ENOSPC) {
                pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
                       err);
@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
        unpin_ggtt(i915);
 
        /* Everything is unpinned, we should be able to evict the node */
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (err) {
                pr_err("i915_gem_evict_for_node returned err=%d\n",
                       err);
@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
         * i915_gtt_color_adjust throughout our driver, so using a mock color
         * adjust will work just fine for our purposes.
         */
-       ggtt->base.mm.color_adjust = mock_color_adjust;
+       ggtt->vm.mm.color_adjust = mock_color_adjust;
 
        obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
        if (IS_ERR(obj)) {
@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
        i915_vma_unpin(vma);
 
        /* Remove just the second vma */
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (err) {
                pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
                goto cleanup;
@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
         */
        target.color = I915_CACHE_L3_LLC;
 
-       err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+       err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
        if (!err) {
                pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
                err = -EINVAL;
@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
 cleanup:
        unpin_ggtt(i915);
        cleanup_objects(i915);
-       ggtt->base.mm.color_adjust = NULL;
+       ggtt->vm.mm.color_adjust = NULL;
        return err;
 }
 
@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
                goto cleanup;
 
        /* Everything is pinned, nothing should happen */
-       err = i915_gem_evict_vm(&ggtt->base);
+       err = i915_gem_evict_vm(&ggtt->vm);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
                       err);
@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
 
        unpin_ggtt(i915);
 
-       err = i915_gem_evict_vm(&ggtt->base);
+       err = i915_gem_evict_vm(&ggtt->vm);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d\n",
                       err);
@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
 
        /* Reserve a block so that we know we have enough to fit a few rq */
        memset(&hole, 0, sizeof(hole));
-       err = i915_gem_gtt_insert(&i915->ggtt.base, &hole,
+       err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
                                  PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
-                                 0, i915->ggtt.base.total,
+                                 0, i915->ggtt.vm.total,
                                  PIN_NOEVICT);
        if (err)
                goto out_locked;
@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
                        goto out_locked;
                }
 
-               if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node,
+               if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
                                        1ul << 20, 0, I915_COLOR_UNEVICTABLE,
-                                       0, i915->ggtt.base.total,
+                                       0, i915->ggtt.vm.total,
                                        PIN_NOEVICT)) {
                        kfree(r);
                        break;
@@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void)
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
        return err;
 }
 
@@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_evict_contexts),
        };
 
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return 0;
+
        return i915_subtests(tests, i915);
 }
index f7dc926f4ef1fa610e5415d0b2771b30fcbdbbe5..8e2e269db97e82917b299afbe680fc008b8c90a1 100644 (file)
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 
+static void cleanup_freed_objects(struct drm_i915_private *i915)
+{
+       /*
+        * As we may hold onto the struct_mutex for inordinate lengths of
+        * time, the NMI khungtaskd detector may fire for the free objects
+        * worker.
+        */
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       i915_gem_drain_freed_objects(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
+}
+
 static void fake_free_pages(struct drm_i915_gem_object *obj,
                            struct sg_table *pages)
 {
@@ -134,31 +148,34 @@ static int igt_ppgtt_alloc(void *arg)
 {
        struct drm_i915_private *dev_priv = arg;
        struct i915_hw_ppgtt *ppgtt;
-       u64 size, last;
-       int err;
+       u64 size, last, limit;
+       int err = 0;
 
        /* Allocate a ppgtt and try to fill the entire range */
 
        if (!USES_PPGTT(dev_priv))
                return 0;
 
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return -ENOMEM;
+       ppgtt = __hw_ppgtt_create(dev_priv);
+       if (IS_ERR(ppgtt))
+               return PTR_ERR(ppgtt);
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       err = __hw_ppgtt_init(ppgtt, dev_priv);
-       if (err)
-               goto err_ppgtt;
-
-       if (!ppgtt->base.allocate_va_range)
+       if (!ppgtt->vm.allocate_va_range)
                goto err_ppgtt_cleanup;
 
+       /*
+        * While we only allocate the page tables here and so we could
+        * address a much larger GTT than we could actually fit into
+        * RAM, a practical limit is the number of physical pages in the system.
+        * This should ensure that we do not run into the oomkiller during
+        * the test and take down the machine wilfully.
+        */
+       limit = totalram_pages << PAGE_SHIFT;
+       limit = min(ppgtt->vm.total, limit);
+
        /* Check we can allocate the entire range */
-       for (size = 4096;
-            size <= ppgtt->base.total;
-            size <<= 2) {
-               err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+       for (size = 4096; size <= limit; size <<= 2) {
+               err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
@@ -168,15 +185,15 @@ static int igt_ppgtt_alloc(void *arg)
                        goto err_ppgtt_cleanup;
                }
 
-               ppgtt->base.clear_range(&ppgtt->base, 0, size);
+               cond_resched();
+
+               ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
        }
 
        /* Check we can incrementally allocate the entire range */
-       for (last = 0, size = 4096;
-            size <= ppgtt->base.total;
-            last = size, size <<= 2) {
-               err = ppgtt->base.allocate_va_range(&ppgtt->base,
-                                                   last, size - last);
+       for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
+               err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
+                                                 last, size - last);
                if (err) {
                        if (err == -ENOMEM) {
                                pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
@@ -185,13 +202,14 @@ static int igt_ppgtt_alloc(void *arg)
                        }
                        goto err_ppgtt_cleanup;
                }
+
+               cond_resched();
        }
 
 err_ppgtt_cleanup:
-       ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i915_ppgtt_put(ppgtt);
        mutex_unlock(&dev_priv->drm.struct_mutex);
-       kfree(ppgtt);
        return err;
 }
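
The new limit is simple arithmetic: totalram_pages shifted by PAGE_SHIFT gives the installed RAM in bytes, and the sweep is clamped to that. For example, on a 16 GiB machine with 4 KiB pages:

        limit = totalram_pages << PAGE_SHIFT;   /* 4194304 << 12 = 16 GiB */
        limit = min(ppgtt->vm.total, limit);    /* e.g. 256 TiB clamped to 16 GiB */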
 
@@ -293,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
                i915_gem_object_put(obj);
 
                kfree(order);
+
+               cleanup_freed_objects(i915);
        }
 
        return 0;
@@ -521,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
                }
 
                close_object_list(&objects, vm);
+               cleanup_freed_objects(i915);
        }
 
        return 0;
@@ -607,6 +628,8 @@ err_put:
                i915_gem_object_put(obj);
                if (err)
                        return err;
+
+               cleanup_freed_objects(i915);
        }
 
        return 0;
@@ -791,6 +814,8 @@ err_obj:
                kfree(order);
                if (err)
                        return err;
+
+               cleanup_freed_objects(i915);
        }
 
        return 0;
@@ -859,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
        }
 
        close_object_list(&objects, vm);
+       cleanup_freed_objects(i915);
        return err;
 }
 
@@ -951,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915,
                i915_gem_object_put(explode);
 
                memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+               cleanup_freed_objects(i915);
        }
 
        return 0;
@@ -982,17 +1009,17 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
                return PTR_ERR(file);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+       ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
        if (IS_ERR(ppgtt)) {
                err = PTR_ERR(ppgtt);
                goto out_unlock;
        }
-       GEM_BUG_ON(offset_in_page(ppgtt->base.total));
-       GEM_BUG_ON(ppgtt->base.closed);
+       GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
+       GEM_BUG_ON(ppgtt->vm.closed);
 
-       err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+       err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
 
-       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_close(&ppgtt->vm);
        i915_ppgtt_put(ppgtt);
 out_unlock:
        mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1061,18 +1088,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
 
        mutex_lock(&i915->drm.struct_mutex);
 restart:
-       list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
-       drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+       list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
+       drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
                if (hole_start < last)
                        continue;
 
-               if (ggtt->base.mm.color_adjust)
-                       ggtt->base.mm.color_adjust(node, 0,
-                                                  &hole_start, &hole_end);
+               if (ggtt->vm.mm.color_adjust)
+                       ggtt->vm.mm.color_adjust(node, 0,
+                                                &hole_start, &hole_end);
                if (hole_start >= hole_end)
                        continue;
 
-               err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+               err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
                if (err)
                        break;
 
@@ -1134,7 +1161,7 @@ static int igt_ggtt_page(void *arg)
                goto out_free;
 
        memset(&tmp, 0, sizeof(tmp));
-       err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+       err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
                                          count * PAGE_SIZE, 0,
                                          I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
@@ -1147,9 +1174,9 @@ static int igt_ggtt_page(void *arg)
        for (n = 0; n < count; n++) {
                u64 offset = tmp.start + n * PAGE_SIZE;
 
-               ggtt->base.insert_page(&ggtt->base,
-                                      i915_gem_object_get_dma_address(obj, 0),
-                                      offset, I915_CACHE_NONE, 0);
+               ggtt->vm.insert_page(&ggtt->vm,
+                                    i915_gem_object_get_dma_address(obj, 0),
+                                    offset, I915_CACHE_NONE, 0);
        }
 
        order = i915_random_order(count, &prng);
@@ -1188,7 +1215,7 @@ static int igt_ggtt_page(void *arg)
 
        kfree(order);
 out_remove:
-       ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
+       ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
        intel_runtime_pm_put(i915);
        drm_mm_remove_node(&tmp);
 out_unpin:
@@ -1217,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
                                     u64 hole_start, u64 hole_end,
                                     unsigned long end_time))
 {
+       const u64 limit = totalram_pages << PAGE_SHIFT;
        struct i915_gem_context *ctx;
        struct i915_hw_ppgtt *ppgtt;
        IGT_TIMEOUT(end_time);
@@ -1229,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915,
        ppgtt = ctx->ppgtt;
        GEM_BUG_ON(!ppgtt);
 
-       err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+       err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
 
        mock_context_close(ctx);
        return err;
@@ -1270,7 +1298,7 @@ static int igt_gtt_reserve(void *arg)
 
        /* Start by filling the GGTT */
        for (total = 0;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1288,20 +1316,20 @@ static int igt_gtt_reserve(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1319,7 +1347,7 @@ static int igt_gtt_reserve(void *arg)
 
        /* Now we start forcing evictions */
        for (total = I915_GTT_PAGE_SIZE;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1337,20 +1365,20 @@ static int igt_gtt_reserve(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
                                           obj->base.size,
                                           total,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1371,7 +1399,7 @@ static int igt_gtt_reserve(void *arg)
                struct i915_vma *vma;
                u64 offset;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1383,18 +1411,18 @@ static int igt_gtt_reserve(void *arg)
                        goto out;
                }
 
-               offset = random_offset(0, i915->ggtt.base.total,
+               offset = random_offset(0, i915->ggtt.vm.total,
                                       2*I915_GTT_PAGE_SIZE,
                                       I915_GTT_MIN_ALIGNMENT);
 
-               err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
                                           obj->base.size,
                                           offset,
                                           obj->cache_level,
                                           0);
                if (err) {
                        pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1429,8 +1457,8 @@ static int igt_gtt_insert(void *arg)
                u64 start, end;
        } invalid_insert[] = {
                {
-                       i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
-                       0, i915->ggtt.base.total,
+                       i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
+                       0, i915->ggtt.vm.total,
                },
                {
                        2*I915_GTT_PAGE_SIZE, 0,
@@ -1460,7 +1488,7 @@ static int igt_gtt_insert(void *arg)
 
        /* Check a couple of obviously invalid requests */
        for (ii = invalid_insert; ii->size; ii++) {
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
                                          ii->size, ii->alignment,
                                          I915_COLOR_UNEVICTABLE,
                                          ii->start, ii->end,
@@ -1475,7 +1503,7 @@ static int igt_gtt_insert(void *arg)
 
        /* Start by filling the GGTT */
        for (total = 0;
-            total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1493,15 +1521,15 @@ static int igt_gtt_insert(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.base.total,
+                                         0, i915->ggtt.vm.total,
                                          0);
                if (err == -ENOSPC) {
                        /* maxed out the GGTT space */
@@ -1510,7 +1538,7 @@ static int igt_gtt_insert(void *arg)
                }
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1522,7 +1550,7 @@ static int igt_gtt_insert(void *arg)
        list_for_each_entry(obj, &objects, st_link) {
                struct i915_vma *vma;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1542,7 +1570,7 @@ static int igt_gtt_insert(void *arg)
                struct i915_vma *vma;
                u64 offset;
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
@@ -1557,13 +1585,13 @@ static int igt_gtt_insert(void *arg)
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.base.total,
+                                         0, i915->ggtt.vm.total,
                                          0);
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1579,7 +1607,7 @@ static int igt_gtt_insert(void *arg)
 
        /* And then force evictions */
        for (total = 0;
-            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+            total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
             total += 2*I915_GTT_PAGE_SIZE) {
                struct i915_vma *vma;
 
@@ -1597,19 +1625,19 @@ static int igt_gtt_insert(void *arg)
 
                list_add(&obj->st_link, &objects);
 
-               vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto out;
                }
 
-               err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+               err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
                                          obj->base.size, 0, obj->cache_level,
-                                         0, i915->ggtt.base.total,
+                                         0, i915->ggtt.vm.total,
                                          0);
                if (err) {
                        pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
-                              total, i915->ggtt.base.total, err);
+                              total, i915->ggtt.vm.total, err);
                        goto out;
                }
                track_vma_bind(vma);
@@ -1646,7 +1674,7 @@ int i915_gem_gtt_mock_selftests(void)
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
        return err;
 }
 
@@ -1669,7 +1697,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_ggtt_page),
        };
 
-       GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+       GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
 
        return i915_subtests(tests, i915);
 }
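
The bulk of this hunk is the mechanical ggtt.base -> ggtt.vm rename of the embedded address space. A sketch of the layout implied by the accesses in this series (surrounding fields elided, exact types illustrative):

/* Hedged sketch, not from the patch: the GGTT embeds the generic
 * address space as its first member, now called "vm".
 */
struct i915_ggtt {
	struct i915_address_space vm;	/* formerly named "base" */

	struct resource gmadr;		/* GMADR aperture resource */
	resource_size_t mappable_end;	/* end of CPU-mappable range */
	/* ... */
};
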
index fbdb2419d418cbc385d7ca1a11e09f543d41e81c..c69cbd5aed527940fdffc4e96c3645ddd77ae708 100644 (file)
@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
 
        obj = huge_gem_object(i915,
                              nreal * PAGE_SIZE,
-                             i915->ggtt.base.total + PAGE_SIZE);
+                             i915->ggtt.vm.total + PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
@@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
                v += y * tile->width;
                v += div64_u64_rem(x, tile->width, &x) << tile->size;
                v += x;
-       } else {
+       } else if (tile->width == 128) {
                const unsigned int ytile_span = 16;
-               const unsigned int ytile_height = 32 * ytile_span;
+               const unsigned int ytile_height = 512;
+
+               v += y * ytile_span;
+               v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+               v += x;
+       } else {
+               const unsigned int ytile_span = 32;
+               const unsigned int ytile_height = 256;
 
                v += y * ytile_span;
                v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
@@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                kunmap(p);
                if (err)
                        return err;
+
+               i915_vma_destroy(vma);
        }
 
        return 0;
@@ -311,7 +320,7 @@ static int igt_partial_tiling(void *arg)
 
        obj = huge_gem_object(i915,
                              nreal << PAGE_SHIFT,
-                             (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+                             (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
@@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg)
                unsigned int pitch;
                struct tile tile;
 
+               if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+                       /*
+                        * The swizzling pattern is actually unknown as it
+                        * varies based on physical address of each page.
+                        * See i915_gem_detect_bit_6_swizzle().
+                        */
+                       break;
+
                tile.tiling = tiling;
                switch (tiling) {
                case I915_TILING_X:
@@ -357,7 +374,8 @@ static int igt_partial_tiling(void *arg)
                        break;
                }
 
-               if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
+               GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
+               if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
                    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
                        continue;
 
@@ -440,7 +458,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
        struct i915_vma *vma;
        int err;
 
-       vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
@@ -454,12 +472,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
                return PTR_ERR(rq);
        }
 
-       i915_vma_move_to_active(vma, rq, 0);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
        i915_request_add(rq);
 
-       i915_gem_object_set_active_reference(obj);
+       __i915_gem_object_release_unless_active(obj);
        i915_vma_unpin(vma);
-       return 0;
+
+       return err;
 }
 
 static bool assert_mmap_offset(struct drm_i915_private *i915,
@@ -488,6 +508,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
        u64 hole_start, hole_end;
        int loop, err;
 
+       /* Disable background reaper */
+       mutex_lock(&i915->drm.struct_mutex);
+       if (!i915->gt.active_requests++)
+               i915_gem_unpark(i915);
+       mutex_unlock(&i915->drm.struct_mutex);
+       cancel_delayed_work_sync(&i915->gt.retire_work);
+       cancel_delayed_work_sync(&i915->gt.idle_work);
+       GEM_BUG_ON(!i915->gt.awake);
+
        /* Trim the device mmap space to only a page */
        memset(&resv, 0, sizeof(resv));
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
@@ -496,7 +525,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                err = drm_mm_reserve_node(mm, &resv);
                if (err) {
                        pr_err("Failed to trim VMA manager, err=%d\n", err);
-                       return err;
+                       goto out_park;
                }
                break;
        }
@@ -538,6 +567,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
        /* Now fill with busy dead objects that we expect to reap */
        for (loop = 0; loop < 3; loop++) {
+               if (i915_terminally_wedged(&i915->gpu_error))
+                       break;
+
                obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
@@ -554,6 +586,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                        goto err_obj;
                }
 
+               /* NB we rely on the _active_ reference to access obj now */
                GEM_BUG_ON(!i915_gem_object_is_active(obj));
                err = i915_gem_object_create_mmap_offset(obj);
                if (err) {
@@ -565,6 +598,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
 out:
        drm_mm_remove_node(&resv);
+out_park:
+       mutex_lock(&i915->drm.struct_mutex);
+       if (--i915->gt.active_requests)
+               queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
+       else
+               queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
+       mutex_unlock(&i915->drm.struct_mutex);
        return err;
 err_obj:
        i915_gem_object_put(obj);
@@ -586,7 +626,7 @@ int i915_gem_object_mock_selftests(void)
 
        err = i915_subtests(tests, i915);
 
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
        return err;
 }
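
A pattern repeated across these selftests: i915_vma_move_to_active() now reports an error that callers must propagate, while the already-allocated request must still be committed. A hedged sketch of the new contract (the wrapper function is illustrative; the calls and the EXEC_OBJECT_WRITE flag are taken from the hunks above):

/* Sketch: track a write to @vma by @rq. Once allocated, a request
 * must always be committed with i915_request_add(); only the
 * tracking error is propagated to the caller.
 */
static int example_track_and_submit(struct i915_vma *vma,
				    struct i915_request *rq)
{
	int err;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

	i915_request_add(rq);	/* must run even if tracking failed */

	return err;
}
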
 
index d16d74178e9dfaa7ff7e9b2045a3660ba6531b87..1b70208eeea7a80fc7f9a14693f11de9bf3c0b1d 100644 (file)
@@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
 selftest(gtt, i915_gem_gtt_mock_selftests)
 selftest(hugepages, i915_gem_huge_page_mock_selftests)
+selftest(contexts, i915_gem_context_mock_selftests)
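
The list above is an x-macro table: each selftest(name, func) line is expanded differently by its includers. An illustrative expansion (the exact i915 definitions differ slightly; the void-returning-int mock entry points match the functions registered above):

/* Hedged sketch: the list file is included twice, once per shape
 * of selftest(), to build an enum and a dispatch table in lockstep.
 */
#define selftest(name, func) mock_##name,
enum {
#include "i915_mock_selftests.h"
	n_mock_selftests
};
#undef selftest

#define selftest(name, func) { .name = #name, .mock = func },
static const struct {
	const char *name;
	int (*mock)(void);
} mock_selftests[] = {
#include "i915_mock_selftests.h"
};
#undef selftest
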
index 94bc2e1898a4df70c622a8a41f133fdc676272fa..c4aac6141e04d0a217302122ebff9d8668de9ed8 100644 (file)
@@ -262,7 +262,7 @@ int i915_request_mock_selftests(void)
                return -ENOMEM;
 
        err = i915_subtests(tests, i915);
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
 
        return err;
 }
@@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t,
        t->func = func;
        t->name = name;
 
-       err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+       err = i915_gem_wait_for_idle(i915,
+                                    I915_WAIT_LOCKED,
+                                    MAX_SCHEDULE_TIMEOUT);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!",
                       func, name, err);
@@ -342,9 +344,9 @@ static int live_nop_request(void *arg)
        mutex_lock(&i915->drm.struct_mutex);
 
        for_each_engine(engine, i915, id) {
-               IGT_TIMEOUT(end_time);
-               struct i915_request *request;
+               struct i915_request *request = NULL;
                unsigned long n, prime;
+               IGT_TIMEOUT(end_time);
                ktime_t times[2] = {};
 
                err = begin_live_test(&t, i915, __func__, engine->name);
@@ -430,7 +432,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
        if (err)
                goto err;
 
-       vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
@@ -466,7 +468,7 @@ empty_request(struct intel_engine_cs *engine,
                goto out_request;
 
 out_request:
-       __i915_request_add(request, err == 0);
+       i915_request_add(request);
        return err ? ERR_PTR(err) : request;
 }
 
@@ -555,7 +557,8 @@ out_unlock:
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
        struct i915_gem_context *ctx = i915->kernel_context;
-       struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
        struct i915_vma *vma;
@@ -593,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
        } else if (gen >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
                *cmd++ = lower_32_bits(vma->node.start);
-       } else if (gen >= 4) {
-               *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-               *cmd++ = lower_32_bits(vma->node.start);
        } else {
-               *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+               *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -677,7 +677,9 @@ static int live_all_engines(void *arg)
                        i915_gem_object_set_active_reference(batch->obj);
                }
 
-               i915_vma_move_to_active(batch, request[id], 0);
+               err = i915_vma_move_to_active(batch, request[id], 0);
+               GEM_BUG_ON(err);
+
                i915_request_get(request[id]);
                i915_request_add(request[id]);
        }
@@ -787,7 +789,9 @@ static int live_sequential_engines(void *arg)
                GEM_BUG_ON(err);
                request[id]->batch = batch;
 
-               i915_vma_move_to_active(batch, request[id], 0);
+               err = i915_vma_move_to_active(batch, request[id], 0);
+               GEM_BUG_ON(err);
+
                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);
 
@@ -861,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_sequential_engines),
                SUBTEST(live_empty_request),
        };
+
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return 0;
+
        return i915_subtests(tests, i915);
 }
index addc5a599c4adf65c3a4e122b39f1ca9aee67e8c..86c54ea37f488c99847416a98db8fc0f9d4db033 100644 (file)
@@ -210,6 +210,8 @@ int __i915_subtests(const char *caller,
                        return -EINTR;
 
                pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+               GEM_TRACE("Running %s/%s\n", caller, st->name);
+
                err = st->func(data);
                if (err && err != -EINTR) {
                        pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
index e90f97236e505ee9bec04a2b61c8a6bcc46cc6ab..ffa74290e0547a77f762a890b992277f2459419f 100644 (file)
@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
 {
        bool ok = true;
 
-       if (vma->vm != &ctx->ppgtt->base) {
+       if (vma->vm != &ctx->ppgtt->vm) {
                pr_err("VMA created with wrong VM\n");
                ok = false;
        }
@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
        list_for_each_entry(obj, objects, st_link) {
                for (pinned = 0; pinned <= 1; pinned++) {
                        list_for_each_entry(ctx, contexts, link) {
-                               struct i915_address_space *vm =
-                                       &ctx->ppgtt->base;
+                               struct i915_address_space *vm = &ctx->ppgtt->vm;
                                struct i915_vma *vma;
                                int err;
 
@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
                VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
                VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
                VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
 
                VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
                INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
-               VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
-               INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+               VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+               INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
                INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
 
                VALID(4096, PIN_GLOBAL),
@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
                VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
                VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
                NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
-               VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
-               VALID(i915->ggtt.base.total, PIN_GLOBAL),
-               NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+               VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
+               VALID(i915->ggtt.vm.total, PIN_GLOBAL),
+               NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
                NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
                INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
-               INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+               INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
                INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
 
                VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
                 * variable start, end and size.
                 */
                NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
-               NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+               NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
                NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
-               NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+               NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
 #endif
                { },
 #undef NOSPACE
@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
         * focusing on error handling of boundary conditions.
         */
 
-       GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+       GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
 
        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+       vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma))
                goto out;
 
@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
 static int igt_vma_rotate(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct i915_address_space *vm = &i915->ggtt.base;
+       struct i915_address_space *vm = &i915->ggtt.vm;
        struct drm_i915_gem_object *obj;
        const struct intel_rotation_plane_info planes[] = {
                { .width = 1, .height = 1, .stride = 1 },
@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
 static int igt_vma_partial(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct i915_address_space *vm = &i915->ggtt.base;
+       struct i915_address_space *vm = &i915->ggtt.vm;
        const unsigned int npages = 1021; /* prime! */
        struct drm_i915_gem_object *obj;
        const struct phase {
@@ -734,7 +733,7 @@ int i915_vma_mock_selftests(void)
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);
 
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
        return err;
 }
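
The igt_vma_pin1() table above builds its entries through local VALID/INVALID/NOSPACE macros that are #undef'd right after the table. A hedged reconstruction of their shape (the real definitions and the assert callback names live earlier in this file and may differ):

/* Illustrative only: each entry pairs a size/flags request with the
 * assertion expected from i915_vma_pin(). The assert_pin_* names
 * are hypothetical stand-ins.
 */
#define VALID(sz, fl)   { .size = (sz), .flags = (fl), .assert = assert_pin_valid }
#define INVALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_einval }
#define NOSPACE(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_enospc }
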
 
index 0d06f559243f9e7c462fe557a0fdd7ce11f88240..af66e3d4e23a421521bbbfc27093d84dd2a4d8ea 100644 (file)
@@ -9,52 +9,8 @@
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
 
-struct wedge_me {
-       struct delayed_work work;
-       struct drm_i915_private *i915;
-       const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
-       struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
-       pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
-       GEM_TRACE("%pS timed out.\n", w->symbol);
-       GEM_TRACE_DUMP();
-
-       i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
-                        struct drm_i915_private *i915,
-                        long timeout,
-                        const void *symbol)
-{
-       w->i915 = i915;
-       w->symbol = symbol;
-
-       INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
-       schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
-       cancel_delayed_work_sync(&w->work);
-       destroy_delayed_work_on_stack(&w->work);
-       w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT)                              \
-       for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \
-            (W)->i915;                                                 \
-            __fini_wedge((W)))
-
 int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 {
-       struct wedge_me w;
-
        cond_resched();
 
        if (flags & I915_WAIT_LOCKED &&
@@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
                i915_gem_set_wedged(i915);
        }
 
-       wedge_on_timeout(&w, i915, HZ)
-               i915_gem_wait_for_idle(i915, flags);
+       if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+               pr_err("%pS timed out, cancelling all further testing.\n",
+                      __builtin_return_address(0));
+
+               GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
+               GEM_TRACE_DUMP();
+
+               i915_gem_set_wedged(i915);
+       }
 
        return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
 }
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
new file mode 100644 (file)
index 0000000..08e5ff1
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_WEDGE_ME_H
+#define IGT_WEDGE_ME_H
+
+#include <linux/workqueue.h>
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_wedge_me {
+       struct delayed_work work;
+       struct drm_i915_private *i915;
+       const char *name;
+};
+
+static void __igt_wedge_me(struct work_struct *work)
+{
+       struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+       pr_err("%s timed out, cancelling test.\n", w->name);
+
+       GEM_TRACE("%s timed out.\n", w->name);
+       GEM_TRACE_DUMP();
+
+       i915_gem_set_wedged(w->i915);
+}
+
+static void __igt_init_wedge(struct igt_wedge_me *w,
+                            struct drm_i915_private *i915,
+                            long timeout,
+                            const char *name)
+{
+       w->i915 = i915;
+       w->name = name;
+
+       INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
+       schedule_delayed_work(&w->work, timeout);
+}
+
+static void __igt_fini_wedge(struct igt_wedge_me *w)
+{
+       cancel_delayed_work_sync(&w->work);
+       destroy_delayed_work_on_stack(&w->work);
+       w->i915 = NULL;
+}
+
+#define igt_wedge_on_timeout(W, DEV, TIMEOUT)                          \
+       for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__);         \
+            (W)->i915;                                                 \
+            __igt_fini_wedge((W)))
+
+#endif /* IGT_WEDGE_ME_H */
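
For reference, a minimal usage sketch of the helper defined above (hedged: the wrapper function is illustrative; the macro and the three-argument i915_gem_wait_for_idle() come from this series):

/* Sketch: bound a potentially-unbounded wait, wedging the GPU if it
 * does not finish within 200ms. The body of igt_wedge_on_timeout()
 * runs exactly once; __igt_fini_wedge() then cancels the watchdog.
 */
static int example_idle_or_wedge(struct drm_i915_private *i915)
{
	struct igt_wedge_me w;
	int err = 0;

	igt_wedge_on_timeout(&w, i915, HZ / 5)
		err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED,
					     MAX_SCHEDULE_TIMEOUT);

	return err;
}
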
index d6926e7820e5602dfcfdb1852490126c76ceede2..f03b407fdbe24b8170579f5bd69c6c5a6b5d71ce 100644 (file)
@@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void)
                return -ENOMEM;
 
        err = i915_subtests(tests, i915->engine[RCS]);
-       drm_dev_unref(&i915->drm);
+       drm_dev_put(&i915->drm);
 
        return err;
 }
index fb74e2cf8a0a9a56d28974d04a23f42cc92bfcb1..407c98fb917057dcb3026b1fb11bd6d2cc82a13b 100644 (file)
@@ -196,19 +196,23 @@ static int igt_guc_clients(void *args)
        }
 
        unreserve_doorbell(guc->execbuf_client);
-       err = guc_clients_doorbell_init(guc);
+
+       __create_doorbell(guc->execbuf_client);
+       err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
        if (err != -EIO) {
                pr_err("unexpected (err = %d)", err);
-               goto out;
+               goto out_db;
        }
 
        if (!available_dbs(guc, guc->execbuf_client->priority)) {
                pr_err("doorbell not available when it should\n");
                err = -EIO;
-               goto out;
+               goto out_db;
        }
 
+out_db:
        /* clean after test */
+       __destroy_doorbell(guc->execbuf_client);
        err = reserve_doorbell(guc->execbuf_client);
        if (err) {
                pr_err("failed to reserve back the doorbell back\n");
index 438e0b045a2c1e05afc5c6dd3c7b64ff91d9bb30..65d66cdedd26c6027be1881f380e27578d07518a 100644 (file)
@@ -27,6 +27,7 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 #include "igt_flush_test.h"
+#include "igt_wedge_me.h"
 
 #include "mock_context.h"
 #include "mock_drm.h"
@@ -105,7 +106,10 @@ static int emit_recurse_batch(struct hang *h,
                              struct i915_request *rq)
 {
        struct drm_i915_private *i915 = h->i915;
-       struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+       struct i915_address_space *vm =
+               rq->gem_context->ppgtt ?
+               &rq->gem_context->ppgtt->vm :
+               &i915->ggtt.vm;
        struct i915_vma *hws, *vma;
        unsigned int flags;
        u32 *batch;
@@ -127,13 +131,19 @@ static int emit_recurse_batch(struct hang *h,
        if (err)
                goto unpin_vma;
 
-       i915_vma_move_to_active(vma, rq, 0);
+       err = i915_vma_move_to_active(vma, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }
 
-       i915_vma_move_to_active(hws, rq, 0);
+       err = i915_vma_move_to_active(hws, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
@@ -168,7 +178,7 @@ static int emit_recurse_batch(struct hang *h,
                *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
                *batch++ = lower_32_bits(vma->node.start);
        } else if (INTEL_GEN(i915) >= 4) {
-               *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+               *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *batch++ = 0;
                *batch++ = lower_32_bits(hws_address(hws, rq));
                *batch++ = rq->fence.seqno;
@@ -181,7 +191,7 @@ static int emit_recurse_batch(struct hang *h,
                *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
                *batch++ = lower_32_bits(vma->node.start);
        } else {
-               *batch++ = MI_STORE_DWORD_IMM;
+               *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *batch++ = lower_32_bits(hws_address(hws, rq));
                *batch++ = rq->fence.seqno;
                *batch++ = MI_ARB_CHECK;
@@ -190,7 +200,7 @@ static int emit_recurse_batch(struct hang *h,
                batch += 1024 / sizeof(*batch);
 
                *batch++ = MI_ARB_CHECK;
-               *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+               *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
                *batch++ = lower_32_bits(vma->node.start);
        }
        *batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -202,6 +212,7 @@ static int emit_recurse_batch(struct hang *h,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
@@ -242,7 +253,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 
        err = emit_recurse_batch(h, rq);
        if (err) {
-               __i915_request_add(rq, false);
+               i915_request_add(rq);
                return ERR_PTR(err);
        }
 
@@ -315,7 +326,7 @@ static int igt_hang_sanitycheck(void *arg)
                *h.batch = MI_BATCH_BUFFER_END;
                i915_gem_chipset_flush(i915);
 
-               __i915_request_add(rq, true);
+               i915_request_add(rq);
 
                timeout = i915_request_wait(rq,
                                            I915_WAIT_LOCKED,
@@ -461,7 +472,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
                                }
 
                                i915_request_get(rq);
-                               __i915_request_add(rq, true);
+                               i915_request_add(rq);
                                mutex_unlock(&i915->drm.struct_mutex);
 
                                if (!wait_until_running(&h, rq)) {
@@ -560,6 +571,30 @@ struct active_engine {
 #define TEST_SELF      BIT(2)
 #define TEST_PRIORITY  BIT(3)
 
+static int active_request_put(struct i915_request *rq)
+{
+       int err = 0;
+
+       if (!rq)
+               return 0;
+
+       if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+               GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+                         rq->engine->name,
+                         rq->fence.context,
+                         rq->fence.seqno,
+                         i915_request_global_seqno(rq));
+               GEM_TRACE_DUMP();
+
+               i915_gem_set_wedged(rq->i915);
+               err = -EIO;
+       }
+
+       i915_request_put(rq);
+
+       return err;
+}
+
 static int active_engine(void *data)
 {
        I915_RND_STATE(prng);
@@ -608,24 +643,20 @@ static int active_engine(void *data)
                i915_request_add(new);
                mutex_unlock(&engine->i915->drm.struct_mutex);
 
-               if (old) {
-                       if (i915_request_wait(old, 0, HZ) < 0) {
-                               GEM_TRACE("%s timed out.\n", engine->name);
-                               GEM_TRACE_DUMP();
-
-                               i915_gem_set_wedged(engine->i915);
-                               i915_request_put(old);
-                               err = -EIO;
-                               break;
-                       }
-                       i915_request_put(old);
-               }
+               err = active_request_put(old);
+               if (err)
+                       break;
 
                cond_resched();
        }
 
-       for (count = 0; count < ARRAY_SIZE(rq); count++)
-               i915_request_put(rq[count]);
+       for (count = 0; count < ARRAY_SIZE(rq); count++) {
+               int err__ = active_request_put(rq[count]);
+
+               /* Keep the first error */
+               if (!err)
+                       err = err__;
+       }
 
 err_file:
        mock_file_free(engine->i915, file);
@@ -719,7 +750,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
                                }
 
                                i915_request_get(rq);
-                               __i915_request_add(rq, true);
+                               i915_request_add(rq);
                                mutex_unlock(&i915->drm.struct_mutex);
 
                                if (!wait_until_running(&h, rq)) {
@@ -891,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
        return reset_count;
 }
 
-static int igt_wait_reset(void *arg)
+static int igt_reset_wait(void *arg)
 {
        struct drm_i915_private *i915 = arg;
        struct i915_request *rq;
@@ -919,7 +950,7 @@ static int igt_wait_reset(void *arg)
        }
 
        i915_request_get(rq);
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        if (!wait_until_running(&h, rq)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -965,6 +996,170 @@ unlock:
        return err;
 }
 
+struct evict_vma {
+       struct completion completion;
+       struct i915_vma *vma;
+};
+
+static int evict_vma(void *data)
+{
+       struct evict_vma *arg = data;
+       struct i915_address_space *vm = arg->vma->vm;
+       struct drm_i915_private *i915 = vm->i915;
+       struct drm_mm_node evict = arg->vma->node;
+       int err;
+
+       complete(&arg->completion);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       err = i915_gem_evict_for_node(vm, &evict, 0);
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       return err;
+}
+
+static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+                                struct i915_address_space *vm)
+{
+       struct drm_i915_gem_object *obj;
+       struct task_struct *tsk = NULL;
+       struct i915_request *rq;
+       struct evict_vma arg;
+       struct hang h;
+       int err;
+
+       if (!intel_engine_can_store_dword(i915->engine[RCS]))
+               return 0;
+
+       /* Check that we can recover an unbind stuck on a hanging request */
+
+       global_reset_lock(i915);
+
+       mutex_lock(&i915->drm.struct_mutex);
+       err = hang_init(&h, i915);
+       if (err)
+               goto unlock;
+
+       obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto fini;
+       }
+
+       arg.vma = i915_vma_instance(obj, vm, NULL);
+       if (IS_ERR(arg.vma)) {
+               err = PTR_ERR(arg.vma);
+               goto out_obj;
+       }
+
+       rq = hang_create_request(&h, i915->engine[RCS]);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto out_obj;
+       }
+
+       err = i915_vma_pin(arg.vma, 0, 0,
+                          i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
+       if (err)
+               goto out_obj;
+
+       err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+       i915_vma_unpin(arg.vma);
+
+       i915_request_get(rq);
+       i915_request_add(rq);
+       if (err)
+               goto out_rq;
+
+       mutex_unlock(&i915->drm.struct_mutex);
+
+       if (!wait_until_running(&h, rq)) {
+               struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+               pr_err("%s: Failed to start request %x, at %x\n",
+                      __func__, rq->fence.seqno, hws_seqno(&h, rq));
+               intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+               i915_gem_set_wedged(i915);
+               goto out_reset;
+       }
+
+       init_completion(&arg.completion);
+
+       tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+       if (IS_ERR(tsk)) {
+               err = PTR_ERR(tsk);
+               tsk = NULL;
+               goto out_reset;
+       }
+
+       wait_for_completion(&arg.completion);
+
+       if (wait_for(waitqueue_active(&rq->execute), 10)) {
+               struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+               pr_err("igt/evict_vma kthread did not wait\n");
+               intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+               i915_gem_set_wedged(i915);
+               goto out_reset;
+       }
+
+out_reset:
+       fake_hangcheck(rq, intel_engine_flag(rq->engine));
+
+       if (tsk) {
+               struct igt_wedge_me w;
+
+               /* The reset, even indirectly, should take less than 10ms. */
+               igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+                       err = kthread_stop(tsk);
+       }
+
+       mutex_lock(&i915->drm.struct_mutex);
+out_rq:
+       i915_request_put(rq);
+out_obj:
+       i915_gem_object_put(obj);
+fini:
+       hang_fini(&h);
+unlock:
+       mutex_unlock(&i915->drm.struct_mutex);
+       global_reset_unlock(i915);
+
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return -EIO;
+
+       return err;
+}
+
+static int igt_reset_evict_ggtt(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+
+       return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+}
+
+static int igt_reset_evict_ppgtt(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct i915_gem_context *ctx;
+       int err;
+
+       mutex_lock(&i915->drm.struct_mutex);
+       ctx = kernel_context(i915);
+       mutex_unlock(&i915->drm.struct_mutex);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+
+       err = 0;
+       if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
+               err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+
+       kernel_context_close(ctx);
+       return err;
+}
+
 static int wait_for_others(struct drm_i915_private *i915,
                           struct intel_engine_cs *exclude)
 {
@@ -1014,7 +1209,7 @@ static int igt_reset_queue(void *arg)
                }
 
                i915_request_get(prev);
-               __i915_request_add(prev, true);
+               i915_request_add(prev);
 
                count = 0;
                do {
@@ -1028,7 +1223,7 @@ static int igt_reset_queue(void *arg)
                        }
 
                        i915_request_get(rq);
-                       __i915_request_add(rq, true);
+                       i915_request_add(rq);
 
                        /*
                         * XXX We don't handle resetting the kernel context
@@ -1161,7 +1356,7 @@ static int igt_handle_error(void *arg)
        }
 
        i915_request_get(rq);
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
 
        if (!wait_until_running(&h, rq)) {
                struct drm_printer p = drm_info_printer(i915->drm.dev);
@@ -1210,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
                SUBTEST(igt_reset_idle_engine),
                SUBTEST(igt_reset_active_engine),
                SUBTEST(igt_reset_engines),
-               SUBTEST(igt_wait_reset),
                SUBTEST(igt_reset_queue),
+               SUBTEST(igt_reset_wait),
+               SUBTEST(igt_reset_evict_ggtt),
+               SUBTEST(igt_reset_evict_ppgtt),
                SUBTEST(igt_handle_error),
        };
        bool saved_hangcheck;
@@ -1220,6 +1417,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
        if (!intel_has_gpu_reset(i915))
                return 0;
 
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return -EIO; /* we're long past hope of a successful reset */
+
        intel_runtime_pm_get(i915);
        saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
 
index 1b8a0712515092e54b63776bd9d7c324ca905795..582566faef090f395251c816690dad9dee521d7c 100644 (file)
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
                              struct i915_request *rq,
                              u32 arbitration_command)
 {
-       struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+       struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
        if (err)
                goto unpin_vma;
 
-       i915_vma_move_to_active(vma, rq, 0);
+       err = i915_vma_move_to_active(vma, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }
 
-       i915_vma_move_to_active(hws, rq, 0);
+       err = i915_vma_move_to_active(hws, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
@@ -155,7 +162,7 @@ spinner_create_request(struct spinner *spin,
 
        err = emit_recurse_batch(spin, rq, arbitration_command);
        if (err) {
-               __i915_request_add(rq, false);
+               i915_request_add(rq);
                return ERR_PTR(err);
        }
 
@@ -444,16 +451,134 @@ err_wedged:
        goto err_ctx_lo;
 }
 
+static int live_preempt_hang(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct i915_gem_context *ctx_hi, *ctx_lo;
+       struct spinner spin_hi, spin_lo;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err = -ENOMEM;
+
+       if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+               return 0;
+
+       if (!intel_has_reset_engine(i915))
+               return 0;
+
+       mutex_lock(&i915->drm.struct_mutex);
+
+       if (spinner_init(&spin_hi, i915))
+               goto err_unlock;
+
+       if (spinner_init(&spin_lo, i915))
+               goto err_spin_hi;
+
+       ctx_hi = kernel_context(i915);
+       if (!ctx_hi)
+               goto err_spin_lo;
+       ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+       ctx_lo = kernel_context(i915);
+       if (!ctx_lo)
+               goto err_ctx_hi;
+       ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+       for_each_engine(engine, i915, id) {
+               struct i915_request *rq;
+
+               if (!intel_engine_has_preemption(engine))
+                       continue;
+
+               rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+                                           MI_ARB_CHECK);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto err_ctx_lo;
+               }
+
+               i915_request_add(rq);
+               if (!wait_for_spinner(&spin_lo, rq)) {
+                       GEM_TRACE("lo spinner failed to start\n");
+                       GEM_TRACE_DUMP();
+                       i915_gem_set_wedged(i915);
+                       err = -EIO;
+                       goto err_ctx_lo;
+               }
+
+               rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+                                           MI_ARB_CHECK);
+               if (IS_ERR(rq)) {
+                       spinner_end(&spin_lo);
+                       err = PTR_ERR(rq);
+                       goto err_ctx_lo;
+               }
+
+               init_completion(&engine->execlists.preempt_hang.completion);
+               engine->execlists.preempt_hang.inject_hang = true;
+
+               i915_request_add(rq);
+
+               if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
+                                                HZ / 10)) {
+                       pr_err("Preemption did not occur within timeout!\n");
+                       GEM_TRACE_DUMP();
+                       i915_gem_set_wedged(i915);
+                       err = -EIO;
+                       goto err_ctx_lo;
+               }
+
+               set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+               i915_reset_engine(engine, NULL);
+               clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+               engine->execlists.preempt_hang.inject_hang = false;
+
+               if (!wait_for_spinner(&spin_hi, rq)) {
+                       GEM_TRACE("hi spinner failed to start\n");
+                       GEM_TRACE_DUMP();
+                       i915_gem_set_wedged(i915);
+                       err = -EIO;
+                       goto err_ctx_lo;
+               }
+
+               spinner_end(&spin_hi);
+               spinner_end(&spin_lo);
+               if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+                       err = -EIO;
+                       goto err_ctx_lo;
+               }
+       }
+
+       err = 0;
+err_ctx_lo:
+       kernel_context_close(ctx_lo);
+err_ctx_hi:
+       kernel_context_close(ctx_hi);
+err_spin_lo:
+       spinner_fini(&spin_lo);
+err_spin_hi:
+       spinner_fini(&spin_hi);
+err_unlock:
+       igt_flush_test(i915, I915_WAIT_LOCKED);
+       mutex_unlock(&i915->drm.struct_mutex);
+       return err;
+}
+
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
+               SUBTEST(live_preempt_hang),
        };
 
        if (!HAS_EXECLISTS(i915))
                return 0;
 
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return 0;
+
        return i915_subtests(tests, i915);
 }
index 17444a3abbb933d396ba4e98f74af02e503370a4..0d39b3bf0c0d0f11aaa883d672e9ba11dc33f5b5 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "../i915_selftest.h"
 
+#include "igt_wedge_me.h"
 #include "mock_context.h"
 
 static struct drm_i915_gem_object *
@@ -33,7 +34,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        memset(cs, 0xc5, PAGE_SIZE);
        i915_gem_object_unpin_map(result);
 
-       vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_obj;
@@ -49,6 +50,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
                goto err_pin;
        }
 
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto err_req;
+
        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;
@@ -67,15 +72,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        }
        intel_ring_advance(rq, cs);
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       reservation_object_lock(vma->resv, NULL);
-       reservation_object_add_excl_fence(vma->resv, &rq->fence);
-       reservation_object_unlock(vma->resv);
-
        i915_gem_object_get(result);
        i915_gem_object_set_active_reference(result);
 
-       __i915_request_add(rq, true);
+       i915_request_add(rq);
        i915_vma_unpin(vma);
 
        return result;
@@ -112,6 +112,7 @@ static int check_whitelist(const struct whitelist *w,
                           struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *results;
+       struct igt_wedge_me wedge;
        u32 *vaddr;
        int err;
        int i;
@@ -120,7 +121,11 @@ static int check_whitelist(const struct whitelist *w,
        if (IS_ERR(results))
                return PTR_ERR(results);
 
-       err = i915_gem_object_set_to_cpu_domain(results, false);
+       err = 0;
+       igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+               err = i915_gem_object_set_to_cpu_domain(results, false);
+       if (i915_terminally_wedged(&ctx->i915->gpu_error))
+               err = -EIO;
        if (err)
                goto out_put;
 
@@ -283,6 +288,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
        };
        int err;
 
+       if (i915_terminally_wedged(&i915->gpu_error))
+               return 0;
+
        mutex_lock(&i915->drm.struct_mutex);
        err = i915_subtests(tests, i915);
        mutex_unlock(&i915->drm.struct_mutex);
index 501becc47c0cd65291e84f5fad2b77600ae3fbc9..8904f1ce64e3d69bfa2745faf06d7caf47352dea 100644 (file)
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
             const char *name)
 {
        struct i915_gem_context *ctx;
+       unsigned int n;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);
 
+       for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+               struct intel_context *ce = &ctx->__engine[n];
+
+               ce->gem_context = ctx;
+       }
+
        ret = ida_simple_get(&i915->contexts.hw_ida,
                             0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
        if (ret < 0)
index 302f7d1036359ab47db4009037561b99e2d617d6..ca682caf1062f08913b482b073f85dde16af53e7 100644 (file)
@@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
        vm_unmap_ram(vaddr, mock->npages);
 }
 
-static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-       struct mock_dmabuf *mock = to_mock(dma_buf);
-
-       return kmap_atomic(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
-       kunmap_atomic(addr);
-}
-
 static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
        struct mock_dmabuf *mock = to_mock(dma_buf);
@@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops =  {
        .unmap_dma_buf = mock_unmap_dma_buf,
        .release = mock_dmabuf_release,
        .map = mock_dmabuf_kmap,
-       .map_atomic = mock_dmabuf_kmap_atomic,
        .unmap = mock_dmabuf_kunmap,
-       .unmap_atomic = mock_dmabuf_kunmap_atomic,
        .mmap = mock_dmabuf_mmap,
        .vmap = mock_dmabuf_vmap,
        .vunmap = mock_dmabuf_vunmap,
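
With the atomic kmap callbacks removed from the ops table above, importers go through the sleeping map interface only. A minimal sketch against this mock exporter (the wrapper function is illustrative; dma_buf_kmap()/dma_buf_kunmap() are the core wrappers for .map/.unmap at this point in time):

/* Sketch: map one page of a dma-buf, then release it. */
static void example_peek(struct dma_buf *dmabuf)
{
	void *vaddr = dma_buf_kmap(dmabuf, 0);	/* calls .map above */

	if (vaddr)
		dma_buf_kunmap(dmabuf, 0, vaddr);	/* calls .unmap */
}
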
index 26bf29d97007885b5f331dcd2b0d4811ac0941d6..22a73da45ad58b9bfae36cd823c6a934c4262c49 100644 (file)
@@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t)
        spin_unlock(&engine->hw_lock);
 }
 
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
-                struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
 {
-       struct intel_context *ce = to_intel_context(ctx, engine);
-
-       if (!ce->pin_count++)
-               i915_gem_context_get(ctx);
+       i915_gem_context_put(ce->gem_context);
+}
 
-       return engine->buffer;
+static void mock_context_destroy(struct intel_context *ce)
+{
+       GEM_BUG_ON(ce->pin_count);
 }
 
-static void mock_context_unpin(struct intel_engine_cs *engine,
-                              struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+       .unpin = mock_context_unpin,
+       .destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+                struct i915_gem_context *ctx)
 {
        struct intel_context *ce = to_intel_context(ctx, engine);
 
-       if (!--ce->pin_count)
-               i915_gem_context_put(ctx);
+       if (!ce->pin_count++) {
+               i915_gem_context_get(ctx);
+               ce->ring = engine->buffer;
+               ce->ops = &mock_context_ops;
+       }
+
+       return ce;
 }
 
 static int mock_request_alloc(struct i915_request *request)
@@ -185,13 +194,14 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        engine->base.status_page.page_addr = (void *)(engine + 1);
 
        engine->base.context_pin = mock_context_pin;
-       engine->base.context_unpin = mock_context_unpin;
        engine->base.request_alloc = mock_request_alloc;
        engine->base.emit_flush = mock_emit_flush;
        engine->base.emit_breadcrumb = mock_emit_breadcrumb;
        engine->base.submit_request = mock_submit_request;
 
        i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+       lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+
        intel_engine_init_breadcrumbs(&engine->base);
        engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
 
@@ -204,8 +214,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        if (!engine->base.buffer)
                goto err_breadcrumbs;
 
+       if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+               goto err_ring;
+
        return &engine->base;
 
+err_ring:
+       mock_ring_free(engine->base.buffer);
 err_breadcrumbs:
        intel_engine_fini_breadcrumbs(&engine->base);
        i915_timeline_fini(&engine->base.timeline);
@@ -238,11 +253,15 @@ void mock_engine_free(struct intel_engine_cs *engine)
 {
        struct mock_engine *mock =
                container_of(engine, typeof(*mock), base);
+       struct intel_context *ce;
 
        GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-       if (engine->last_retired_context)
-               intel_context_unpin(engine->last_retired_context, engine);
+       ce = fetch_and_zero(&engine->last_retired_context);
+       if (ce)
+               intel_context_unpin(ce);
+
+       __intel_context_unpin(engine->i915->kernel_context, engine);
 
        mock_ring_free(engine->buffer);
 
index 94baedfa0f7456b0fe0521215e7f2f3940918ff2..43ed8b28aeaa0ce86c6162814b5b9f30ce981b70 100644 (file)
@@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = {
 struct drm_i915_private *mock_gem_device(void)
 {
        struct drm_i915_private *i915;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
        struct pci_dev *pdev;
        int err;
 
@@ -159,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void)
        dev_pm_domain_set(&pdev->dev, &pm_domain);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);
-       WARN_ON(pm_runtime_get_sync(&pdev->dev));
+       if (pm_runtime_enabled(&pdev->dev))
+               WARN_ON(pm_runtime_get_sync(&pdev->dev));
 
        i915 = (struct drm_i915_private *)(pdev + 1);
        pci_set_drvdata(pdev, i915);
@@ -233,13 +232,13 @@ struct drm_i915_private *mock_gem_device(void)
        mock_init_ggtt(i915);
 
        mkwrite_device_info(i915)->ring_mask = BIT(0);
-       i915->engine[RCS] = mock_engine(i915, "mock", RCS);
-       if (!i915->engine[RCS])
-               goto err_unlock;
-
        i915->kernel_context = mock_context(i915, NULL);
        if (!i915->kernel_context)
-               goto err_engine;
+               goto err_unlock;
+
+       i915->engine[RCS] = mock_engine(i915, "mock", RCS);
+       if (!i915->engine[RCS])
+               goto err_context;
 
        mutex_unlock(&i915->drm.struct_mutex);
 
@@ -247,9 +246,8 @@ struct drm_i915_private *mock_gem_device(void)
 
        return i915;
 
-err_engine:
-       for_each_engine(engine, i915, id)
-               mock_engine_free(engine);
+err_context:
+       i915_gem_contexts_fini(i915);
 err_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        kmem_cache_destroy(i915->priorities);
index 36c112088940585c8bcf0bcdb66027f3ec9e61da..a140ea5c3a7c50edca13d1ad0c81955e1dea76e8 100644 (file)
@@ -66,25 +66,21 @@ mock_ppgtt(struct drm_i915_private *i915,
                return NULL;
 
        kref_init(&ppgtt->ref);
-       ppgtt->base.i915 = i915;
-       ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
-       ppgtt->base.file = ERR_PTR(-ENODEV);
-
-       INIT_LIST_HEAD(&ppgtt->base.active_list);
-       INIT_LIST_HEAD(&ppgtt->base.inactive_list);
-       INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
-       INIT_LIST_HEAD(&ppgtt->base.global_link);
-       drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
-       ppgtt->base.clear_range = nop_clear_range;
-       ppgtt->base.insert_page = mock_insert_page;
-       ppgtt->base.insert_entries = mock_insert_entries;
-       ppgtt->base.bind_vma = mock_bind_ppgtt;
-       ppgtt->base.unbind_vma = mock_unbind_ppgtt;
-       ppgtt->base.set_pages = ppgtt_set_pages;
-       ppgtt->base.clear_pages = clear_pages;
-       ppgtt->base.cleanup = mock_cleanup;
+       ppgtt->vm.i915 = i915;
+       ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+       ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+       i915_address_space_init(&ppgtt->vm, i915);
+
+       ppgtt->vm.clear_range = nop_clear_range;
+       ppgtt->vm.insert_page = mock_insert_page;
+       ppgtt->vm.insert_entries = mock_insert_entries;
+       ppgtt->vm.cleanup = mock_cleanup;
+
+       ppgtt->vm.vma_ops.bind_vma    = mock_bind_ppgtt;
+       ppgtt->vm.vma_ops.unbind_vma  = mock_unbind_ppgtt;
+       ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
+       ppgtt->vm.vma_ops.clear_pages = clear_pages;
 
        return ppgtt;
 }
@@ -105,29 +101,28 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 {
        struct i915_ggtt *ggtt = &i915->ggtt;
 
-       INIT_LIST_HEAD(&i915->vm_list);
-
-       ggtt->base.i915 = i915;
+       ggtt->vm.i915 = i915;
 
        ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
        ggtt->mappable_end = resource_size(&ggtt->gmadr);
-       ggtt->base.total = 4096 * PAGE_SIZE;
-
-       ggtt->base.clear_range = nop_clear_range;
-       ggtt->base.insert_page = mock_insert_page;
-       ggtt->base.insert_entries = mock_insert_entries;
-       ggtt->base.bind_vma = mock_bind_ggtt;
-       ggtt->base.unbind_vma = mock_unbind_ggtt;
-       ggtt->base.set_pages = ggtt_set_pages;
-       ggtt->base.clear_pages = clear_pages;
-       ggtt->base.cleanup = mock_cleanup;
-
-       i915_address_space_init(&ggtt->base, i915, "global");
+       ggtt->vm.total = 4096 * PAGE_SIZE;
+
+       ggtt->vm.clear_range = nop_clear_range;
+       ggtt->vm.insert_page = mock_insert_page;
+       ggtt->vm.insert_entries = mock_insert_entries;
+       ggtt->vm.cleanup = mock_cleanup;
+
+       ggtt->vm.vma_ops.bind_vma    = mock_bind_ggtt;
+       ggtt->vm.vma_ops.unbind_vma  = mock_unbind_ggtt;
+       ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
+       ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+       i915_address_space_init(&ggtt->vm, i915);
 }
 
 void mock_fini_ggtt(struct drm_i915_private *i915)
 {
        struct i915_ggtt *ggtt = &i915->ggtt;
 
-       i915_address_space_fini(&ggtt->base);
+       i915_address_space_fini(&ggtt->vm);
 }
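
Throughout this file the embedded address space moves from ppgtt->base/ggtt->base to ppgtt->vm/ggtt->vm, the list-head and drm_mm setup that was open-coded here is now handled by i915_address_space_init() (which also loses its name argument), and the four per-VMA callbacks are grouped into a nested vma_ops table. A standalone sketch of the grouping pattern, with illustrative types and names:

    /* Grouping related function pointers into one ops table keeps every
     * initialiser in sync and documents which hooks act on a VMA. */
    struct vma_ops {
            int  (*bind_vma)(void *vma);
            void (*unbind_vma)(void *vma);
            int  (*set_pages)(void *vma);
            void (*clear_pages)(void *vma);
    };

    struct address_space {
            unsigned long long total;
            struct vma_ops vma_ops;
    };

    static int  mock_bind(void *vma)   { (void)vma; return 0; }
    static void mock_unbind(void *vma) { (void)vma; }
    static int  mock_set(void *vma)    { (void)vma; return 0; }
    static void mock_clear(void *vma)  { (void)vma; }

    static void mock_init(struct address_space *vm)
    {
            vm->vma_ops.bind_vma    = mock_bind;
            vm->vma_ops.unbind_vma  = mock_unbind;
            vm->vma_ops.set_pages   = mock_set;
            vm->vma_ops.clear_pages = mock_clear;
    }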
similarity index 96%
rename from drivers/gpu/drm/i915/intel_dsi.c
rename to drivers/gpu/drm/i915/vlv_dsi.c
index cf39ca90d887872ddb2de5e011041f785fda3996..435a2c35ee8c4acd46d9f3fccd3c6dc700ad8b8d 100644 (file)
@@ -69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
        }
 }
 
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 {
        struct drm_encoder *encoder = &intel_dsi->base.base;
        struct drm_device *dev = encoder->dev;
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
 
@@ -339,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                        pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
                else
                        pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
-       }
 
-       ret = intel_compute_dsi_pll(encoder, pipe_config);
-       if (ret)
-               return false;
+               ret = bxt_dsi_pll_compute(encoder, pipe_config);
+               if (ret)
+                       return false;
+       } else {
+               ret = vlv_dsi_pll_compute(encoder, pipe_config);
+               if (ret)
+                       return false;
+       }
 
        pipe_config->clock_set = true;
 
@@ -543,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               vlv_dsi_device_ready(encoder);
-       else if (IS_BROXTON(dev_priv))
-               bxt_dsi_device_ready(encoder);
-       else if (IS_GEMINILAKE(dev_priv))
+       if (IS_GEMINILAKE(dev_priv))
                glk_dsi_device_ready(encoder);
+       else if (IS_GEN9_LP(dev_priv))
+               bxt_dsi_device_ready(encoder);
+       else
+               vlv_dsi_device_ready(encoder);
 }
 
 static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
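
The reordered dispatch above is subtle: IS_GEN9_LP() is true for both Broxton and Geminilake, so the Geminilake check has to come first, and the final else now covers Valley View/Cherry View without naming them. Annotated copy of the pattern (comments added here for clarity):

    if (IS_GEMINILAKE(dev_priv))            /* GLK is also a GEN9 LP part */
            glk_dsi_device_ready(encoder);
    else if (IS_GEN9_LP(dev_priv))          /* i.e. Broxton */
            bxt_dsi_device_ready(encoder);
    else                                    /* VLV / CHV */
            vlv_dsi_device_ready(encoder);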
@@ -807,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
         * The BIOS may leave the PLL in a wonky state where it doesn't
         * lock. It needs to be fully powered down to fix it.
         */
-       intel_disable_dsi_pll(encoder);
-       intel_enable_dsi_pll(encoder, pipe_config);
+       if (IS_GEN9_LP(dev_priv)) {
+               bxt_dsi_pll_disable(encoder);
+               bxt_dsi_pll_enable(encoder, pipe_config);
+       } else {
+               vlv_dsi_pll_disable(encoder);
+               vlv_dsi_pll_enable(encoder, pipe_config);
+       }
 
        if (IS_BROXTON(dev_priv)) {
                /* Add MIPI IO reset programming for modeset */
@@ -926,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
-           IS_BROXTON(dev_priv))
-               vlv_dsi_clear_device_ready(encoder);
-       else if (IS_GEMINILAKE(dev_priv))
+       if (IS_GEMINILAKE(dev_priv))
                glk_dsi_clear_device_ready(encoder);
+       else
+               vlv_dsi_clear_device_ready(encoder);
 }
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -946,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 
        if (is_vid_mode(intel_dsi)) {
                for_each_dsi_port(port, intel_dsi->ports)
-                       wait_for_dsi_fifo_empty(intel_dsi, port);
+                       vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
                intel_dsi_port_disable(encoder);
                usleep_range(2000, 5000);
@@ -976,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
                                val & ~MIPIO_RST_CTRL);
        }
 
-       intel_disable_dsi_pll(encoder);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+       if (IS_GEN9_LP(dev_priv)) {
+               bxt_dsi_pll_disable(encoder);
+       } else {
                u32 val;
 
+               vlv_dsi_pll_disable(encoder);
+
                val = I915_READ(DSPCLK_GATE_D);
                val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
                I915_WRITE(DSPCLK_GATE_D, val);
@@ -1021,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
         * configuration, otherwise accessing DSI registers will hang the
         * machine. See BSpec North Display Engine registers/MIPI[BXT].
         */
-       if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+       if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
                goto out_put_power;
 
        /* XXX: this only works for one DSI output */
@@ -1244,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 
        pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
 
-       if (IS_GEN9_LP(dev_priv))
+       if (IS_GEN9_LP(dev_priv)) {
                bxt_dsi_get_pipe_config(encoder, pipe_config);
+               pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+                                       pipe_config);
+       } else {
+               pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+                                       pipe_config);
+       }
 
-       pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
-                                 pipe_config);
-       if (!pclk)
-               return;
-
-       pipe_config->base.adjusted_mode.crtc_clock = pclk;
-       pipe_config->port_clock = pclk;
+       if (pclk) {
+               pipe_config->base.adjusted_mode.crtc_clock = pclk;
+               pipe_config->port_clock = pclk;
+       }
 }
 
 static enum drm_mode_status
@@ -1266,6 +1282,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_KMS("\n");
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
@@ -1579,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
        enum port port;
        u32 val;
 
-       if (!IS_GEMINILAKE(dev_priv)) {
-               for_each_dsi_port(port, intel_dsi->ports) {
-                       /* Panel commands can be sent when clock is in LP11 */
-                       I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+       if (IS_GEMINILAKE(dev_priv))
+               return;
+
+       for_each_dsi_port(port, intel_dsi->ports) {
+               /* Panel commands can be sent when clock is in LP11 */
+               I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
 
-                       intel_dsi_reset_clocks(encoder, port);
-                       I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+               if (IS_GEN9_LP(dev_priv))
+                       bxt_dsi_reset_clocks(encoder, port);
+               else
+                       vlv_dsi_reset_clocks(encoder, port);
+               I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 
-                       val = I915_READ(MIPI_DSI_FUNC_PRG(port));
-                       val &= ~VID_MODE_FORMAT_MASK;
-                       I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+               val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+               val &= ~VID_MODE_FORMAT_MASK;
+               I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
 
-                       I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
-               }
+               I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
        }
 }
 
@@ -1665,16 +1688,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-       enum i9xx_plane_id plane;
+       enum i9xx_plane_id i9xx_plane;
        u32 val;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                if (connector->encoder->crtc_mask == BIT(PIPE_B))
-                       plane = PLANE_B;
+                       i9xx_plane = PLANE_B;
                else
-                       plane = PLANE_A;
+                       i9xx_plane = PLANE_A;
 
-               val = I915_READ(DSPCNTR(plane));
+               val = I915_READ(DSPCNTR(i9xx_plane));
                if (val & DISPPLANE_ROTATE_180)
                        orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
        }
@@ -1707,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
        }
 }
 
-void intel_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = &dev_priv->drm;
        struct intel_dsi *intel_dsi;
@@ -1724,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
        if (!intel_bios_is_dsi_present(dev_priv, &port))
                return;
 
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-               dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
-       } else if (IS_GEN9_LP(dev_priv)) {
+       if (IS_GEN9_LP(dev_priv))
                dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
-       } else {
-               DRM_ERROR("Unsupported Mipi device to reg base");
-               return;
-       }
+       else
+               dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
 
        intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
        if (!intel_dsi)
similarity index 84%
rename from drivers/gpu/drm/i915/intel_dsi_pll.c
rename to drivers/gpu/drm/i915/vlv_dsi_pll.c
index 2ff2ee7f3b78cfdb6f75f76811151dbc6f1fa24f..a132a8037ecc6b2a317229918e8d2471cdbcb9dd 100644 (file)
@@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
  * XXX: The muxing and gating is hard coded for now. Need to add support for
  * sharing PLLs with two DSI outputs.
  */
-static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
-                              struct intel_crtc_state *config)
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+                       struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
        return 0;
 }
 
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
-                              const struct intel_crtc_state *config)
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
@@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
-static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+void vlv_dsi_pll_disable(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 tmp;
@@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
        mutex_unlock(&dev_priv->sb_lock);
 }
 
-static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 {
        bool enabled;
        u32 val;
@@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
        return enabled;
 }
 
-static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+void bxt_dsi_pll_disable(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val;
@@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
             bpp, pipe_bpp);
 }
 
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-                           struct intel_crtc_state *config)
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                    struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
        return pclk;
 }
 
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-                           struct intel_crtc_state *config)
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+                    struct intel_crtc_state *config)
 {
        u32 pclk;
        u32 dsi_clk;
@@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
        return pclk;
 }
 
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-                      struct intel_crtc_state *config)
-{
-       if (IS_GEN9_LP(to_i915(encoder->base.dev)))
-               return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
-       else
-               return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
-}
-
-static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 temp;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
        I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 }
 
-static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
-                              struct intel_crtc_state *config)
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+                       struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
        return 0;
 }
 
-static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
-                              const struct intel_crtc_state *config)
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *config)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
-{
-       if (IS_GEN9_LP(dev_priv))
-               return bxt_dsi_pll_is_enabled(dev_priv);
-
-       MISSING_CASE(INTEL_DEVID(dev_priv));
-
-       return false;
-}
-
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
-                         struct intel_crtc_state *config)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return vlv_compute_dsi_pll(encoder, config);
-       else if (IS_GEN9_LP(dev_priv))
-               return gen9lp_compute_dsi_pll(encoder, config);
-
-       return -ENODEV;
-}
-
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
-                         const struct intel_crtc_state *config)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               vlv_enable_dsi_pll(encoder, config);
-       else if (IS_GEN9_LP(dev_priv))
-               gen9lp_enable_dsi_pll(encoder, config);
-}
-
-void intel_disable_dsi_pll(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               vlv_disable_dsi_pll(encoder);
-       else if (IS_GEN9_LP(dev_priv))
-               bxt_disable_dsi_pll(encoder);
-}
-
-static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
-                                   enum port port)
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 tmp;
        struct drm_device *dev = encoder->base.dev;
@@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
        }
        I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 }
-
-void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-       if (IS_GEN9_LP(dev_priv))
-               gen9lp_dsi_reset_clocks(encoder, port);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               vlv_dsi_reset_clocks(encoder, port);
-}
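
Net effect of this file's hunks: the static vlv_/gen9lp_ helpers become exported vlv_dsi_pll_*/bxt_dsi_pll_* functions, and the intel_* wrapper layer that re-derived the platform on every call is deleted; the call sites in vlv_dsi.c, which already branch on platform for other reasons, pick the variant themselves. A sketch of the before/after shape (simplified, the old wrapper also had a -ENODEV fallback):

    /* before: every call funnelled through a platform switch */
    int intel_compute_dsi_pll(struct intel_encoder *encoder,
                              struct intel_crtc_state *config)
    {
            if (IS_GEN9_LP(to_i915(encoder->base.dev)))
                    return bxt_dsi_pll_compute(encoder, config);
            return vlv_dsi_pll_compute(encoder, config);
    }

    /* after: the caller selects the variant directly */
    ret = IS_GEN9_LP(dev_priv) ? bxt_dsi_pll_compute(encoder, config)
                               : vlv_dsi_pll_compute(encoder, config);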
index 1d053bbefc02c694af593008ae19312a7826c994..5ea0c82f995773085388ac2e9d1bd710eeb8d5b8 100644 (file)
 
 #define MAX_CRTC       4
 
-struct imx_drm_device {
-       struct drm_device                       *drm;
-       unsigned int                            pipes;
-       struct drm_atomic_state                 *state;
-};
-
 #if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
 static int legacyfb_depth = 16;
 module_param(legacyfb_depth, int, 0444);
@@ -219,22 +213,12 @@ static int compare_of(struct device *dev, void *data)
 static int imx_drm_bind(struct device *dev)
 {
        struct drm_device *drm;
-       struct imx_drm_device *imxdrm;
        int ret;
 
        drm = drm_dev_alloc(&imx_drm_driver, dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);
 
-       imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
-       if (!imxdrm) {
-               ret = -ENOMEM;
-               goto err_unref;
-       }
-
-       imxdrm->drm = drm;
-       drm->dev_private = imxdrm;
-
        /*
         * enable drm irq mode.
         * - with irq_enabled = true, we can use the vblank feature.
@@ -306,8 +290,7 @@ err_unbind:
        component_unbind_all(drm->dev, drm);
 err_kms:
        drm_mode_config_cleanup(drm);
-err_unref:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -327,7 +310,7 @@ static void imx_drm_unbind(struct device *dev)
        component_unbind_all(drm->dev, drm);
        dev_set_drvdata(dev, NULL);
 
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops imx_drm_ops = {
@@ -355,37 +338,15 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
 static int imx_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct imx_drm_device *imxdrm;
-
-       /* The drm_dev is NULL before .load hook is called */
-       if (drm_dev == NULL)
-               return 0;
-
-       drm_kms_helper_poll_disable(drm_dev);
 
-       imxdrm = drm_dev->dev_private;
-       imxdrm->state = drm_atomic_helper_suspend(drm_dev);
-       if (IS_ERR(imxdrm->state)) {
-               drm_kms_helper_poll_enable(drm_dev);
-               return PTR_ERR(imxdrm->state);
-       }
-
-       return 0;
+       return drm_mode_config_helper_suspend(drm_dev);
 }
 
 static int imx_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct imx_drm_device *imx_drm;
 
-       if (drm_dev == NULL)
-               return 0;
-
-       imx_drm = drm_dev->dev_private;
-       drm_atomic_helper_resume(drm_dev, imx_drm->state);
-       drm_kms_helper_poll_enable(drm_dev);
-
-       return 0;
+       return drm_mode_config_helper_resume(drm_dev);
 }
 #endif
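
Both PM callbacks collapse onto the DRM core helpers, which perform the same poll-disable/atomic-suspend sequence and keep the suspended state inside the drm_device itself (mode_config.suspend_state), so the driver-private imx_drm_device struct and its NULL guards can go (the helpers themselves tolerate a NULL device). Typical wiring, sketched:

    #include <drm/drm_modeset_helper.h>

    static int imx_drm_suspend(struct device *dev)
    {
            /* disables polling, suspends atomic state, stores it in
             * drm->mode_config.suspend_state */
            return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
    }

    static int imx_drm_resume(struct device *dev)
    {
            return drm_mode_config_helper_resume(dev_get_drvdata(dev));
    }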
 
index 15c2bec47a04cc0aaa37c5954a7dc69233c0eb38..ab9c6f706eb3d8b87db9ac4bac336bf7cd3c9cd9 100644 (file)
@@ -10,7 +10,6 @@ struct drm_display_mode;
 struct drm_encoder;
 struct drm_framebuffer;
 struct drm_plane;
-struct imx_drm_crtc;
 struct platform_device;
 
 struct imx_crtc_state {
index dd5312b02a8d21749fb004d41db74ff4bc511b1a..3bd0f8a18e748ca4be125d02a12bc50f7fe1bebc 100644 (file)
@@ -143,7 +143,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
                imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
 
        if (imx_ldb_ch->edid) {
-               drm_mode_connector_update_edid_property(connector,
+               drm_connector_update_edid_property(connector,
                                                        imx_ldb_ch->edid);
                num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid);
        }
@@ -471,8 +471,7 @@ static int imx_ldb_register(struct drm_device *drm,
                drm_connector_init(drm, &imx_ldb_ch->connector,
                                &imx_ldb_connector_funcs,
                                DRM_MODE_CONNECTOR_LVDS);
-               drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
-                               encoder);
+               drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
        }
 
        if (imx_ldb_ch->panel) {
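
This hunk and the matching ones in the imx-tve, parallel-display, mtk-dsi and mtk-hdmi files below are a single mechanical rename: the "mode_" infix is dropped from the connector helpers, with arguments unchanged:

    /* same signatures, new names */
    drm_connector_update_edid_property(connector, edid);
    ret = drm_connector_attach_encoder(connector, encoder);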
index bc27c26994641a1289324d0731cd1e1e7c4b13bf..cffd3310240e5ddec86f13a2cce9b456d0c29d1f 100644 (file)
@@ -235,7 +235,7 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
 
        edid = drm_get_edid(connector, tve->ddc);
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
@@ -493,7 +493,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
        drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
                           DRM_MODE_CONNECTOR_VGA);
 
-       drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder);
+       drm_connector_attach_encoder(&tve->connector, &tve->encoder);
 
        return 0;
 }
index e83af0f2be869a105036bf49c4f1b612acab83ee..7d4b710b837ac40d90704484c4b2dc731df851bf 100644 (file)
@@ -35,7 +35,6 @@
 struct ipu_crtc {
        struct device           *dev;
        struct drm_crtc         base;
-       struct imx_drm_crtc     *imx_crtc;
 
        /* plane[0] is the full plane, plane[1] is the partial plane */
        struct ipu_plane        *plane[2];
@@ -213,7 +212,7 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
 static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
                                 struct drm_crtc_state *state)
 {
-       u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
+       u32 primary_plane_mask = drm_plane_mask(crtc->primary);
 
        if (state->active && (primary_plane_mask & state->plane_mask) == 0)
                return -EINVAL;
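
drm_plane_mask() is the core helper equivalent of the open-coded shift it replaces; the check itself is unchanged (a CRTC must not be active without its primary plane in the state). Per its drm_plane.h definition, the helper amounts to:

    static inline u32 drm_plane_mask_sketch(const struct drm_plane *plane)
    {
            return 1 << drm_plane_index(plane);  /* bit in state->plane_mask */
    }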
index aedecda9728a9847d1762cd1e35d4c6fe19c8991..aefd04e18f935037d3694a653b9cefb25540916c 100644 (file)
@@ -63,7 +63,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
        }
 
        if (imxpd->edid) {
-               drm_mode_connector_update_edid_property(connector, imxpd->edid);
+               drm_connector_update_edid_property(connector, imxpd->edid);
                num_modes = drm_add_edid_modes(connector, imxpd->edid);
        }
 
@@ -197,7 +197,7 @@ static int imx_pd_register(struct drm_device *drm,
                        return ret;
                }
        } else {
-               drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+               drm_connector_attach_encoder(&imxpd->connector, encoder);
        }
 
        return 0;
index 658b8dd45b834fb463cab8424e5067dc82258e39..2d6aa150a9ff08f0c0be658593abe1114d44a514 100644 (file)
@@ -539,6 +539,9 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        int ret;
        int i;
 
+       if (!path)
+               return 0;
+
        for (i = 0; i < path_len; i++) {
                enum mtk_ddp_comp_id comp_id = path[i];
                struct device_node *node;
index 8130f3dab66144c4bda2de1849f1496fc0d400fa..87e4191c250ebea7f3f0acb3ca23d3fcb9a9639c 100644 (file)
 #define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN      0x050
 #define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN     0x084
 #define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN     0x088
+#define DISP_REG_CONFIG_DSIE_SEL_IN            0x0a4
+#define DISP_REG_CONFIG_DSIO_SEL_IN            0x0a8
 #define DISP_REG_CONFIG_DPI_SEL_IN             0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN     0x0c8
+#define DISP_REG_CONFIG_DISP_RDMA2_SOUT                0x0b8
+#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN     0x0c4
+#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN     0x0c8
 #define DISP_REG_CONFIG_MMSYS_CG_CON0          0x100
 
 #define DISP_REG_CONFIG_DISP_OVL_MOUT_EN       0x030
 #define DISP_REG_MUTEX_RST(n)  (0x28 + 0x20 * (n))
 #define DISP_REG_MUTEX_MOD(n)  (0x2c + 0x20 * (n))
 #define DISP_REG_MUTEX_SOF(n)  (0x30 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n))
 
 #define INT_MUTEX                              BIT(1)
 
-#define MT8173_MUTEX_MOD_DISP_OVL0             BIT(11)
-#define MT8173_MUTEX_MOD_DISP_OVL1             BIT(12)
-#define MT8173_MUTEX_MOD_DISP_RDMA0            BIT(13)
-#define MT8173_MUTEX_MOD_DISP_RDMA1            BIT(14)
-#define MT8173_MUTEX_MOD_DISP_RDMA2            BIT(15)
-#define MT8173_MUTEX_MOD_DISP_WDMA0            BIT(16)
-#define MT8173_MUTEX_MOD_DISP_WDMA1            BIT(17)
-#define MT8173_MUTEX_MOD_DISP_COLOR0           BIT(18)
-#define MT8173_MUTEX_MOD_DISP_COLOR1           BIT(19)
-#define MT8173_MUTEX_MOD_DISP_AAL              BIT(20)
-#define MT8173_MUTEX_MOD_DISP_GAMMA            BIT(21)
-#define MT8173_MUTEX_MOD_DISP_UFOE             BIT(22)
-#define MT8173_MUTEX_MOD_DISP_PWM0             BIT(23)
-#define MT8173_MUTEX_MOD_DISP_PWM1             BIT(24)
-#define MT8173_MUTEX_MOD_DISP_OD               BIT(25)
-
-#define MT2701_MUTEX_MOD_DISP_OVL              BIT(3)
-#define MT2701_MUTEX_MOD_DISP_WDMA             BIT(6)
-#define MT2701_MUTEX_MOD_DISP_COLOR            BIT(7)
-#define MT2701_MUTEX_MOD_DISP_BLS              BIT(9)
-#define MT2701_MUTEX_MOD_DISP_RDMA0            BIT(10)
-#define MT2701_MUTEX_MOD_DISP_RDMA1            BIT(12)
+#define MT8173_MUTEX_MOD_DISP_OVL0             11
+#define MT8173_MUTEX_MOD_DISP_OVL1             12
+#define MT8173_MUTEX_MOD_DISP_RDMA0            13
+#define MT8173_MUTEX_MOD_DISP_RDMA1            14
+#define MT8173_MUTEX_MOD_DISP_RDMA2            15
+#define MT8173_MUTEX_MOD_DISP_WDMA0            16
+#define MT8173_MUTEX_MOD_DISP_WDMA1            17
+#define MT8173_MUTEX_MOD_DISP_COLOR0           18
+#define MT8173_MUTEX_MOD_DISP_COLOR1           19
+#define MT8173_MUTEX_MOD_DISP_AAL              20
+#define MT8173_MUTEX_MOD_DISP_GAMMA            21
+#define MT8173_MUTEX_MOD_DISP_UFOE             22
+#define MT8173_MUTEX_MOD_DISP_PWM0             23
+#define MT8173_MUTEX_MOD_DISP_PWM1             24
+#define MT8173_MUTEX_MOD_DISP_OD               25
+
+#define MT2712_MUTEX_MOD_DISP_PWM2             10
+#define MT2712_MUTEX_MOD_DISP_OVL0             11
+#define MT2712_MUTEX_MOD_DISP_OVL1             12
+#define MT2712_MUTEX_MOD_DISP_RDMA0            13
+#define MT2712_MUTEX_MOD_DISP_RDMA1            14
+#define MT2712_MUTEX_MOD_DISP_RDMA2            15
+#define MT2712_MUTEX_MOD_DISP_WDMA0            16
+#define MT2712_MUTEX_MOD_DISP_WDMA1            17
+#define MT2712_MUTEX_MOD_DISP_COLOR0           18
+#define MT2712_MUTEX_MOD_DISP_COLOR1           19
+#define MT2712_MUTEX_MOD_DISP_AAL0             20
+#define MT2712_MUTEX_MOD_DISP_UFOE             22
+#define MT2712_MUTEX_MOD_DISP_PWM0             23
+#define MT2712_MUTEX_MOD_DISP_PWM1             24
+#define MT2712_MUTEX_MOD_DISP_OD0              25
+#define MT2712_MUTEX_MOD2_DISP_AAL1            33
+#define MT2712_MUTEX_MOD2_DISP_OD1             34
+
+#define MT2701_MUTEX_MOD_DISP_OVL              3
+#define MT2701_MUTEX_MOD_DISP_WDMA             6
+#define MT2701_MUTEX_MOD_DISP_COLOR            7
+#define MT2701_MUTEX_MOD_DISP_BLS              9
+#define MT2701_MUTEX_MOD_DISP_RDMA0            10
+#define MT2701_MUTEX_MOD_DISP_RDMA1            12
 
 #define MUTEX_SOF_SINGLE_MODE          0
 #define MUTEX_SOF_DSI0                 1
 #define MUTEX_SOF_DSI1                 2
 #define MUTEX_SOF_DPI0                 3
+#define MUTEX_SOF_DPI1                 4
+#define MUTEX_SOF_DSI2                 5
+#define MUTEX_SOF_DSI3                 6
 
 #define OVL0_MOUT_EN_COLOR0            0x1
 #define OD_MOUT_EN_RDMA0               0x1
+#define OD1_MOUT_EN_RDMA1              BIT(16)
 #define UFOE_MOUT_EN_DSI0              0x1
 #define COLOR0_SEL_IN_OVL0             0x1
 #define OVL1_MOUT_EN_COLOR1            0x1
 #define GAMMA_MOUT_EN_RDMA1            0x1
-#define RDMA1_MOUT_DPI0                        0x2
+#define RDMA0_SOUT_DPI0                        0x2
+#define RDMA0_SOUT_DSI2                        0x4
+#define RDMA0_SOUT_DSI3                        0x5
+#define RDMA1_SOUT_DPI0                        0x2
+#define RDMA1_SOUT_DPI1                        0x3
+#define RDMA1_SOUT_DSI1                        0x1
+#define RDMA1_SOUT_DSI2                        0x4
+#define RDMA1_SOUT_DSI3                        0x5
+#define RDMA2_SOUT_DPI0                        0x2
+#define RDMA2_SOUT_DPI1                        0x3
+#define RDMA2_SOUT_DSI1                        0x1
+#define RDMA2_SOUT_DSI2                        0x4
+#define RDMA2_SOUT_DSI3                        0x5
 #define DPI0_SEL_IN_RDMA1              0x1
+#define DPI0_SEL_IN_RDMA2              0x3
+#define DPI1_SEL_IN_RDMA1              (0x1 << 8)
+#define DPI1_SEL_IN_RDMA2              (0x3 << 8)
+#define DSI1_SEL_IN_RDMA1              0x1
+#define DSI1_SEL_IN_RDMA2              0x4
+#define DSI2_SEL_IN_RDMA1              (0x1 << 16)
+#define DSI2_SEL_IN_RDMA2              (0x4 << 16)
+#define DSI3_SEL_IN_RDMA1              (0x1 << 16)
+#define DSI3_SEL_IN_RDMA2              (0x4 << 16)
 #define COLOR1_SEL_IN_OVL1             0x1
 
 #define OVL_MOUT_EN_RDMA               0x1
@@ -108,12 +156,32 @@ static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
        [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
 };
 
+static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+       [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0,
+       [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1,
+       [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0,
+       [DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1,
+       [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0,
+       [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1,
+       [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0,
+       [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1,
+       [DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0,
+       [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1,
+       [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2,
+       [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0,
+       [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1,
+       [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2,
+       [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE,
+       [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0,
+       [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1,
+};
+
 static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
-       [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+       [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL,
        [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
        [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
        [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
-       [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+       [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD,
        [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
        [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
        [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
@@ -138,7 +206,7 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
                *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
                value = OVL_MOUT_EN_RDMA;
-       } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
+       } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
                *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
                value = OD_MOUT_EN_RDMA0;
        } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
@@ -150,9 +218,48 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
                *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
                value = GAMMA_MOUT_EN_RDMA1;
+       } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
+               *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+               value = OD1_MOUT_EN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DPI0;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DSI2;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DSI3;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+               value = RDMA1_SOUT_DSI1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+               value = RDMA1_SOUT_DSI2;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+               value = RDMA1_SOUT_DSI3;
        } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
-               *addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN;
-               value = RDMA1_MOUT_DPI0;
+               *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+               value = RDMA1_SOUT_DPI0;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
+               value = RDMA1_SOUT_DPI1;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+               value = RDMA2_SOUT_DPI0;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+               value = RDMA2_SOUT_DPI1;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+               value = RDMA2_SOUT_DSI1;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+               value = RDMA2_SOUT_DSI2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
+               value = RDMA2_SOUT_DSI3;
        } else {
                value = 0;
        }
@@ -172,6 +279,33 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
                *addr = DISP_REG_CONFIG_DPI_SEL_IN;
                value = DPI0_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
+               *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+               value = DPI1_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+               value = DSI1_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
+               *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI2_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
+               *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
+               value = DSI3_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
+               *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+               value = DPI0_SEL_IN_RDMA2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
+               *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+               value = DPI1_SEL_IN_RDMA2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI1_SEL_IN_RDMA2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
+               *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI2_SEL_IN_RDMA2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
+               *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI3_SEL_IN_RDMA2;
        } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
                *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
                value = COLOR1_SEL_IN_OVL1;
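
The two functions above are, as far as this diff shows, complementary halves of each new link: mtk_ddp_mout_en() yields the source-side output select (MOUT/SOUT), while mtk_ddp_sel_in() yields the matching input select on the sink side, so wiring one RDMAx-to-DSIx/DPIx link means programming both registers. Illustrative only (mmsys_write is a hypothetical helper):

    /* route RDMA1 -> DSI2 by programming both ends of the link */
    mmsys_write(DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI2);
    mmsys_write(DISP_REG_CONFIG_DSIE_SEL_IN,        DSI2_SEL_IN_RDMA1);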
@@ -278,6 +412,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
        struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
                                           mutex[mutex->id]);
        unsigned int reg;
+       unsigned int offset;
 
        WARN_ON(&ddp->mutex[mutex->id] != mutex);
 
@@ -288,13 +423,30 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
        case DDP_COMPONENT_DSI1:
                reg = MUTEX_SOF_DSI0;
                break;
+       case DDP_COMPONENT_DSI2:
+               reg = MUTEX_SOF_DSI2;
+               break;
+       case DDP_COMPONENT_DSI3:
+               reg = MUTEX_SOF_DSI3;
+               break;
        case DDP_COMPONENT_DPI0:
                reg = MUTEX_SOF_DPI0;
                break;
+       case DDP_COMPONENT_DPI1:
+               reg = MUTEX_SOF_DPI1;
+               break;
        default:
-               reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
-               reg |= ddp->mutex_mod[id];
-               writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+               if (ddp->mutex_mod[id] < 32) {
+                       offset = DISP_REG_MUTEX_MOD(mutex->id);
+                       reg = readl_relaxed(ddp->regs + offset);
+                       reg |= 1 << ddp->mutex_mod[id];
+                       writel_relaxed(reg, ddp->regs + offset);
+               } else {
+                       offset = DISP_REG_MUTEX_MOD2(mutex->id);
+                       reg = readl_relaxed(ddp->regs + offset);
+                       reg |= 1 << (ddp->mutex_mod[id] - 32);
+                       writel_relaxed(reg, ddp->regs + offset);
+               }
                return;
        }
 
@@ -307,20 +459,32 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
        struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
                                           mutex[mutex->id]);
        unsigned int reg;
+       unsigned int offset;
 
        WARN_ON(&ddp->mutex[mutex->id] != mutex);
 
        switch (id) {
        case DDP_COMPONENT_DSI0:
        case DDP_COMPONENT_DSI1:
+       case DDP_COMPONENT_DSI2:
+       case DDP_COMPONENT_DSI3:
        case DDP_COMPONENT_DPI0:
+       case DDP_COMPONENT_DPI1:
                writel_relaxed(MUTEX_SOF_SINGLE_MODE,
                               ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
                break;
        default:
-               reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
-               reg &= ~(ddp->mutex_mod[id]);
-               writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+               if (ddp->mutex_mod[id] < 32) {
+                       offset = DISP_REG_MUTEX_MOD(mutex->id);
+                       reg = readl_relaxed(ddp->regs + offset);
+                       reg &= ~(1 << ddp->mutex_mod[id]);
+                       writel_relaxed(reg, ddp->regs + offset);
+               } else {
+                       offset = DISP_REG_MUTEX_MOD2(mutex->id);
+                       reg = readl_relaxed(ddp->regs + offset);
+                       reg &= ~(1 << (ddp->mutex_mod[id] - 32));
+                       writel_relaxed(reg, ddp->regs + offset);
+               }
                break;
        }
 }
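
With more than 32 mutex clients on mt2712, the MUTEX_MOD tables switch from storing BIT(n) masks to storing raw bit positions; positions 32 and up (AAL1, OD1) land in the new MOD2 register. A standalone sketch of the split-bitmap arithmetic used in both hunks above:

    #include <stdint.h>

    static void mutex_mod_set(volatile uint32_t *mod, volatile uint32_t *mod2,
                              unsigned int pos, int enable)
    {
            volatile uint32_t *reg = (pos < 32) ? mod : mod2;
            uint32_t bit = 1u << (pos & 31);    /* pos - 32 when pos >= 32 */

            if (enable)
                    *reg |= bit;
            else
                    *reg &= ~bit;
    }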
@@ -407,6 +571,7 @@ static int mtk_ddp_remove(struct platform_device *pdev)
 
 static const struct of_device_id ddp_driver_dt_match[] = {
        { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+       { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod},
        { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
        {},
 };
index 4672317e3ad12497ee68b86cf1dcb55f5a436d9f..ff974d82a4a67a918fb08f2fee2bcc073f91e648 100644 (file)
@@ -218,18 +218,25 @@ struct mtk_ddp_comp_match {
 };
 
 static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
-       [DDP_COMPONENT_AAL]     = { MTK_DISP_AAL,       0, &ddp_aal },
+       [DDP_COMPONENT_AAL0]    = { MTK_DISP_AAL,       0, &ddp_aal },
+       [DDP_COMPONENT_AAL1]    = { MTK_DISP_AAL,       1, &ddp_aal },
        [DDP_COMPONENT_BLS]     = { MTK_DISP_BLS,       0, NULL },
        [DDP_COMPONENT_COLOR0]  = { MTK_DISP_COLOR,     0, NULL },
        [DDP_COMPONENT_COLOR1]  = { MTK_DISP_COLOR,     1, NULL },
        [DDP_COMPONENT_DPI0]    = { MTK_DPI,            0, NULL },
+       [DDP_COMPONENT_DPI1]    = { MTK_DPI,            1, NULL },
        [DDP_COMPONENT_DSI0]    = { MTK_DSI,            0, NULL },
        [DDP_COMPONENT_DSI1]    = { MTK_DSI,            1, NULL },
+       [DDP_COMPONENT_DSI2]    = { MTK_DSI,            2, NULL },
+       [DDP_COMPONENT_DSI3]    = { MTK_DSI,            3, NULL },
        [DDP_COMPONENT_GAMMA]   = { MTK_DISP_GAMMA,     0, &ddp_gamma },
-       [DDP_COMPONENT_OD]      = { MTK_DISP_OD,        0, &ddp_od },
+       [DDP_COMPONENT_OD0]     = { MTK_DISP_OD,        0, &ddp_od },
+       [DDP_COMPONENT_OD1]     = { MTK_DISP_OD,        1, &ddp_od },
        [DDP_COMPONENT_OVL0]    = { MTK_DISP_OVL,       0, NULL },
        [DDP_COMPONENT_OVL1]    = { MTK_DISP_OVL,       1, NULL },
        [DDP_COMPONENT_PWM0]    = { MTK_DISP_PWM,       0, NULL },
+       [DDP_COMPONENT_PWM1]    = { MTK_DISP_PWM,       1, NULL },
+       [DDP_COMPONENT_PWM2]    = { MTK_DISP_PWM,       2, NULL },
        [DDP_COMPONENT_RDMA0]   = { MTK_DISP_RDMA,      0, NULL },
        [DDP_COMPONENT_RDMA1]   = { MTK_DISP_RDMA,      1, NULL },
        [DDP_COMPONENT_RDMA2]   = { MTK_DISP_RDMA,      2, NULL },
@@ -271,7 +278,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
 
        if (comp_id == DDP_COMPONENT_BLS ||
            comp_id == DDP_COMPONENT_DPI0 ||
+           comp_id == DDP_COMPONENT_DPI1 ||
            comp_id == DDP_COMPONENT_DSI0 ||
+           comp_id == DDP_COMPONENT_DSI1 ||
+           comp_id == DDP_COMPONENT_DSI2 ||
+           comp_id == DDP_COMPONENT_DSI3 ||
            comp_id == DDP_COMPONENT_PWM0) {
                comp->regs = NULL;
                comp->clk = NULL;
index 0828cf8bf85ca2d02406ada2d0d5a9635acd1a52..7413ffeb3c9d8bee1ca6bb825eb7a54391c7d013 100644 (file)
@@ -41,19 +41,25 @@ enum mtk_ddp_comp_type {
 };
 
 enum mtk_ddp_comp_id {
-       DDP_COMPONENT_AAL,
+       DDP_COMPONENT_AAL0,
+       DDP_COMPONENT_AAL1,
        DDP_COMPONENT_BLS,
        DDP_COMPONENT_COLOR0,
        DDP_COMPONENT_COLOR1,
        DDP_COMPONENT_DPI0,
+       DDP_COMPONENT_DPI1,
        DDP_COMPONENT_DSI0,
        DDP_COMPONENT_DSI1,
+       DDP_COMPONENT_DSI2,
+       DDP_COMPONENT_DSI3,
        DDP_COMPONENT_GAMMA,
-       DDP_COMPONENT_OD,
+       DDP_COMPONENT_OD0,
+       DDP_COMPONENT_OD1,
        DDP_COMPONENT_OVL0,
        DDP_COMPONENT_OVL1,
        DDP_COMPONENT_PWM0,
        DDP_COMPONENT_PWM1,
+       DDP_COMPONENT_PWM2,
        DDP_COMPONENT_RDMA0,
        DDP_COMPONENT_RDMA1,
        DDP_COMPONENT_RDMA2,
index a2ca90fc403cb5779e40bebb0124f0aa3f5147d7..39721119713bc29d6cba63e2947d6be1ddc439cd 100644 (file)
@@ -146,11 +146,37 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
        DDP_COMPONENT_DPI0,
 };
 
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
+       DDP_COMPONENT_OVL0,
+       DDP_COMPONENT_COLOR0,
+       DDP_COMPONENT_AAL0,
+       DDP_COMPONENT_OD0,
+       DDP_COMPONENT_RDMA0,
+       DDP_COMPONENT_DPI0,
+       DDP_COMPONENT_PWM0,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_ext[] = {
+       DDP_COMPONENT_OVL1,
+       DDP_COMPONENT_COLOR1,
+       DDP_COMPONENT_AAL1,
+       DDP_COMPONENT_OD1,
+       DDP_COMPONENT_RDMA1,
+       DDP_COMPONENT_DPI1,
+       DDP_COMPONENT_PWM1,
+};
+
+static const enum mtk_ddp_comp_id mt2712_mtk_ddp_third[] = {
+       DDP_COMPONENT_RDMA2,
+       DDP_COMPONENT_DSI3,
+       DDP_COMPONENT_PWM2,
+};
+
 static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
        DDP_COMPONENT_OVL0,
        DDP_COMPONENT_COLOR0,
-       DDP_COMPONENT_AAL,
-       DDP_COMPONENT_OD,
+       DDP_COMPONENT_AAL0,
+       DDP_COMPONENT_OD0,
        DDP_COMPONENT_RDMA0,
        DDP_COMPONENT_UFOE,
        DDP_COMPONENT_DSI0,
@@ -173,6 +199,15 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
        .shadow_register = true,
 };
 
+static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
+       .main_path = mt2712_mtk_ddp_main,
+       .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
+       .ext_path = mt2712_mtk_ddp_ext,
+       .ext_len = ARRAY_SIZE(mt2712_mtk_ddp_ext),
+       .third_path = mt2712_mtk_ddp_third,
+       .third_len = ARRAY_SIZE(mt2712_mtk_ddp_third),
+};
+
 static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
        .main_path = mt8173_mtk_ddp_main,
        .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
@@ -232,6 +267,11 @@ static int mtk_drm_kms_init(struct drm_device *drm)
        if (ret < 0)
                goto err_component_unbind;
 
+       ret = mtk_drm_crtc_create(drm, private->data->third_path,
+                                 private->data->third_len);
+       if (ret < 0)
+               goto err_component_unbind;
+
        /* Use OVL device for all DMA memory allocations */
        np = private->comp_node[private->data->main_path[0]] ?:
             private->comp_node[private->data->ext_path[0]];
@@ -360,24 +400,44 @@ static const struct component_master_ops mtk_drm_ops = {
 };
 
 static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
-       { .compatible = "mediatek,mt2701-disp-ovl",   .data = (void *)MTK_DISP_OVL },
-       { .compatible = "mediatek,mt8173-disp-ovl",   .data = (void *)MTK_DISP_OVL },
-       { .compatible = "mediatek,mt2701-disp-rdma",  .data = (void *)MTK_DISP_RDMA },
-       { .compatible = "mediatek,mt8173-disp-rdma",  .data = (void *)MTK_DISP_RDMA },
-       { .compatible = "mediatek,mt8173-disp-wdma",  .data = (void *)MTK_DISP_WDMA },
-       { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
-       { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
-       { .compatible = "mediatek,mt8173-disp-aal",   .data = (void *)MTK_DISP_AAL},
-       { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
-       { .compatible = "mediatek,mt8173-disp-ufoe",  .data = (void *)MTK_DISP_UFOE },
-       { .compatible = "mediatek,mt2701-dsi",        .data = (void *)MTK_DSI },
-       { .compatible = "mediatek,mt8173-dsi",        .data = (void *)MTK_DSI },
-       { .compatible = "mediatek,mt8173-dpi",        .data = (void *)MTK_DPI },
-       { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
-       { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
-       { .compatible = "mediatek,mt2701-disp-pwm",   .data = (void *)MTK_DISP_BLS },
-       { .compatible = "mediatek,mt8173-disp-pwm",   .data = (void *)MTK_DISP_PWM },
-       { .compatible = "mediatek,mt8173-disp-od",    .data = (void *)MTK_DISP_OD },
+       { .compatible = "mediatek,mt2701-disp-ovl",
+         .data = (void *)MTK_DISP_OVL },
+       { .compatible = "mediatek,mt8173-disp-ovl",
+         .data = (void *)MTK_DISP_OVL },
+       { .compatible = "mediatek,mt2701-disp-rdma",
+         .data = (void *)MTK_DISP_RDMA },
+       { .compatible = "mediatek,mt8173-disp-rdma",
+         .data = (void *)MTK_DISP_RDMA },
+       { .compatible = "mediatek,mt8173-disp-wdma",
+         .data = (void *)MTK_DISP_WDMA },
+       { .compatible = "mediatek,mt2701-disp-color",
+         .data = (void *)MTK_DISP_COLOR },
+       { .compatible = "mediatek,mt8173-disp-color",
+         .data = (void *)MTK_DISP_COLOR },
+       { .compatible = "mediatek,mt8173-disp-aal",
+         .data = (void *)MTK_DISP_AAL},
+       { .compatible = "mediatek,mt8173-disp-gamma",
+         .data = (void *)MTK_DISP_GAMMA, },
+       { .compatible = "mediatek,mt8173-disp-ufoe",
+         .data = (void *)MTK_DISP_UFOE },
+       { .compatible = "mediatek,mt2701-dsi",
+         .data = (void *)MTK_DSI },
+       { .compatible = "mediatek,mt8173-dsi",
+         .data = (void *)MTK_DSI },
+       { .compatible = "mediatek,mt8173-dpi",
+         .data = (void *)MTK_DPI },
+       { .compatible = "mediatek,mt2701-disp-mutex",
+         .data = (void *)MTK_DISP_MUTEX },
+       { .compatible = "mediatek,mt2712-disp-mutex",
+         .data = (void *)MTK_DISP_MUTEX },
+       { .compatible = "mediatek,mt8173-disp-mutex",
+         .data = (void *)MTK_DISP_MUTEX },
+       { .compatible = "mediatek,mt2701-disp-pwm",
+         .data = (void *)MTK_DISP_BLS },
+       { .compatible = "mediatek,mt8173-disp-pwm",
+         .data = (void *)MTK_DISP_PWM },
+       { .compatible = "mediatek,mt8173-disp-od",
+         .data = (void *)MTK_DISP_OD },
        { }
 };
 
@@ -552,6 +612,8 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
 static const struct of_device_id mtk_drm_of_ids[] = {
        { .compatible = "mediatek,mt2701-mmsys",
          .data = &mt2701_mmsys_driver_data},
+       { .compatible = "mediatek,mt2712-mmsys",
+         .data = &mt2712_mmsys_driver_data},
        { .compatible = "mediatek,mt8173-mmsys",
          .data = &mt8173_mmsys_driver_data},
        { }
index c3378c452c0a03134c1eaa6175921102fc8ab751..ecc00ca3221daa80f6952f3083f37dc5f1fad0cf 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/io.h>
 #include "mtk_drm_ddp_comp.h"
 
-#define MAX_CRTC       2
+#define MAX_CRTC       3
 #define MAX_CONNECTOR  2
 
 struct device;
@@ -33,6 +33,9 @@ struct mtk_mmsys_driver_data {
        unsigned int main_len;
        const enum mtk_ddp_comp_id *ext_path;
        unsigned int ext_len;
+       const enum mtk_ddp_comp_id *third_path;
+       unsigned int third_len;
+
        bool shadow_register;
 };
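
third_path/third_len let an SoC describe a third display pipeline (mt2712 routes RDMA2 to DSI3), which is why MAX_CRTC goes from 2 to 3 above; together with the NULL guard added to mtk_drm_crtc_create(), SoCs that define no third path simply skip the extra CRTC. The new call site in mtk_drm_kms_init() reads:

    /* third_path is NULL on SoCs without one, so this returns 0 there */
    ret = mtk_drm_crtc_create(drm, private->data->third_path,
                              private->data->third_len);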
 
index 0d8d506695f9f87d9622708f9144549fc1b63ef6..be5f6f1daf5542973f28b6c3db07f9decafd5bbe 100644 (file)
@@ -15,6 +15,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <linux/dma-buf.h>
 #include <linux/reservation.h>
 
 #include "mtk_drm_fb.h"
 #include "mtk_drm_gem.h"
 
-/*
- * mtk specific framebuffer structure.
- *
- * @fb: drm framebuffer object.
- * @gem_obj: array of gem objects.
- */
-struct mtk_drm_fb {
-       struct drm_framebuffer  base;
-       /* For now we only support a single plane */
-       struct drm_gem_object   *gem_obj;
-};
-
-#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
-
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
-{
-       struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
-       return mtk_fb->gem_obj;
-}
-
-static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
-                                   struct drm_file *file_priv,
-                                   unsigned int *handle)
-{
-       struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
-       return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
-}
-
-static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
-{
-       struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       drm_gem_object_put_unlocked(mtk_fb->gem_obj);
-
-       kfree(mtk_fb);
-}
-
 static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
-       .create_handle = mtk_drm_fb_create_handle,
-       .destroy = mtk_drm_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
-static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
                                        const struct drm_mode_fb_cmd2 *mode,
                                        struct drm_gem_object *obj)
 {
-       struct mtk_drm_fb *mtk_fb;
+       struct drm_framebuffer *fb;
        int ret;
 
        if (drm_format_num_planes(mode->pixel_format) != 1)
                return ERR_PTR(-EINVAL);
 
-       mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
-       if (!mtk_fb)
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
                return ERR_PTR(-ENOMEM);
 
-       drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode);
+       drm_helper_mode_fill_fb_struct(dev, fb, mode);
 
-       mtk_fb->gem_obj = obj;
+       fb->obj[0] = obj;
 
-       ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+       ret = drm_framebuffer_init(dev, fb, &mtk_drm_fb_funcs);
        if (ret) {
                DRM_ERROR("failed to initialize framebuffer\n");
-               kfree(mtk_fb);
+               kfree(fb);
                return ERR_PTR(ret);
        }
 
-       return mtk_fb;
+       return fb;
 }
 
 /*
@@ -110,7 +70,7 @@ int mtk_fb_wait(struct drm_framebuffer *fb)
        if (!fb)
                return 0;
 
-       gem = mtk_fb_get_gem_obj(fb);
+       gem = fb->obj[0];
        if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
                return 0;
 
@@ -128,7 +88,7 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
                                               struct drm_file *file,
                                               const struct drm_mode_fb_cmd2 *cmd)
 {
-       struct mtk_drm_fb *mtk_fb;
+       struct drm_framebuffer *fb;
        struct drm_gem_object *gem;
        unsigned int width = cmd->width;
        unsigned int height = cmd->height;
@@ -151,13 +111,13 @@ struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
                goto unreference;
        }
 
-       mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
-       if (IS_ERR(mtk_fb)) {
-               ret = PTR_ERR(mtk_fb);
+       fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
                goto unreference;
        }
 
-       return &mtk_fb->base;
+       return fb;
 
 unreference:
        drm_gem_object_put_unlocked(gem);
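
Storing the GEM object in the core's fb->obj[] array lets this driver delete its wrapper struct and accessor entirely: drm_gem_fb_create_handle() and drm_gem_fb_destroy() from drm_gem_framebuffer_helper.h already implement both funcs in terms of fb->obj[0], and consumers read the array directly. The resulting pattern:

    #include <drm/drm_gem_framebuffer_helper.h>

    static const struct drm_framebuffer_funcs funcs = {
            .create_handle = drm_gem_fb_create_handle,  /* uses fb->obj[0] */
            .destroy       = drm_gem_fb_destroy,
    };

    /* at former mtk_fb_get_gem_obj(fb) call sites: */
    struct drm_gem_object *gem = fb->obj[0];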
index 9b2ae345a4e901fa80493a6610cb61233a501f86..7f976b196a154b09e5422c6818eba4bca4e4477c 100644 (file)
@@ -14,7 +14,6 @@
 #ifndef MTK_DRM_FB_H
 #define MTK_DRM_FB_H
 
-struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
 int mtk_fb_wait(struct drm_framebuffer *fb);
 struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
                                               struct drm_file *file,
index 2f4b0ffee598948e12a00822c073ca9f34d3fb31..f7e6aa1b5b7d1d4e5b994d1acc0f8166a9eb09f0 100644 (file)
@@ -95,11 +95,6 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
        if (!fb)
                return 0;
 
-       if (!mtk_fb_get_gem_obj(fb)) {
-               DRM_DEBUG_KMS("buffer is null\n");
-               return -EFAULT;
-       }
-
        if (!state->crtc)
                return 0;
 
@@ -127,7 +122,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        if (!crtc || WARN_ON(!fb))
                return;
 
-       gem = mtk_fb_get_gem_obj(fb);
+       gem = fb->obj[0];
        mtk_gem = to_mtk_gem_obj(gem);
        addr = mtk_gem->dma_addr;
        pitch = fb->pitches[0];
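Note the NULL check dropped from mtk_plane_atomic_check(): since mtk_drm_framebuffer_init() now stores the GEM object in fb->obj[0] before drm_framebuffer_init() can succeed, any framebuffer reaching a plane should already carry a valid object, so the -EFAULT path had become unreachable.
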
index aa0943ec32b00d88260aad3b52a844d829ed1bb4..66df1b1779592195e38fe0437874c39241a2f297 100644 (file)
@@ -782,7 +782,7 @@ static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
        drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
 
        dsi->conn.dpms = DRM_MODE_DPMS_OFF;
-       drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);
+       drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);
 
        if (dsi->panel) {
                ret = drm_panel_attach(dsi->panel, &dsi->conn);
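This one-liner is part of a tree-wide rename from the 4.19 cycle: the DRM core dropped the "_mode" infix, turning drm_mode_connector_attach_encoder() into drm_connector_attach_encoder() and drm_mode_connector_update_edid_property() into drm_connector_update_edid_property(). The same mechanical substitution recurs in the mtk_hdmi, meson and mgag200 hunks below.
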
index 59a11026dceb4cea5c2e4ee1459dc3d1854463a5..2d45d1dd9554a6c8cea17a3c52c8554af94a613e 100644 (file)
@@ -1220,7 +1220,7 @@ static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
 
        hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
 
-       drm_mode_connector_update_edid_property(conn, edid);
+       drm_connector_update_edid_property(conn, edid);
 
        ret = drm_add_edid_modes(conn, edid);
        kfree(edid);
@@ -1306,7 +1306,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
        hdmi->conn.interlace_allowed = true;
        hdmi->conn.doublescan_allowed = false;
 
-       ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+       ret = drm_connector_attach_encoder(&hdmi->conn,
                                                bridge->encoder);
        if (ret) {
                dev_err(hdmi->dev,
index 32b1a6cdecfc05133147e6ff85c959f4668362f1..d3443125e66164a863fb41bfdb435a1ce13340b6 100644 (file)
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        priv->io_base = regs;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
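Both hunks fix the same error-path leak: once drm_dev_alloc() has succeeded, a bare return on a missing register resource would leak the drm_device reference. The fix reroutes those exits through the function's existing free_drm label (outside this excerpt); a sketch of the unwind pattern, assuming the label releases the reference taken at allocation:

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
	if (!res) {
		ret = -EINVAL;
		goto free_drm;		/* unwind instead of leaking the ref */
	}
	/* ... */
free_drm:
	drm_dev_put(drm);		/* assumed cleanup done by the label */
	return ret;
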
index c9ad45686e7ae29eee5f72882fb8396f58e05e9b..df7247cd93f98f9f237721bb1eddd104cbbdc345 100644 (file)
@@ -329,6 +329,12 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
 
        vclk_freq = mode->clock;
 
+       if (!vic) {
+               meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
+                                vclk_freq, vclk_freq, false);
+               return;
+       }
+
        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                vclk_freq *= 2;
 
@@ -542,10 +548,12 @@ static enum drm_mode_status
 dw_hdmi_mode_valid(struct drm_connector *connector,
                   const struct drm_display_mode *mode)
 {
+       struct meson_drm *priv = connector->dev->dev_private;
        unsigned int vclk_freq;
        unsigned int venc_freq;
        unsigned int hdmi_freq;
        int vic = drm_match_cea_mode(mode);
+       enum drm_mode_status status;
 
        DRM_DEBUG_DRIVER("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
                mode->base.id, mode->name, mode->vrefresh, mode->clock,
@@ -556,8 +564,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
 
        /* Check against non-VIC supported modes */
        if (!vic) {
-               if (!meson_venc_hdmi_supported_mode(mode))
-                       return MODE_BAD;
+               status = meson_venc_hdmi_supported_mode(mode);
+               if (status != MODE_OK)
+                       return status;
+
+               return meson_vclk_dmt_supported_freq(priv, mode->clock);
        /* Check against supported VIC modes */
        } else if (!meson_venc_hdmi_supported_vic(vic))
                return MODE_BAD;
@@ -583,16 +594,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
                vclk_freq, venc_freq, hdmi_freq);
 
-       /* Finally filter by configurable vclk frequencies */
+       /* Finally filter by configurable vclk frequencies for VIC modes */
        switch (vclk_freq) {
-       case 25175:
-       case 40000:
        case 54000:
-       case 65000:
        case 74250:
-       case 108000:
        case 148500:
-       case 162000:
        case 297000:
        case 594000:
                return MODE_OK;
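The reworked filter now validates in two tiers: non-VIC (DMT) modes are bounds-checked by meson_venc_hdmi_supported_mode() and then tested for an achievable PLL setting via the new meson_vclk_dmt_supported_freq(), while CEA/VIC modes keep a fixed vclk whitelist, from which the DMT-only pixel clocks (25175, 40000, 65000, 108000 and 162000 kHz) are dropped.
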
index f0511220317f98ab211b80152de26abae92ea175..ae5473257f727cd446b2aba0b3dd172a86b276f1 100644 (file)
@@ -320,32 +320,23 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
                                CTS_VDAC_EN, CTS_VDAC_EN);
 }
 
-
+enum {
 /* PLL O1 O2 O3 VP DV     EN TX */
 /* 4320 /4 /4 /1 /5 /1  => /2 /2 */
-#define MESON_VCLK_HDMI_ENCI_54000     1
+       MESON_VCLK_HDMI_ENCI_54000 = 1,
 /* 4320 /4 /4 /1 /5 /1  => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_54000      2
+       MESON_VCLK_HDMI_DDR_54000,
 /* 2970 /4 /1 /1 /5 /1  => /1 /2 */
-#define MESON_VCLK_HDMI_DDR_148500     3
-/* 4028 /4 /4 /1 /5 /2  => /1 /1 */
-#define MESON_VCLK_HDMI_25175          4
-/* 3200 /4 /2 /1 /5 /2  => /1 /1 */
-#define MESON_VCLK_HDMI_40000          5
-/* 5200 /4 /2 /1 /5 /2  => /1 /1 */
-#define MESON_VCLK_HDMI_65000          6
+       MESON_VCLK_HDMI_DDR_148500,
 /* 2970 /2 /2 /2 /5 /1  => /1 /1 */
-#define MESON_VCLK_HDMI_74250          7
-/* 4320 /4 /1 /1 /5 /2  => /1 /1 */
-#define MESON_VCLK_HDMI_108000         8
+       MESON_VCLK_HDMI_74250,
 /* 2970 /1 /2 /2 /5 /1  => /1 /1 */
-#define MESON_VCLK_HDMI_148500         9
-/* 3240 /2 /1 /1 /5 /2  => /1 /1 */
-#define MESON_VCLK_HDMI_162000         10
+       MESON_VCLK_HDMI_148500,
 /* 2970 /1 /1 /1 /5 /2  => /1 /1 */
-#define MESON_VCLK_HDMI_297000         11
+       MESON_VCLK_HDMI_297000,
 /* 5940 /1 /1 /2 /5 /1  => /1 /1 */
-#define MESON_VCLK_HDMI_594000         12
+       MESON_VCLK_HDMI_594000
+};
 
 struct meson_vclk_params {
        unsigned int pll_base_freq;
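Converting the #defines to an anonymous enum lets the remaining identifiers auto-number, and the DMT-only entries (25175, 40000, 65000, 108000 and 162000) vanish both here and from the params[] table below, since those rates are now computed at runtime instead of looked up.
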
@@ -411,46 +402,6 @@ struct meson_vclk_params {
                .vid_pll_div = VID_PLL_DIV_5,
                .vclk_div = 1,
        },
-       [MESON_VCLK_HDMI_25175] = {
-               .pll_base_freq = 4028000,
-               .pll_od1 = 4,
-               .pll_od2 = 4,
-               .pll_od3 = 1,
-               .vid_pll_div = VID_PLL_DIV_5,
-               .vclk_div = 2,
-       },
-       [MESON_VCLK_HDMI_40000] = {
-               .pll_base_freq = 3200000,
-               .pll_od1 = 4,
-               .pll_od2 = 2,
-               .pll_od3 = 1,
-               .vid_pll_div = VID_PLL_DIV_5,
-               .vclk_div = 2,
-       },
-       [MESON_VCLK_HDMI_65000] = {
-               .pll_base_freq = 5200000,
-               .pll_od1 = 4,
-               .pll_od2 = 2,
-               .pll_od3 = 1,
-               .vid_pll_div = VID_PLL_DIV_5,
-               .vclk_div = 2,
-       },
-       [MESON_VCLK_HDMI_108000] = {
-               .pll_base_freq = 4320000,
-               .pll_od1 = 4,
-               .pll_od2 = 1,
-               .pll_od3 = 1,
-               .vid_pll_div = VID_PLL_DIV_5,
-               .vclk_div = 2,
-       },
-       [MESON_VCLK_HDMI_162000] = {
-               .pll_base_freq = 3240000,
-               .pll_od1 = 2,
-               .pll_od2 = 1,
-               .pll_od3 = 1,
-               .vid_pll_div = VID_PLL_DIV_5,
-               .vclk_div = 2,
-       },
 };
 
 static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -470,358 +421,217 @@ static inline unsigned int pll_od_to_reg(unsigned int od)
        return 0;
 }
 
-void meson_hdmi_pll_set(struct meson_drm *priv,
-                       unsigned int base,
-                       unsigned int od1,
-                       unsigned int od2,
-                       unsigned int od3)
+void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
+                              unsigned int frac, unsigned int od1,
+                              unsigned int od2, unsigned int od3)
 {
        unsigned int val;
 
        if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
-               switch (base) {
-               case 2970000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* Enable and unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               0x7 << 28, 0x4 << 28);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-
-                       /* div_frac */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                               0xFFFF,  0x4e00);
-                       break;
-
-               case 3200000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-
-                       /* div_frac */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                               0xFFFF,  0x4aab);
-                       break;
-
-               case 3240000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000243);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-
-                       /* div_frac */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                               0xFFFF,  0x4800);
-                       break;
-
-               case 3865000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-
-                       /* div_frac */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                               0xFFFF,  0x4855);
-                       break;
-
-               case 4028000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-
-                       /* div_frac */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                               0xFFFF,  0x4eab);
-                       break;
-
-               case 4320000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-                       break;
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
+               if (frac)
+                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+                                    0x00004000 | frac);
+               else
+                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
+                                    0x00000000);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
 
-               case 5940000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800027b);
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                               0xFFFF,  0x4c00);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-                       break;
+               /* Enable and unreset */
+               regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+                                  0x7 << 28, 0x4 << 28);
 
-               case 5200000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
-
-                       /* unreset */
-                       regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                               BIT(28), 0);
-
-                       /* Poll for lock bit */
-                       regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       val, (val & HDMI_PLL_LOCK), 10, 0);
-                       break;
-               };
+               /* Poll for lock bit */
+               regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+                                        val, (val & HDMI_PLL_LOCK), 10, 0);
        } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
                   meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
-               switch (base) {
-               case 2970000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 3200000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 3240000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 3865000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 4028000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 4320000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 5940000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002f7);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb200);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               case 5200000:
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
-                       regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
-                       break;
-
-               };
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+               regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
 
                /* Reset PLL */
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       HDMI_PLL_RESET, HDMI_PLL_RESET);
+                               HDMI_PLL_RESET, HDMI_PLL_RESET);
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
-                                       HDMI_PLL_RESET, 0);
+                               HDMI_PLL_RESET, 0);
 
                /* Poll for lock bit */
                regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
                                (val & HDMI_PLL_LOCK), 10, 0);
-       };
+       }
 
        if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                  3 << 16, pll_od_to_reg(od1) << 16);
+                               3 << 16, pll_od_to_reg(od1) << 16);
        else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
-                meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+                       meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
-                                  3 << 21, pll_od_to_reg(od1) << 21);
+                               3 << 21, pll_od_to_reg(od1) << 21);
 
        if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                  3 << 22, pll_od_to_reg(od2) << 22);
+                               3 << 22, pll_od_to_reg(od2) << 22);
        else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
-                meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+                       meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
-                                  3 << 23, pll_od_to_reg(od2) << 23);
+                               3 << 23, pll_od_to_reg(od2) << 23);
 
        if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
-                                  3 << 18, pll_od_to_reg(od3) << 18);
+                               3 << 18, pll_od_to_reg(od3) << 18);
        else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
-                meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+                       meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
                regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
-                                  3 << 19, pll_od_to_reg(od3) << 19);
+                               3 << 19, pll_od_to_reg(od3) << 19);
+
 }
 
-void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
-                     unsigned int vclk_freq, unsigned int venc_freq,
-                     unsigned int dac_freq, bool hdmi_use_enci)
+#define XTAL_FREQ 24000
+
+static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
+                                        unsigned int pll_freq)
 {
-       unsigned int freq;
-       unsigned int hdmi_tx_div;
-       unsigned int venc_div;
+       /* The GXBB PLL has a /2 pre-multiplier */
+       if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+               pll_freq /= 2;
 
-       if (target == MESON_VCLK_TARGET_CVBS) {
-               meson_venci_cvbs_clock_config(priv);
-               return;
+       return pll_freq / XTAL_FREQ;
+}
+
+#define HDMI_FRAC_MAX_GXBB     4096
+#define HDMI_FRAC_MAX_GXL      1024
+
+static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
+                                           unsigned int m,
+                                           unsigned int pll_freq)
+{
+       unsigned int parent_freq = XTAL_FREQ;
+       unsigned int frac_max = HDMI_FRAC_MAX_GXL;
+       unsigned int frac_m;
+       unsigned int frac;
+
+       /* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
+       if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+               frac_max = HDMI_FRAC_MAX_GXBB;
+               parent_freq *= 2;
        }
 
-       hdmi_tx_div = vclk_freq / dac_freq;
+       /* We can have a perfect match! */
+       if (pll_freq / m == parent_freq &&
+           pll_freq % m == 0)
+               return 0;
 
-       if (hdmi_tx_div == 0) {
-               pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
-                               dac_freq);
-               return;
+       frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
+       frac_m = m * frac_max;
+       if (frac_m > frac)
+               return frac_max;
+       frac -= frac_m;
+
+       return min((u16)frac, (u16)(frac_max - 1));
+}
+
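The M/FRAC arithmetic above can be cross-checked against the fixed table kept in meson_vclk_set() further down. A stand-alone sketch (plain userspace C with constants taken from this patch, not driver code; the kernel uses div_u64() where this simply divides):

	#include <stdint.h>
	#include <stdio.h>

	#define XTAL_FREQ		24000	/* kHz */
	#define HDMI_FRAC_MAX_GXL	1024

	/* Mirrors meson_hdmi_pll_get_m()/_get_frac() for a GXL-class PLL
	 * (no /2 pre-multiplier) at a 2970000 kHz PLL target. */
	int main(void)
	{
		unsigned int pll_freq = 2970000;	/* kHz */
		unsigned int m = pll_freq / XTAL_FREQ;	/* 123 == 0x7b */
		uint64_t frac = (uint64_t)pll_freq * HDMI_FRAC_MAX_GXL
				/ XTAL_FREQ
				- (uint64_t)m * HDMI_FRAC_MAX_GXL;

		/* prints: m=0x7b frac=0x300 */
		printf("m=0x%x frac=0x%llx\n", m, (unsigned long long)frac);
		return 0;
	}

These are exactly the (0x7b, 0x300) values hard-coded for the GXL 2970000 kHz case below; on GXBB, with the doubled parent clock and HDMI_FRAC_MAX_GXBB of 4096, the same arithmetic yields m = 0x3d and frac = 0xe00, again matching the table.
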
+static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
+                                          unsigned int m,
+                                          unsigned int frac)
+{
+       if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+               /* Empiric supported min/max dividers */
+               if (m < 53 || m > 123)
+                       return false;
+               if (frac >= HDMI_FRAC_MAX_GXBB)
+                       return false;
+       } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+                  meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+               /* Empiric supported min/max dividers */
+               if (m < 106 || m > 247)
+                       return false;
+               if (frac >= HDMI_FRAC_MAX_GXL)
+                       return false;
        }
 
-       venc_div = vclk_freq / venc_freq;
+       return true;
+}
 
-       if (venc_div == 0) {
-               pr_err("Fatal Error, invalid HDMI venc freq %d\n",
-                               venc_freq);
-               return;
+static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
+                                      unsigned int freq,
+                                      unsigned int *m,
+                                      unsigned int *frac,
+                                      unsigned int *od)
+{
+       /* Cycle from /16 to /2 */
+       for (*od = 16 ; *od > 1 ; *od >>= 1) {
+               *m = meson_hdmi_pll_get_m(priv, freq * *od);
+               if (!*m)
+                       continue;
+               *frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
+
+               DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
+                                freq, *m, *frac, *od);
+
+               if (meson_hdmi_pll_validate_params(priv, *m, *frac))
+                       return true;
        }
 
-       switch (vclk_freq) {
-       case 54000:
-               if (hdmi_use_enci)
-                       freq = MESON_VCLK_HDMI_ENCI_54000;
-               else
-                       freq = MESON_VCLK_HDMI_DDR_54000;
-               break;
-       case 25175:
-               freq = MESON_VCLK_HDMI_25175;
-               break;
-       case 40000:
-               freq = MESON_VCLK_HDMI_40000;
-               break;
-       case 65000:
-               freq = MESON_VCLK_HDMI_65000;
-               break;
-       case 74250:
-               freq = MESON_VCLK_HDMI_74250;
-               break;
-       case 108000:
-               freq = MESON_VCLK_HDMI_108000;
-               break;
-       case 148500:
-               if (dac_freq != 148500)
-                       freq = MESON_VCLK_HDMI_DDR_148500;
-               else
-                       freq = MESON_VCLK_HDMI_148500;
-               break;
-       case 162000:
-               freq = MESON_VCLK_HDMI_162000;
-               break;
-       case 297000:
-               freq = MESON_VCLK_HDMI_297000;
-               break;
-       case 594000:
-               freq = MESON_VCLK_HDMI_594000;
-               break;
-       default:
-               pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
-                       vclk_freq);
+       return false;
+}
+
+/* pll_freq is the frequency after the OD dividers */
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
+{
+       unsigned int od, m, frac;
+
+       /* In DMT mode, path after PLL is always /10 */
+       freq *= 10;
+
+       if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
+               return MODE_OK;
+
+       return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
+
+/* pll_freq is the frequency after the OD dividers */
+static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
+                                      unsigned int pll_freq)
+{
+       unsigned int od, m, frac, od1, od2, od3;
+
+       if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+               od3 = 1;
+               if (od < 4) {
+                       od1 = 2;
+                       od2 = 1;
+               } else {
+                       od2 = od / 4;
+                       od1 = od / od2;
+               }
+
+               DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
+                                pll_freq, m, frac, od1, od2, od3);
+
+               meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+
                return;
        }
 
+       DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
+                 pll_freq);
+}
+
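meson_hdmi_pll_generic_set() splits the single divider returned by meson_hdmi_pll_find_params() across the three OD stages while preserving od1 * od2 * od3 == od: od = 2 maps to 2/1/1, od = 4 to 4/1/1, od = 8 to 4/2/1 and od = 16 to 4/4/1.
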
+static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
+                          unsigned int od1, unsigned int od2, unsigned int od3,
+                          unsigned int vid_pll_div, unsigned int vclk_div,
+                          unsigned int hdmi_tx_div, unsigned int venc_div,
+                          bool hdmi_use_enci)
+{
        /* Set HDMI-TX sys clock */
        regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
                           CTS_HDMI_SYS_SEL_MASK, 0);
@@ -831,19 +641,49 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
                           CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
 
        /* Set HDMI PLL rate */
-       meson_hdmi_pll_set(priv, params[freq].pll_base_freq,
-                          params[freq].pll_od1,
-                          params[freq].pll_od2,
-                          params[freq].pll_od3);
+       if (!od1 && !od2 && !od3) {
+               meson_hdmi_pll_generic_set(priv, pll_base_freq);
+       } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
+               switch (pll_base_freq) {
+               case 2970000:
+                       meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
+                                                 od1, od2, od3);
+                       break;
+               case 4320000:
+                       meson_hdmi_pll_set_params(priv, 0x5a, 0,
+                                                 od1, od2, od3);
+                       break;
+               case 5940000:
+                       meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
+                                                 od1, od2, od3);
+                       break;
+               }
+       } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+                  meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+               switch (pll_base_freq) {
+               case 2970000:
+                       meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
+                                                 od1, od2, od3);
+                       break;
+               case 4320000:
+                       meson_hdmi_pll_set_params(priv, 0xb4, 0,
+                                                 od1, od2, od3);
+                       break;
+               case 5940000:
+                       meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
+                                                 od1, od2, od3);
+                       break;
+               }
+       }
 
        /* Setup vid_pll divider */
-       meson_vid_pll_set(priv, params[freq].vid_pll_div);
+       meson_vid_pll_set(priv, vid_pll_div);
 
        /* Set VCLK div */
        regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
                           VCLK_SEL_MASK, 0);
        regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
-                          VCLK_DIV_MASK, params[freq].vclk_div - 1);
+                          VCLK_DIV_MASK, vclk_div - 1);
 
        /* Set HDMI-TX source */
        switch (hdmi_tx_div) {
@@ -981,4 +821,80 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
 
        regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
 }
+
+void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
+                     unsigned int vclk_freq, unsigned int venc_freq,
+                     unsigned int dac_freq, bool hdmi_use_enci)
+{
+       unsigned int freq;
+       unsigned int hdmi_tx_div;
+       unsigned int venc_div;
+
+       if (target == MESON_VCLK_TARGET_CVBS) {
+               meson_venci_cvbs_clock_config(priv);
+               return;
+       } else if (target == MESON_VCLK_TARGET_DMT) {
+               /* The DMT clock path is fixed after the PLL:
+                * - automatic PLL freq + OD management
+                * - vid_pll_div = VID_PLL_DIV_5
+                * - vclk_div = 2
+                * - hdmi_tx_div = 1
+                * - venc_div = 1
+                * - encp encoder
+                */
+               meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+                              VID_PLL_DIV_5, 2, 1, 1, false);
+               return;
+       }
+
+       hdmi_tx_div = vclk_freq / dac_freq;
+
+       if (hdmi_tx_div == 0) {
+               pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
+                      dac_freq);
+               return;
+       }
+
+       venc_div = vclk_freq / venc_freq;
+
+       if (venc_div == 0) {
+               pr_err("Fatal Error, invalid HDMI venc freq %d\n",
+                      venc_freq);
+               return;
+       }
+
+       switch (vclk_freq) {
+       case 54000:
+               if (hdmi_use_enci)
+                       freq = MESON_VCLK_HDMI_ENCI_54000;
+               else
+                       freq = MESON_VCLK_HDMI_DDR_54000;
+               break;
+       case 74250:
+               freq = MESON_VCLK_HDMI_74250;
+               break;
+       case 148500:
+               if (dac_freq != 148500)
+                       freq = MESON_VCLK_HDMI_DDR_148500;
+               else
+                       freq = MESON_VCLK_HDMI_148500;
+               break;
+       case 297000:
+               freq = MESON_VCLK_HDMI_297000;
+               break;
+       case 594000:
+               freq = MESON_VCLK_HDMI_594000;
+               break;
+       default:
+               pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
+                      vclk_freq);
+               return;
+       }
+
+       meson_vclk_set(priv, params[freq].pll_base_freq,
+                      params[freq].pll_od1, params[freq].pll_od2,
+                      params[freq].pll_od3, params[freq].vid_pll_div,
+                      params[freq].vclk_div, hdmi_tx_div, venc_div,
+                      hdmi_use_enci);
+}
 EXPORT_SYMBOL_GPL(meson_vclk_setup);
index 0401b5213471372aa5fc4782724dca122dc6f084..869fa3a3073e98c01c9a17a72463f7d060605b72 100644 (file)
 enum {
        MESON_VCLK_TARGET_CVBS = 0,
        MESON_VCLK_TARGET_HDMI = 1,
+       MESON_VCLK_TARGET_DMT = 2,
 };
 
 /* 27MHz is the CVBS Pixel Clock */
 #define MESON_VCLK_CVBS                        27000
 
+enum drm_mode_status
+meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+
 void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
                      unsigned int vclk_freq, unsigned int venc_freq,
                      unsigned int dac_freq, bool hdmi_use_enci);
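Typical use of the new target, mirroring the dw-hdmi hunks earlier in this series (a sketch with error handling elided; frequencies are in kHz):

	/* In .mode_valid: reject DMT clocks the PLL cannot synthesize */
	if (meson_vclk_dmt_supported_freq(priv, mode->clock) != MODE_OK)
		return MODE_CLOCK_RANGE;

	/* In mode setup: program the fixed DMT clock path */
	meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, mode->clock,
			 mode->clock, mode->clock, false);
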
index 6e2701389801383172904fb3f4f628a6edb24a4f..514245e69b3847d1dc1d5f96249d5e49f849a4ca 100644 (file)
@@ -697,314 +697,6 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
        },
 };
 
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = {
-       .encp = {
-               .dvi_settings = 0x21,
-               .video_mode = 0x4040,
-               .video_mode_adv = 0x18,
-               /* video_prog_mode */
-               /* video_sync_mode */
-               /* video_yc_dly */
-               /* video_rgb_ctrl */
-               /* video_filt_ctrl */
-               /* video_ofld_voav_ofst */
-               /* yfp1_htime */
-               /* yfp2_htime */
-               .max_pxcnt = 0x31f,
-               /* hspuls_begin */
-               /* hspuls_end */
-               /* hspuls_switch */
-               /* vspuls_begin */
-               /* vspuls_end */
-               /* vspuls_bline */
-               /* vspuls_eline */
-               .havon_begin = 0x90,
-               .havon_end = 0x30f,
-               .vavon_bline = 0x23,
-               .vavon_eline = 0x202,
-               /* eqpuls_begin */
-               /* eqpuls_end */
-               /* eqpuls_bline */
-               /* eqpuls_eline */
-               .hso_begin = 0,
-               .hso_end = 0x60,
-               .vso_begin = 0x1e,
-               .vso_end = 0x32,
-               .vso_bline = 0,
-               .vso_eline = 2,
-               .vso_eline_present = true,
-               /* sy_val */
-               /* sy2_val */
-               .max_lncnt = 0x20c,
-       },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = {
-       .encp = {
-               .dvi_settings = 0x21,
-               .video_mode = 0x4040,
-               .video_mode_adv = 0x18,
-               /* video_prog_mode */
-               /* video_sync_mode */
-               /* video_yc_dly */
-               /* video_rgb_ctrl */
-               /* video_filt_ctrl */
-               /* video_ofld_voav_ofst */
-               /* yfp1_htime */
-               /* yfp2_htime */
-               .max_pxcnt = 0x41f,
-               /* hspuls_begin */
-               /* hspuls_end */
-               /* hspuls_switch */
-               /* vspuls_begin */
-               /* vspuls_end */
-               /* vspuls_bline */
-               /* vspuls_eline */
-               .havon_begin = 0xD8,
-               .havon_end = 0x3f7,
-               .vavon_bline = 0x1b,
-               .vavon_eline = 0x272,
-               /* eqpuls_begin */
-               /* eqpuls_end */
-               /* eqpuls_bline */
-               /* eqpuls_eline */
-               .hso_begin = 0,
-               .hso_end = 0x80,
-               .vso_begin = 0x1e,
-               .vso_end = 0x32,
-               .vso_bline = 0,
-               .vso_eline = 4,
-               .vso_eline_present = true,
-               /* sy_val */
-               /* sy2_val */
-               .max_lncnt = 0x273,
-       },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = {
-       .encp = {
-               .dvi_settings = 0x21,
-               .video_mode = 0x4040,
-               .video_mode_adv = 0x18,
-               /* video_prog_mode */
-               /* video_sync_mode */
-               /* video_yc_dly */
-               /* video_rgb_ctrl */
-               /* video_filt_ctrl */
-               /* video_ofld_voav_ofst */
-               /* yfp1_htime */
-               /* yfp2_htime */
-               .max_pxcnt = 1343,
-               /* hspuls_begin */
-               /* hspuls_end */
-               /* hspuls_switch */
-               /* vspuls_begin */
-               /* vspuls_end */
-               /* vspuls_bline */
-               /* vspuls_eline */
-               .havon_begin = 296,
-               .havon_end = 1319,
-               .vavon_bline = 35,
-               .vavon_eline = 802,
-               /* eqpuls_begin */
-               /* eqpuls_end */
-               /* eqpuls_bline */
-               /* eqpuls_eline */
-               .hso_begin = 0,
-               .hso_end = 136,
-               .vso_begin = 30,
-               .vso_end = 50,
-               .vso_bline = 0,
-               .vso_eline = 6,
-               .vso_eline_present = true,
-               /* sy_val */
-               /* sy2_val */
-               .max_lncnt = 805,
-       },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = {
-       .encp = {
-               .dvi_settings = 0x21,
-               .video_mode = 0x4040,
-               .video_mode_adv = 0x18,
-               /* video_prog_mode */
-               /* video_sync_mode */
-               /* video_yc_dly */
-               /* video_rgb_ctrl */
-               /* video_filt_ctrl */
-               /* video_ofld_voav_ofst */
-               /* yfp1_htime */
-               /* yfp2_htime */
-               .max_pxcnt = 0x63f,
-               /* hspuls_begin */
-               /* hspuls_end */
-               /* hspuls_switch */
-               /* vspuls_begin */
-               /* vspuls_end */
-               /* vspuls_bline */
-               /* vspuls_eline */
-               .havon_begin = 0x180,
-               .havon_end = 0x5ff,
-               .vavon_bline = 0x23,
-               .vavon_eline = 0x382,
-               /* eqpuls_begin */
-               /* eqpuls_end */
-               /* eqpuls_bline */
-               /* eqpuls_eline */
-               .hso_begin = 0,
-               .hso_end = 0x80,
-               .vso_begin = 0x1e,
-               .vso_end = 0x32,
-               .vso_bline = 0,
-               .vso_eline = 3,
-               .vso_eline_present = true,
-               /* sy_val */
-               /* sy2_val */
-               .max_lncnt = 0x383,
-       },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = {
-       .encp = {
-               .dvi_settings = 0x21,
-               .video_mode = 0x4040,
-               .video_mode_adv = 0x18,
-               /* video_prog_mode */
-               /* video_sync_mode */
-               /* video_yc_dly */
-               /* video_rgb_ctrl */
-               /* video_filt_ctrl */
-               /* video_ofld_voav_ofst */
-               /* yfp1_htime */
-               /* yfp2_htime */
-               .max_pxcnt = 0x697,
-               /* hspuls_begin */
-               /* hspuls_end */
-               /* hspuls_switch */
-               /* vspuls_begin */
-               /* vspuls_end */
-               /* vspuls_bline */
-               /* vspuls_eline */
-               .havon_begin = 0x168,
-               .havon_end = 0x667,
-               .vavon_bline = 0x29,
-               .vavon_eline = 0x428,
-               /* eqpuls_begin */
-               /* eqpuls_end */
-               /* eqpuls_bline */
-               /* eqpuls_eline */
-               .hso_begin = 0,
-               .hso_end = 0x70,
-               .vso_begin = 0x1e,
-               .vso_end = 0x32,
-               .vso_bline = 0,
-               .vso_eline = 3,
-               .vso_eline_present = true,
-               /* sy_val */
-               /* sy2_val */
-               .max_lncnt = 0x429,
-       },
-};
-
-union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = {
-       .encp = {
-               .dvi_settings = 0x21,
-               .video_mode = 0x4040,
-               .video_mode_adv = 0x18,
-               /* video_prog_mode */
-               /* video_sync_mode */
-               /* video_yc_dly */
-               /* video_rgb_ctrl */
-               /* video_filt_ctrl */
-               /* video_ofld_voav_ofst */
-               /* yfp1_htime */
-               /* yfp2_htime */
-               .max_pxcnt = 0x86f,
-               /* hspuls_begin */
-               /* hspuls_end */
-               /* hspuls_switch */
-               /* vspuls_begin */
-               /* vspuls_end */
-               /* vspuls_bline */
-               /* vspuls_eline */
-               .havon_begin = 0x1f0,
-               .havon_end = 0x82f,
-               .vavon_bline = 0x31,
-               .vavon_eline = 0x4e0,
-               /* eqpuls_begin */
-               /* eqpuls_end */
-               /* eqpuls_bline */
-               /* eqpuls_eline */
-               .hso_begin = 0,
-               .hso_end = 0xc0,
-               .vso_begin = 0x1e,
-               .vso_end = 0x32,
-               .vso_bline = 0,
-               .vso_eline = 3,
-               .vso_eline_present = true,
-               /* sy_val */
-               /* sy2_val */
-               .max_lncnt = 0x4e1,
-       },
-};
-
-struct meson_hdmi_venc_dmt_mode {
-       struct drm_display_mode drm_mode;
-       union meson_hdmi_venc_mode *mode;
-} meson_hdmi_venc_dmt_modes[] = {
-       /* 640x480@60Hz */
-       {
-               { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
-                 752, 800, 0, 480, 490, 492, 525, 0,
-                 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-               &meson_hdmi_encp_mode_640x480_60,
-       },
-       /* 800x600@60Hz */
-       {
-               { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
-                 968, 1056, 0, 600, 601, 605, 628, 0,
-                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-               &meson_hdmi_encp_mode_800x600_60,
-       },
-       /* 1024x768@60Hz */
-       {
-               { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024,
-                 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
-                 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-               &meson_hdmi_encp_mode_1024x768_60,
-       },
-       /* 1152x864@75Hz */
-       {
-               { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152,
-                 1216, 1344, 1600, 0, 864, 865, 868, 900, 0,
-                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-               &meson_hdmi_encp_mode_1152x864_75,
-       },
-       /* 1280x1024@60Hz */
-       {
-               { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280,
-                 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-               &meson_hdmi_encp_mode_1280x1024_60,
-       },
-       /* 1600x1200@60Hz */
-       {
-               { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600,
-                 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-                 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-               &meson_hdmi_encp_mode_1600x1200_60,
-       },
-       /* 1920x1080@60Hz */
-       {
-               { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920,
-                 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
-                 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-               &meson_hdmi_encp_mode_1080p60
-       },
-       { }, /* sentinel */
-};
-
 struct meson_hdmi_venc_vic_mode {
        unsigned int vic;
        union meson_hdmi_venc_mode *mode;
@@ -1044,17 +736,20 @@ static unsigned long modulo(unsigned long a, unsigned long b)
                return a;
 }
 
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
 {
-       struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+       if (mode->flags & ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
+                           DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
+               return MODE_BAD;
 
-       while (vmode->mode) {
-               if (drm_mode_equal(&vmode->drm_mode, mode))
-                       return true;
-               vmode++;
-       }
+       if (mode->hdisplay < 640 || mode->hdisplay > 1920)
+               return MODE_BAD_HVALUE;
 
-       return false;
+       if (mode->vdisplay < 480 || mode->vdisplay > 1200)
+               return MODE_BAD_VVALUE;
+
+       return MODE_OK;
 }
 EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
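The check thus moves from an exact match against the (now deleted) DMT mode table to a parametric test: only standard sync flags, an hdisplay between 640 and 1920 and a vdisplay between 480 and 1200, with clock feasibility left to meson_vclk_dmt_supported_freq(). Returning enum drm_mode_status instead of bool also lets callers report the precise rejection reason.
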
 
@@ -1072,18 +767,29 @@ bool meson_venc_hdmi_supported_vic(int vic)
 }
 EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
 
-static union meson_hdmi_venc_mode
-*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode)
+void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
+                                  union meson_hdmi_venc_mode *dmt_mode)
 {
-       struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
-
-       while (vmode->mode) {
-               if (drm_mode_equal(&vmode->drm_mode, mode))
-                       return vmode->mode;
-               vmode++;
-       }
-
-       return NULL;
+       memset(dmt_mode, 0, sizeof(*dmt_mode));
+
+       dmt_mode->encp.dvi_settings = 0x21;
+       dmt_mode->encp.video_mode = 0x4040;
+       dmt_mode->encp.video_mode_adv = 0x18;
+       dmt_mode->encp.max_pxcnt = mode->htotal - 1;
+       dmt_mode->encp.havon_begin = mode->htotal - mode->hsync_start;
+       dmt_mode->encp.havon_end = dmt_mode->encp.havon_begin +
+                                  mode->hdisplay - 1;
+       dmt_mode->encp.vavon_bline = mode->vtotal - mode->vsync_start;
+       dmt_mode->encp.vavon_eline = dmt_mode->encp.vavon_bline +
+                                    mode->vdisplay - 1;
+       dmt_mode->encp.hso_begin = 0;
+       dmt_mode->encp.hso_end = mode->hsync_end - mode->hsync_start;
+       dmt_mode->encp.vso_begin = 30;
+       dmt_mode->encp.vso_end = 50;
+       dmt_mode->encp.vso_bline = 0;
+       dmt_mode->encp.vso_eline = mode->vsync_end - mode->vsync_start;
+       dmt_mode->encp.vso_eline_present = true;
+       dmt_mode->encp.max_lncnt = mode->vtotal - 1;
 }
 
 static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
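The generic derivation reproduces the deleted per-mode tables. Spot-checking against the removed 640x480@60 entry (htotal 800, hsync 656-752, vtotal 525, vsync 490-492): havon_begin = 800 - 656 = 144 (0x90), havon_end = 144 + 640 - 1 = 783 (0x30f), vavon_bline = 525 - 490 = 35 (0x23), vavon_eline = 35 + 480 - 1 = 514 (0x202), hso_end = 752 - 656 = 96 (0x60), vso_eline = 492 - 490 = 2, max_pxcnt = 799 (0x31f) and max_lncnt = 524 (0x20c), all identical to the constants hard-coded in the removed meson_hdmi_encp_mode_640x480_60.
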
@@ -1120,6 +826,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
                              struct drm_display_mode *mode)
 {
        union meson_hdmi_venc_mode *vmode = NULL;
+       union meson_hdmi_venc_mode vmode_dmt;
        bool use_enci = false;
        bool venc_repeat = false;
        bool hdmi_repeat = false;
@@ -1147,14 +854,17 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
        unsigned int sof_lines;
        unsigned int vsync_lines;
 
-       if (meson_venc_hdmi_supported_vic(vic))
+       if (meson_venc_hdmi_supported_vic(vic)) {
                vmode = meson_venc_hdmi_get_vic_vmode(vic);
-       else
-               vmode = meson_venc_hdmi_get_dmt_vmode(mode);
-       if (!vmode) {
-               dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
-                       DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode));
-               return;
+               if (!vmode) {
+                       dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
+                               DRM_MODE_FMT "\n", __func__,
+                               DRM_MODE_ARG(mode));
+                       return;
+               }
+       } else {
+               meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
+               vmode = &vmode_dmt;
        }
 
        /* Use VENCI for 480i and 576i and double HDMI pixels */
index 7c18a36a0dd0c1e975c33931557ee60dc4e463ae..97eaebbfa0c4a6c2ef12e921d65bbf045276fa25 100644 (file)
@@ -58,7 +58,8 @@ struct meson_cvbs_enci_mode {
 };
 
 /* HDMI Clock parameters */
-bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
+enum drm_mode_status
+meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
 bool meson_venc_hdmi_supported_vic(int vic);
 bool meson_venc_hdmi_venc_repeat(int vic);
 
index 79d95ca8a0c099601e8dada4680320bf829f97dc..f7945bae3b4a9e74b7400b463c95984b93cabc59 100644 (file)
@@ -282,7 +282,7 @@ int meson_venc_cvbs_create(struct meson_drm *priv)
 
        encoder->possible_crtcs = BIT(0);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index 8918539a19aa8b825b1491288440ab2bce5528cf..acf7bfe684549728644759e394119717f4d18f54 100644 (file)
@@ -1553,7 +1553,7 @@ static int mga_vga_get_modes(struct drm_connector *connector)
 
        edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
        if (edid) {
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
@@ -1747,7 +1747,7 @@ int mgag200_modeset_init(struct mga_device *mdev)
                return -1;
        }
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        ret = mgag200_fbdev_init(mdev);
        if (ret) {
index 38cbde971b48cdc24f0f435602459d22dec58656..843a9d40c05e33f5f13d1cbb7b09c751127940f2 100644 (file)
@@ -12,6 +12,7 @@ config DRM_MSM
        select SHMEM
        select TMPFS
        select QCOM_SCM
+       select WANT_DEV_COREDUMP
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
        select PM_OPP
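Selecting WANT_DEV_COREDUMP wires the driver up to the device coredump facility used by the new GPU state capture below: on a detected hang, the snapshot can be handed to userspace through /sys/class/devcoredump instead of being available only via debugfs prints.
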
index cd40c050b2d7fb770446f3f11ea6372ca8b7fdf3..7c773e0036631aa278d684d73937ca2170123c55 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 
 msm-y := \
@@ -45,6 +46,33 @@ msm-y := \
        disp/mdp5/mdp5_mixer.o \
        disp/mdp5/mdp5_plane.o \
        disp/mdp5/mdp5_smp.o \
+       disp/dpu1/dpu_core_irq.o \
+       disp/dpu1/dpu_core_perf.o \
+       disp/dpu1/dpu_crtc.o \
+       disp/dpu1/dpu_encoder.o \
+       disp/dpu1/dpu_encoder_phys_cmd.o \
+       disp/dpu1/dpu_encoder_phys_vid.o \
+       disp/dpu1/dpu_formats.o \
+       disp/dpu1/dpu_hw_blk.o \
+       disp/dpu1/dpu_hw_catalog.o \
+       disp/dpu1/dpu_hw_cdm.o \
+       disp/dpu1/dpu_hw_ctl.o \
+       disp/dpu1/dpu_hw_interrupts.o \
+       disp/dpu1/dpu_hw_intf.o \
+       disp/dpu1/dpu_hw_lm.o \
+       disp/dpu1/dpu_hw_pingpong.o \
+       disp/dpu1/dpu_hw_sspp.o \
+       disp/dpu1/dpu_hw_top.o \
+       disp/dpu1/dpu_hw_util.o \
+       disp/dpu1/dpu_hw_vbif.o \
+       disp/dpu1/dpu_io_util.o \
+       disp/dpu1/dpu_irq.o \
+       disp/dpu1/dpu_kms.o \
+       disp/dpu1/dpu_mdss.o \
+       disp/dpu1/dpu_plane.o \
+       disp/dpu1/dpu_power_handle.o \
+       disp/dpu1/dpu_rm.o \
+       disp/dpu1/dpu_vbif.o \
        msm_atomic.o \
        msm_debugfs.o \
        msm_drv.o \
@@ -62,7 +90,8 @@ msm-y := \
        msm_ringbuffer.o \
        msm_submitqueue.o
 
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+                         disp/dpu1/dpu_dbg.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
index 3ebbeb3a9b68f1080cfec70ad3ef197456871b66..669c2d4b070dea4c711e0e8df9585d68dd810cd9 100644 (file)
@@ -411,15 +411,6 @@ static const unsigned int a3xx_registers[] = {
        ~0   /* sentinel */
 };
 
-#ifdef CONFIG_DEBUG_FS
-static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
-{
-       seq_printf(m, "status:   %08x\n",
-                       gpu_read(gpu, REG_A3XX_RBBM_STATUS));
-       adreno_show(gpu, m);
-}
-#endif
-
 /* would be nice to not have to duplicate the _show() stuff with printk(): */
 static void a3xx_dump(struct msm_gpu *gpu)
 {
@@ -427,6 +418,21 @@ static void a3xx_dump(struct msm_gpu *gpu)
                        gpu_read(gpu, REG_A3XX_RBBM_STATUS));
        adreno_dump(gpu);
 }
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+       struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       adreno_gpu_state_get(gpu, state);
+
+       state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+       return state;
+}
+
 /* Register offset defines for A3XX */
 static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -450,9 +456,11 @@ static const struct adreno_gpu_funcs funcs = {
                .active_ring = adreno_active_ring,
                .irq = a3xx_irq,
                .destroy = a3xx_destroy,
-#ifdef CONFIG_DEBUG_FS
-               .show = a3xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+               .show = adreno_show,
 #endif
+               .gpu_state_get = a3xx_gpu_state_get,
+               .gpu_state_put = adreno_gpu_state_put,
        },
 };
 
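
a3xx_gpu_state_get() is the per-chip half of the new state-capture API: allocate a msm_gpu_state, let the adreno core fill in the generic ring and register data, then append chip-specific values such as RBBM_STATUS. A minimal sketch of the consumer side, assuming the msm core reaches these hooks through msm_gpu_funcs (the capture path itself is outside this hunk):

static void example_dump_gpu(struct msm_gpu *gpu, struct drm_printer *p)
{
	struct msm_gpu_state *state;

	if (!gpu->funcs->gpu_state_get)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	if (gpu->funcs->show)
		gpu->funcs->show(gpu, state, p);

	/* drops the reference taken by kref_init() in adreno_gpu_state_get() */
	gpu->funcs->gpu_state_put(state);
}
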
index 16d3d596638e20477112a835a9a545d264623b23..7c4e6dc1ed59961e0df2f14fcbc9805c5a2cc359 100644 (file)
@@ -455,15 +455,19 @@ static const unsigned int a4xx_registers[] = {
        ~0 /* sentinel */
 };
 
-#ifdef CONFIG_DEBUG_FS
-static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
 {
-       seq_printf(m, "status:   %08x\n",
-                       gpu_read(gpu, REG_A4XX_RBBM_STATUS));
-       adreno_show(gpu, m);
+       struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       adreno_gpu_state_get(gpu, state);
 
+       state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+       return state;
 }
-#endif
 
 /* Register offset defines for A4XX, in order of enum adreno_regs */
 static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
@@ -538,9 +542,11 @@ static const struct adreno_gpu_funcs funcs = {
                .active_ring = adreno_active_ring,
                .irq = a4xx_irq,
                .destroy = a4xx_destroy,
-#ifdef CONFIG_DEBUG_FS
-               .show = a4xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+               .show = adreno_show,
 #endif
+               .gpu_state_get = a4xx_gpu_state_get,
+               .gpu_state_put = adreno_gpu_state_put,
        },
        .get_timestamp = a4xx_get_timestamp,
 };
index d39400e5bc42907866b4cf7d734ee7f6118c5399..bd84f71d27d8567243956d9c1ddc1d9e6a09d458 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/soc/qcom/mdt_loader.h>
 #include <linux/pm_opp.h>
 #include <linux/nvmem-consumer.h>
+#include <linux/iopoll.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
 #include "a5xx_gpu.h"
@@ -1123,8 +1124,9 @@ static const u32 a5xx_registers[] = {
        0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
        0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
        0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
-       0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
-       0xB9A0, 0xB9BF, ~0
+       0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+       0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+       0xAC60, 0xAC60, ~0,
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1195,19 +1197,231 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+struct a5xx_crashdumper {
+       void *ptr;
+       struct drm_gem_object *bo;
+       u64 iova;
+};
+
+struct a5xx_gpu_state {
+       struct msm_gpu_state base;
+       u32 *hlsqregs;
+};
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+       readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+               interval, timeout)
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+               struct a5xx_crashdumper *dumper)
 {
-       seq_printf(m, "status:   %08x\n",
-                       gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+       dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+               SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+               &dumper->bo, &dumper->iova);
 
-       /*
-        * Temporarily disable hardware clock gating before going into
-        * adreno_show to avoid issues while reading the registers
-        */
+       if (IS_ERR(dumper->ptr))
+               return PTR_ERR(dumper->ptr);
+
+       return 0;
+}
+
+static void a5xx_crashdumper_free(struct msm_gpu *gpu,
+               struct a5xx_crashdumper *dumper)
+{
+       msm_gem_put_iova(dumper->bo, gpu->aspace);
+       msm_gem_put_vaddr(dumper->bo);
+
+       drm_gem_object_unreference(dumper->bo);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+               struct a5xx_crashdumper *dumper)
+{
+       u32 val;
+
+       if (IS_ERR_OR_NULL(dumper->ptr))
+               return -EINVAL;
+
+       gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+               REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+       gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+       return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+               val & 0x04, 100, 10000);
+}
+
+/*
+ * This is a list of the registers that need to be read through the HLSQ
+ * aperture via the crashdumper.  They are not normally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+       u32 type;
+       u32 regoffset;
+       u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+       { 0x35, 0xe00, 0x32 },   /* HLSQ non-context */
+       { 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
+       { 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
+       { 0x32, 0xe780, 0x62 },  /* HLSQ 3D context 0 */
+       { 0x34, 0xef80, 0x62 },  /* HLSQ 3D context 1 */
+       { 0x3f, 0x0ec0, 0x40 },  /* SP non-context */
+       { 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
+       { 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
+       { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+       { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+       { 0x3a, 0x0f00, 0x1c },  /* TP non-context */
+       { 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
+       { 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
+       { 0x39, 0xe700, 0x80 },  /* TP 3D context 0 */
+       { 0x37, 0xef00, 0x80 },  /* TP 3D context 1 */
+};
+
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+               struct a5xx_gpu_state *a5xx_state)
+{
+       struct a5xx_crashdumper dumper = { 0 };
+       u32 offset, count = 0;
+       u64 *ptr;
+       int i;
+
+       if (a5xx_crashdumper_init(gpu, &dumper))
+               return;
+
+       /* The script will be written at offset 0 */
+       ptr = dumper.ptr;
+
+       /* Start writing the data at offset 256k */
+       offset = dumper.iova + (256 * SZ_1K);
+
+       /* Count how many additional registers to get from the HLSQ aperture */
+       for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+               count += a5xx_hlsq_aperture_regs[i].count;
+
+       a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+       if (!a5xx_state->hlsqregs)
+               return;
+
+       /* Build the crashdump script */
+       for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+               u32 type = a5xx_hlsq_aperture_regs[i].type;
+               u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+               /* Write the register to select the desired bank */
+               *ptr++ = ((u64) type << 8);
+               *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+                       (1 << 21) | 1;
+
+               *ptr++ = offset;
+               *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+                       | c;
+
+               offset += c * sizeof(u32);
+       }
+
+       /* Write two zeros to close off the script */
+       *ptr++ = 0;
+       *ptr++ = 0;
+
+       if (a5xx_crashdumper_run(gpu, &dumper)) {
+               kfree(a5xx_state->hlsqregs);
+               a5xx_crashdumper_free(gpu, &dumper);
+               return;
+       }
+
+       /* Copy the data from the crashdumper to the state */
+       memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+               count * sizeof(u32));
+
+       a5xx_crashdumper_free(gpu, &dumper);
+}
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+       struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+                       GFP_KERNEL);
+
+       if (!a5xx_state)
+               return ERR_PTR(-ENOMEM);
+
+       /* Temporarily disable hardware clock gating before reading the hw */
        a5xx_set_hwcg(gpu, false);
-       adreno_show(gpu, m);
+
+       /* First get the generic state from the adreno core */
+       adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+       a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+       /* Get the HLSQ regs with the help of the crashdumper */
+       a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
        a5xx_set_hwcg(gpu, true);
+
+       return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+       struct msm_gpu_state *state = container_of(kref,
+               struct msm_gpu_state, ref);
+       struct a5xx_gpu_state *a5xx_state = container_of(state,
+               struct a5xx_gpu_state, base);
+
+       kfree(a5xx_state->hlsqregs);
+
+       adreno_gpu_state_destroy(state);
+       kfree(a5xx_state);
+}
+
+int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+       if (IS_ERR_OR_NULL(state))
+               return 1;
+
+       return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+               struct drm_printer *p)
+{
+       int i, j;
+       u32 pos = 0;
+       struct a5xx_gpu_state *a5xx_state = container_of(state,
+               struct a5xx_gpu_state, base);
+
+       if (IS_ERR_OR_NULL(state))
+               return;
+
+       adreno_show(gpu, state, p);
+
+       /* Dump the additional a5xx HLSQ registers */
+       if (!a5xx_state->hlsqregs)
+               return;
+
+       drm_printf(p, "registers-hlsq:\n");
+
+       for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+               u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+               u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+               for (j = 0; j < c; j++, pos++, o++) {
+                       /*
+                        * To keep the crashdump simple, we pull the entire
+                        * range for each register type, but not all of the
+                        * registers in the range are valid. Fortunately,
+                        * invalid registers stick out like a sore thumb with
+                        * a value of 0xdeadbeef.
+                        */
+                       if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+                               continue;
+
+                       drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
+                               o << 2, a5xx_state->hlsqregs[pos]);
+               }
+       }
 }
 #endif
 
@@ -1239,11 +1453,15 @@ static const struct adreno_gpu_funcs funcs = {
                .active_ring = a5xx_active_ring,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
                .show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
                .debugfs_init = a5xx_debugfs_init,
 #endif
                .gpu_busy = a5xx_gpu_busy,
+               .gpu_state_get = a5xx_gpu_state_get,
+               .gpu_state_put = a5xx_gpu_state_put,
        },
        .get_timestamp = a5xx_get_timestamp,
 };
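
The crashdump script assembled in a5xx_gpu_state_get_hlsq_regs() is a flat array of 64-bit pairs: one pair writes REG_A5XX_HLSQ_DBG_READ_SEL to select a register bank, the next tells the dumper to copy `count` registers from the read aperture into the payload region of the buffer, and two zero qwords terminate the script. A commented sketch of the encoding as inferred from the loop above (the helper name is hypothetical; the bit layout is not taken from hardware documentation):

/*
 * "write register" pair:  { value, (reg << 44) | (1 << 21) | 1 }
 * "read registers" pair:  { dest iova, (reg << 44) | count }
 */
static u64 *example_script_read(u64 *ptr, u64 dest_iova, u32 reg, u32 count)
{
	*ptr++ = dest_iova;
	*ptr++ = ((u64) reg << 44) | count;

	return ptr;
}
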
index 0ae5ace65462dbc0cfb146318674314f9694c4d3..44813624a28664b5b2359079a9c66cd91b6f56a8 100644 (file)
@@ -35,6 +35,7 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_PFP] = "a300_pfp.fw",
                },
                .gmem  = SZ_256K,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init  = a3xx_gpu_init,
        }, {
                .rev   = ADRENO_REV(3, 0, 6, 0),
@@ -45,6 +46,7 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_PFP] = "a300_pfp.fw",
                },
                .gmem  = SZ_128K,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init  = a3xx_gpu_init,
        }, {
                .rev   = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
@@ -55,6 +57,7 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_PFP] = "a300_pfp.fw",
                },
                .gmem  = SZ_512K,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init  = a3xx_gpu_init,
        }, {
                .rev   = ADRENO_REV(3, 3, 0, ANY_ID),
@@ -65,6 +68,7 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_PFP] = "a330_pfp.fw",
                },
                .gmem  = SZ_1M,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init  = a3xx_gpu_init,
        }, {
                .rev   = ADRENO_REV(4, 2, 0, ANY_ID),
@@ -75,6 +79,7 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_PFP] = "a420_pfp.fw",
                },
                .gmem  = (SZ_1M + SZ_512K),
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init  = a4xx_gpu_init,
        }, {
                .rev   = ADRENO_REV(4, 3, 0, ANY_ID),
@@ -85,6 +90,7 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_PFP] = "a420_pfp.fw",
                },
                .gmem  = (SZ_1M + SZ_512K),
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
                .init  = a4xx_gpu_init,
        }, {
                .rev = ADRENO_REV(5, 3, 0, 2),
@@ -96,6 +102,11 @@ static const struct adreno_info gpulist[] = {
                        [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
                },
                .gmem = SZ_1M,
+               /*
+                * Increase inactive period to 250 to avoid bouncing
+                * the GDSC which appears to make it grumpy
+                */
+               .inactive_period = 250,
                .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
                        ADRENO_QUIRK_FAULT_DETECT_MASK,
                .init = a5xx_gpu_init,
@@ -158,7 +169,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        ret = msm_gpu_hw_init(gpu);
        mutex_unlock(&dev->struct_mutex);
-       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_put_autosuspend(&pdev->dev);
        if (ret) {
                dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
                return NULL;
@@ -316,6 +327,7 @@ static int adreno_suspend(struct device *dev)
 #endif
 
 static const struct dev_pm_ops adreno_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
 };
 
index 17d0506d058c77dca97949fe1c152c8cb3420bff..38ac50b7382950078c68c5ef9f7124d571449b25 100644 (file)
@@ -17,6 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/ascii85.h>
 #include <linux/pm_opp.h>
 #include "adreno_gpu.h"
 #include "msm_gem.h"
@@ -368,40 +369,185 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
        return false;
 }
 
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       int i, count = 0;
+
+       kref_init(&state->ref);
+
+       ktime_get_real_ts64(&state->time);
+
+       for (i = 0; i < gpu->nr_rings; i++) {
+               int size = 0, j;
+
+               state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+               state->ring[i].iova = gpu->rb[i]->iova;
+               state->ring[i].seqno = gpu->rb[i]->seqno;
+               state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+               state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+               /* Copy at least 'wptr' dwords of the data */
+               size = state->ring[i].wptr;
+
+               /* After wptr, find the last non-zero dword to save space */
+               for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+                       if (gpu->rb[i]->start[j])
+                               size = j + 1;
+
+               if (size) {
+                       state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+                       if (state->ring[i].data) {
+                               memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+                               state->ring[i].data_size = size << 2;
+                       }
+               }
+       }
+
+       /* Count the number of registers */
+       for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+               count += adreno_gpu->registers[i + 1] -
+                       adreno_gpu->registers[i] + 1;
+
+       state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+       if (state->registers) {
+               int pos = 0;
+
+               for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+                       u32 start = adreno_gpu->registers[i];
+                       u32 end   = adreno_gpu->registers[i + 1];
+                       u32 addr;
+
+                       for (addr = start; addr <= end; addr++) {
+                               state->registers[pos++] = addr;
+                               state->registers[pos++] = gpu_read(gpu, addr);
+                       }
+               }
+
+               state->nr_registers = count;
+       }
+
+       return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
        int i;
 
-       seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+       for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+               kfree(state->ring[i].data);
+
+       for (i = 0; state->bos && i < state->nr_bos; i++)
+               kvfree(state->bos[i].data);
+
+       kfree(state->bos);
+       kfree(state->comm);
+       kfree(state->cmd);
+       kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+       struct msm_gpu_state *state = container_of(kref,
+               struct msm_gpu_state, ref);
+
+       adreno_gpu_state_destroy(state);
+       kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+       if (IS_ERR_OR_NULL(state))
+               return 1;
+
+       return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+{
+       char out[ASCII85_BUFSZ];
+       long l, datalen, i;
+
+       if (!ptr || !len)
+               return;
+
+       /*
+        * Only dump the non-zero part of the buffer - rarely will any data
+        * completely fill the entire allocated size of the buffer
+        */
+       for (datalen = 0, i = 0; i < len >> 2; i++) {
+               if (ptr[i])
+                       datalen = (i << 2) + 1;
+       }
+
+       /* Skip printing the object if it is empty */
+       if (datalen == 0)
+               return;
+
+       l = ascii85_encode_len(datalen);
+
+       drm_puts(p, "    data: !!ascii85 |\n");
+       drm_puts(p, "     ");
+
+       for (i = 0; i < l; i++)
+               drm_puts(p, ascii85_encode(ptr[i], out));
+
+       drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+               struct drm_printer *p)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       int i;
+
+       if (IS_ERR_OR_NULL(state))
+               return;
+
+       drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
                        adreno_gpu->info->revn, adreno_gpu->rev.core,
                        adreno_gpu->rev.major, adreno_gpu->rev.minor,
                        adreno_gpu->rev.patchid);
 
-       for (i = 0; i < gpu->nr_rings; i++) {
-               struct msm_ringbuffer *ring = gpu->rb[i];
+       drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
 
-               seq_printf(m, "rb %d: fence:    %d/%d\n", i,
-                       ring->memptrs->fence, ring->seqno);
+       drm_puts(p, "ringbuffer:\n");
 
-               seq_printf(m, "      rptr:     %d\n",
-                       get_rptr(adreno_gpu, ring));
-               seq_printf(m, "rb wptr:  %d\n", get_wptr(ring));
+       for (i = 0; i < gpu->nr_rings; i++) {
+               drm_printf(p, "  - id: %d\n", i);
+               drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
+               drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
+               drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
+               drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
+               drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
+               drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+
+               adreno_show_object(p, state->ring[i].data,
+                       state->ring[i].data_size);
        }
 
-       /* dump these out in a form that can be parsed by demsm: */
-       seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
-       for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
-               uint32_t start = adreno_gpu->registers[i];
-               uint32_t end   = adreno_gpu->registers[i+1];
-               uint32_t addr;
+       if (state->bos) {
+               drm_puts(p, "bos:\n");
 
-               for (addr = start; addr <= end; addr++) {
-                       uint32_t val = gpu_read(gpu, addr);
-                       seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+               for (i = 0; i < state->nr_bos; i++) {
+                       drm_printf(p, "  - iova: 0x%016llx\n",
+                               state->bos[i].iova);
+                       drm_printf(p, "    size: %zd\n", state->bos[i].size);
+
+                       adreno_show_object(p, state->bos[i].data,
+                               state->bos[i].size);
                }
        }
+
+       drm_puts(p, "registers:\n");
+
+       for (i = 0; i < state->nr_registers; i++) {
+               drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
+                       state->registers[i * 2] << 2,
+                       state->registers[(i * 2) + 1]);
+       }
 }
 #endif
 
@@ -565,7 +711,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
        adreno_get_pwrlevels(&pdev->dev, gpu);
 
-       pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+               adreno_gpu->info->inactive_period);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
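
Taken together, adreno_show() now emits a YAML-like document rather than the old demsm-oriented text, so the same code can back both the debugfs file and a devcoredump blob. An illustrative excerpt (the field names come from the drm_printf() calls above; the values are made up):

revision: 530 (5.3.0.2)
rbbm-status: 0x00000000
ringbuffer:
  - id: 0
    iova: 0x0000000001000000
    last-fence: 118
    retired-fence: 118
    rptr: 52
    wptr: 52
    size: 32768
    data: !!ascii85 |
     ...
registers:
  - { offset: 0x0004, value: 0x00000000 }
  - { offset: 0x0008, value: 0x00000001 }
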
index d6b0e7b813f403a623ad6c238888c2d530669c09..4406776597fdb126ce161555c662cc2a7d9f0ad5 100644 (file)
@@ -84,6 +84,7 @@ struct adreno_info {
        enum adreno_quirks quirks;
        struct msm_gpu *(*init)(struct drm_device *dev);
        const char *zapfw;
+       u32 inactive_period;
 };
 
 const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -214,8 +215,9 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx);
 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+               struct drm_printer *p);
 #endif
 void adreno_dump_info(struct msm_gpu *gpu);
 void adreno_dump(struct msm_gpu *gpu);
@@ -228,6 +230,11 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu);
 
 
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
+
 /* ringbuffer helpers (the parts that are adreno specific) */
 
 static inline void
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644 (file)
index 0000000..879c13f
--- /dev/null
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg:               private data of callback handler
+ * @irq_idx:           interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+       struct dpu_kms *dpu_kms = arg;
+       struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+       struct dpu_irq_callback *cb;
+       unsigned long irq_flags;
+
+       pr_debug("irq_idx=%d\n", irq_idx);
+
+       if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+               DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+                       atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+       }
+
+       atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+       /*
+        * Perform registered function callback
+        */
+       spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+       list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+               if (cb->func)
+                       cb->func(cb->arg, irq_idx);
+       spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+       /*
+        * Clear pending interrupt status in HW.
+        * NOTE: dpu_core_irq_callback_handler is protected by top-level
+        *       spinlock, so it is safe to clear any interrupt status here.
+        */
+       dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+                       dpu_kms->hw_intr,
+                       irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+               enum dpu_intr_type intr_type, u32 instance_idx)
+{
+       if (!dpu_kms || !dpu_kms->hw_intr ||
+                       !dpu_kms->hw_intr->ops.irq_idx_lookup)
+               return -EINVAL;
+
+       return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+                       instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms:           Pointer to dpu kms context
+ * @irq_idx:           interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+       unsigned long irq_flags;
+       int ret = 0, enable_count;
+
+       if (!dpu_kms || !dpu_kms->hw_intr ||
+                       !dpu_kms->irq_obj.enable_counts ||
+                       !dpu_kms->irq_obj.irq_counts) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+               DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+       DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+       trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+       if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+               ret = dpu_kms->hw_intr->ops.enable_irq(
+                               dpu_kms->hw_intr,
+                               irq_idx);
+               if (ret)
+                       DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+                                       irq_idx);
+
+               DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+               spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+               /* empty callback list but interrupt is enabled */
+               if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+                       DPU_ERROR("irq_idx=%d enabled with no callback\n",
+                                       irq_idx);
+               spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+       }
+
+       return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+       int i, ret = 0, counts;
+
+       if (!dpu_kms || !irq_idxs || !irq_count) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+       if (counts)
+               DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+       for (i = 0; (i < irq_count) && !ret; i++)
+               ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+       return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms:           Pointer to dpu kms context
+ * @irq_idx:           interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+       int ret = 0, enable_count;
+
+       if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+               DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+       DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+       trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+       if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+               ret = dpu_kms->hw_intr->ops.disable_irq(
+                               dpu_kms->hw_intr,
+                               irq_idx);
+               if (ret)
+                       DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+                                       irq_idx);
+               DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+       }
+
+       return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+       int i, ret = 0, counts;
+
+       if (!dpu_kms || !irq_idxs || !irq_count) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+       if (counts == 2)
+               DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+       for (i = 0; (i < irq_count) && !ret; i++)
+               ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+       return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+       if (!dpu_kms || !dpu_kms->hw_intr ||
+                       !dpu_kms->hw_intr->ops.get_interrupt_status)
+               return 0;
+
+       if (irq_idx < 0) {
+               DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+                               __builtin_return_address(0), irq_idx);
+               return 0;
+       }
+
+       return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+                       irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+               struct dpu_irq_callback *register_irq_cb)
+{
+       unsigned long irq_flags;
+
+       if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       if (!register_irq_cb || !register_irq_cb->func) {
+               DPU_ERROR("invalid irq_cb:%d func:%d\n",
+                               register_irq_cb != NULL,
+                               register_irq_cb ?
+                                       register_irq_cb->func != NULL : -1);
+               return -EINVAL;
+       }
+
+       if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+               DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+       spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+       trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+       list_del_init(&register_irq_cb->list);
+       list_add_tail(&register_irq_cb->list,
+                       &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+       spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+       return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+               struct dpu_irq_callback *register_irq_cb)
+{
+       unsigned long irq_flags;
+
+       if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       if (!register_irq_cb || !register_irq_cb->func) {
+               DPU_ERROR("invalid irq_cb:%d func:%d\n",
+                               register_irq_cb != NULL,
+                               register_irq_cb ?
+                                       register_irq_cb->func != NULL : -1);
+               return -EINVAL;
+       }
+
+       if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+               DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+       spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+       trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+       list_del_init(&register_irq_cb->list);
+       /* empty callback list but interrupt is still enabled */
+       if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+                       atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+               DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+       spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+       return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+       if (!dpu_kms || !dpu_kms->hw_intr ||
+                       !dpu_kms->hw_intr->ops.clear_all_irqs)
+               return;
+
+       dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+       if (!dpu_kms || !dpu_kms->hw_intr ||
+                       !dpu_kms->hw_intr->ops.disable_all_irqs)
+               return;
+
+       dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
+static int __prefix ## _open(struct inode *inode, struct file *file)   \
+{                                                                      \
+       return single_open(file, __prefix ## _show, inode->i_private);  \
+}                                                                      \
+static const struct file_operations __prefix ## _fops = {              \
+       .owner = THIS_MODULE,                                           \
+       .open = __prefix ## _open,                                      \
+       .release = single_release,                                      \
+       .read = seq_read,                                               \
+       .llseek = seq_lseek,                                            \
+}
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+       struct dpu_irq *irq_obj = s->private;
+       struct dpu_irq_callback *cb;
+       unsigned long irq_flags;
+       int i, irq_count, enable_count, cb_count;
+
+       if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+               DPU_ERROR("invalid parameters\n");
+               return 0;
+       }
+
+       for (i = 0; i < irq_obj->total_irqs; i++) {
+               spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+               cb_count = 0;
+               irq_count = atomic_read(&irq_obj->irq_counts[i]);
+               enable_count = atomic_read(&irq_obj->enable_counts[i]);
+               list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+                       cb_count++;
+               spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+               if (irq_count || enable_count || cb_count)
+                       seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+                                       i, irq_count, enable_count, cb_count);
+       }
+
+       return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+               struct dentry *parent)
+{
+       dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+                       parent, &dpu_kms->irq_obj,
+                       &dpu_debugfs_core_irq_fops);
+
+       return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+       debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+       dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+               struct dentry *parent)
+{
+       return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+       struct msm_drm_private *priv;
+       int i;
+
+       if (!dpu_kms) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return;
+       } else if (!dpu_kms->dev) {
+               DPU_ERROR("invalid drm device\n");
+               return;
+       } else if (!dpu_kms->dev->dev_private) {
+               DPU_ERROR("invalid device private\n");
+               return;
+       }
+       priv = dpu_kms->dev->dev_private;
+
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       dpu_clear_all_irqs(dpu_kms);
+       dpu_disable_all_irqs(dpu_kms);
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+       /* Create irq callbacks for all possible irq_idx */
+       dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+       dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+                       sizeof(struct list_head), GFP_KERNEL);
+       dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+                       sizeof(atomic_t), GFP_KERNEL);
+       dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+                       sizeof(atomic_t), GFP_KERNEL);
+       for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+               INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+               atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+               atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+       }
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+       return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+       struct msm_drm_private *priv;
+       int i;
+
+       if (!dpu_kms) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return;
+       } else if (!dpu_kms->dev) {
+               DPU_ERROR("invalid drm device\n");
+               return;
+       } else if (!dpu_kms->dev->dev_private) {
+               DPU_ERROR("invalid device private\n");
+               return;
+       }
+       priv = dpu_kms->dev->dev_private;
+
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+               if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+                               !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+                       DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+       dpu_clear_all_irqs(dpu_kms);
+       dpu_disable_all_irqs(dpu_kms);
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       kfree(dpu_kms->irq_obj.irq_cb_tbl);
+       kfree(dpu_kms->irq_obj.enable_counts);
+       kfree(dpu_kms->irq_obj.irq_counts);
+       dpu_kms->irq_obj.irq_cb_tbl = NULL;
+       dpu_kms->irq_obj.enable_counts = NULL;
+       dpu_kms->irq_obj.irq_counts = NULL;
+       dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+       /*
+        * Read interrupt status from all sources. Interrupt statuses are
+        * stored within hw_intr.
+        * This function also clears the interrupt status after reading.
+        * An individual interrupt status bit will only get stored if it
+        * is enabled.
+        */
+       dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+       /*
+        * Dispatch to HW driver to handle interrupt lookup that is being
+        * fired. When matching interrupt is located, HW driver will call to
+        * dpu_core_irq_callback_handler with the irq_idx from the lookup table.
+        * dpu_core_irq_callback_handler will perform the registered function
+        * callback, and do the interrupt status clearing once the registered
+        * callback is finished.
+        */
+       dpu_kms->hw_intr->ops.dispatch_irqs(
+                       dpu_kms->hw_intr,
+                       dpu_core_irq_callback_handler,
+                       dpu_kms);
+
+       return IRQ_HANDLED;
+}
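
dpu_core_irq() is designed to be called from the device's top-level interrupt handler: it latches all enabled status bits into hw_intr, then dispatches each one to dpu_core_irq_callback_handler(). A minimal sketch of such a handler (hypothetical; the real hookup lives in the MDSS/kms code, not in this file):

static irqreturn_t example_mdss_irq(int irq, void *arg)
{
	struct dpu_kms *dpu_kms = arg;

	/* read+clear all enabled statuses, then run registered callbacks */
	return dpu_core_irq(dpu_kms);
}
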
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644 (file)
index 0000000..5e98bba
--- /dev/null
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms:           DPU handle
+ * @return:            none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms:           DPU handle
+ * @return:            0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms:           DPU handle
+ * @return:            none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms:           DPU handle
+ * @return:            interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function to look up irq_idx in the HW
+ *                      interrupt mapping table.
+ * @dpu_kms:           DPU handle
+ * @intr_type:         DPU HW interrupt type for lookup
+ * @instance_idx:      DPU HW block instance defined in dpu_hw_mdss.h
+ * @return:            irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+               struct dpu_kms *dpu_kms,
+               enum dpu_intr_type intr_type,
+               uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms:           DPU handle
+ * @irq_idxs:          Array of irq index
+ * @irq_count:         Number of irq_idx provided in the array
+ * @return:            0 on successfully enabling the IRQs, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on each
+ * disable.  The interrupt is enabled if the count is 0 before the increment.
+ */
+int dpu_core_irq_enable(
+               struct dpu_kms *dpu_kms,
+               int *irq_idxs,
+               uint32_t irq_count);
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms:           DPU handle
+ * @irq_idxs:          Array of irq index
+ * @irq_count:         Number of irq_idx provided in the array
+ * @return:            0 on successfully disabling the IRQs, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on each
+ * disable.  The interrupt is disabled if the count is 0 after the decrement.
+ */
+int dpu_core_irq_disable(
+               struct dpu_kms *dpu_kms,
+               int *irq_idxs,
+               uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms:           DPU handle
+ * @irq_idx:           irq index
+ * @clear:             True to clear the irq after read
+ * @return:            non-zero if an irq was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+               struct dpu_kms *dpu_kms,
+               int irq_idx,
+               bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback function for an IRQ
+ * @dpu_kms:           DPU handle
+ * @irq_idx:           irq index
+ * @irq_cb:            IRQ callback structure, containing callback function
+ *                     and argument. Passing NULL for irq_cb will unregister
+ *                     the callback for the given irq_idx.
+ *                     This must exist until un-registration.
+ * @return:            0 on successfully registering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+               struct dpu_kms *dpu_kms,
+               int irq_idx,
+               struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback function for an IRQ
+ * @dpu_kms:           DPU handle
+ * @irq_idx:           irq index
+ * @irq_cb:            IRQ callback structure, containing callback function
+ *                     and argument. Passing NULL for irq_cb will unregister
+ *                     the callback for the given irq_idx.
+ *                     This must match the registration.
+ * @return:            0 on successfully unregistering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+               struct dpu_kms *dpu_kms,
+               int irq_idx,
+               struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+               struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
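
Putting the header together: a client registers a callback, then enables the refcounted interrupt. A minimal usage sketch, assuming the dpu_irq_callback fields (func, arg, list) inferred from the dispatcher in dpu_core_irq.c; the handler and the chosen intr_type are placeholders:

static void example_irq_handler(void *arg, int irq_idx)
{
	/* called from dpu_core_irq_callback_handler() under cb_lock */
}

static int example_listen(struct dpu_kms *kms, struct dpu_irq_callback *cb,
			  enum dpu_intr_type type, u32 instance)
{
	int ret, irq_idx = dpu_core_irq_idx_lookup(kms, type, instance);

	if (irq_idx < 0)
		return irq_idx;

	INIT_LIST_HEAD(&cb->list);	/* register does list_del_init() first */
	cb->func = example_irq_handler;
	cb->arg = kms;

	ret = dpu_core_irq_register_callback(kms, irq_idx, cb);
	if (ret)
		return ret;

	/* enable is refcounted: the first enable flips the HW bit on */
	return dpu_core_irq_enable(kms, &irq_idx, 1);
}
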
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644 (file)
index 0000000..41c5191
--- /dev/null
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE      128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+       DPU_PERF_MODE_NORMAL,
+       DPU_PERF_MODE_MINIMUM,
+       DPU_PERF_MODE_FIXED,
+       DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+       struct msm_drm_private *priv;
+
+       if (!crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid device\n");
+               return NULL;
+       }
+
+       priv = crtc->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid kms\n");
+               return NULL;
+       }
+
+       return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+       return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+       struct drm_crtc *tmp_crtc;
+       bool intf_connected = false;
+
+       if (!crtc)
+               goto end;
+
+       drm_for_each_crtc(tmp_crtc, crtc->dev) {
+               if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+                               _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+                       DPU_DEBUG("video interface connected crtc:%d\n",
+                               tmp_crtc->base.id);
+                       intf_connected = true;
+                       goto end;
+               }
+       }
+
+end:
+       return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+               struct drm_crtc *crtc,
+               struct drm_crtc_state *state,
+               struct dpu_core_perf_params *perf)
+{
+       struct dpu_crtc_state *dpu_cstate;
+       int i;
+
+       if (!kms || !kms->catalog || !crtc || !state || !perf) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       dpu_cstate = to_dpu_crtc_state(state);
+       memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+       if (!dpu_cstate->bw_control) {
+               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+                       perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+                                       1000ULL;
+                       perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+               }
+               perf->core_clk_rate = kms->perf.max_core_clk_rate;
+       } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+                       perf->bw_ctl[i] = 0;
+                       perf->max_per_pipe_ib[i] = 0;
+               }
+               perf->core_clk_rate = 0;
+       } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+                       perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+                       perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+               }
+               perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+       }
+
+       DPU_DEBUG(
+               "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+                       crtc->base.id, perf->core_clk_rate,
+                       perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+                       perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+                       perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+                       perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+                       perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+                       perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+               struct drm_crtc_state *state)
+{
+       u32 bw, threshold;
+       u64 bw_sum_of_intfs = 0;
+       enum dpu_crtc_client_type curr_client_type;
+       bool is_video_mode;
+       struct dpu_crtc_state *dpu_cstate;
+       struct drm_crtc *tmp_crtc;
+       struct dpu_kms *kms;
+       int i;
+
+       if (!crtc || !state) {
+               DPU_ERROR("invalid crtc\n");
+               return -EINVAL;
+       }
+
+       kms = _dpu_crtc_get_kms(crtc);
+       if (!kms || !kms->catalog) {
+               DPU_ERROR("invalid parameters\n");
+               return 0;
+       }
+
+       /* we only need a bandwidth check for real-time clients (interfaces) */
+       if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+               return 0;
+
+       dpu_cstate = to_dpu_crtc_state(state);
+
+       /* obtain new values */
+       _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+       for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+                       i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+               curr_client_type = dpu_crtc_get_client_type(crtc);
+
+               drm_for_each_crtc(tmp_crtc, crtc->dev) {
+                       if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+                           (dpu_crtc_get_client_type(tmp_crtc) ==
+                                           curr_client_type) &&
+                           (tmp_crtc != crtc)) {
+                               struct dpu_crtc_state *tmp_cstate =
+                                       to_dpu_crtc_state(tmp_crtc->state);
+
+                               DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+                                       tmp_crtc->base.id,
+                                       tmp_cstate->new_perf.bw_ctl[i],
+                                       tmp_cstate->bw_control);
+                               /*
+                                * For the bw check, only use the bw if the
+                                * atomic property has already been set
+                                */
+                               if (tmp_cstate->bw_control)
+                                       bw_sum_of_intfs +=
+                                               tmp_cstate->new_perf.bw_ctl[i];
+                       }
+               }
+
+               /* convert bandwidth to kB */
+               bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+               DPU_DEBUG("calculated bandwidth=%uk\n", bw);
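+               /*
+                * For instance, a single 1920x1080@60 RGBA8888 surface
+                * fetches roughly 1920 * 1080 * 4 * 60 = 497,664,000 B/s,
+                * which this conversion reports as ~497,664k.
+                */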
+
+               is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+               threshold = (is_video_mode ||
+                       _dpu_core_video_mode_intf_connected(crtc)) ?
+                       kms->catalog->perf.max_bw_low :
+                       kms->catalog->perf.max_bw_high;
+
+               DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+               if (!dpu_cstate->bw_control) {
+                       DPU_DEBUG("bypass bandwidth check\n");
+               } else if (!threshold) {
+                       DPU_ERROR("no bandwidth limits specified\n");
+                       return -E2BIG;
+               } else if (bw > threshold) {
+                       DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+                                       threshold);
+                       return -E2BIG;
+               }
+       }
+
+       return 0;
+}
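+
+/*
+ * This check is intended to run from the crtc's atomic_check path, so a
+ * commit whose aggregate bandwidth would exceed the catalog limits is
+ * rejected before any hardware state is touched.
+ */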
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+               struct drm_crtc *crtc, u32 bus_id)
+{
+       struct dpu_core_perf_params perf = { { 0 } };
+       enum dpu_crtc_client_type curr_client_type
+                                       = dpu_crtc_get_client_type(crtc);
+       struct drm_crtc *tmp_crtc;
+       struct dpu_crtc_state *dpu_cstate;
+       int ret = 0;
+
+       drm_for_each_crtc(tmp_crtc, crtc->dev) {
+               if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+                       curr_client_type ==
+                               dpu_crtc_get_client_type(tmp_crtc)) {
+                       dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+                       perf.max_per_pipe_ib[bus_id] =
+                               max(perf.max_per_pipe_ib[bus_id],
+                               dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+                       DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+                               tmp_crtc->base.id, bus_id,
+                               dpu_cstate->new_perf.bw_ctl[bus_id]);
+               }
+       }
+       return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: Pointer to crtc
+ *
+ * Check the crtc's state; if all pending commit requests are done,
+ * meaning no more bandwidth is needed, release the bandwidth request.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+       struct drm_crtc *tmp_crtc;
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *dpu_cstate;
+       struct dpu_kms *kms;
+       int i;
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       kms = _dpu_crtc_get_kms(crtc);
+       if (!kms || !kms->catalog) {
+               DPU_ERROR("invalid kms\n");
+               return;
+       }
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+       /* only do this for command mode rt client */
+       if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+               return;
+
+       /*
+        * If a video mode interface is present, the command mode panel's
+        * bandwidth cannot be released.
+        */
+       if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+               drm_for_each_crtc(tmp_crtc, crtc->dev) {
+                       if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+                               dpu_crtc_get_intf_mode(tmp_crtc) ==
+                                               INTF_MODE_VIDEO)
+                               return;
+               }
+
+       /* Release the bandwidth */
+       if (kms->perf.enable_bw_release) {
+               trace_dpu_cmd_release_bw(crtc->base.id);
+               DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+                       dpu_crtc->cur_perf.bw_ctl[i] = 0;
+                       _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+               }
+       }
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+       struct dss_clk *core_clk = kms->perf.core_clk;
+
+       if (core_clk->max_rate && (rate > core_clk->max_rate))
+               rate = core_clk->max_rate;
+
+       core_clk->rate = rate;
+       return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+       u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+       struct drm_crtc *crtc;
+       struct dpu_crtc_state *dpu_cstate;
+
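+       /*
+        * Take the highest core clock request across all powered-on crtcs;
+        * clk_round_rate() snaps each candidate to a rate the clock
+        * controller can actually provide.
+        */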
+       drm_for_each_crtc(crtc, kms->dev) {
+               if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+                       dpu_cstate = to_dpu_crtc_state(crtc->state);
+                       clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+                                                       clk_rate);
+                       clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+                                       clk_rate);
+               }
+       }
+
+       if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+               clk_rate = kms->perf.fix_core_clk_rate;
+
+       DPU_DEBUG("clk:%llu\n", clk_rate);
+
+       return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+               int params_changed, bool stop_req)
+{
+       struct dpu_core_perf_params *new, *old;
+       int update_bus = 0, update_clk = 0;
+       u64 clk_rate = 0;
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *dpu_cstate;
+       int i;
+       struct msm_drm_private *priv;
+       struct dpu_kms *kms;
+       int ret;
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return -EINVAL;
+       }
+
+       kms = _dpu_crtc_get_kms(crtc);
+       if (!kms || !kms->catalog) {
+               DPU_ERROR("invalid kms\n");
+               return -EINVAL;
+       }
+       priv = kms->dev->dev_private;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+       DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+                       crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+       old = &dpu_crtc->cur_perf;
+       new = &dpu_cstate->new_perf;
+
+       if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+                       /*
+                        * Cases for a bus bandwidth update:
+                        * 1. the new "ab or ib" vote is higher than the
+                        *    current one - apply it on an update request;
+                        * 2. the new "ab or ib" vote is lower than the
+                        *    current one - apply it at the end of commit
+                        *    or on stop.
+                        */
+                       if ((params_changed && ((new->bw_ctl[i] >
+                                               old->bw_ctl[i]) ||
+                                 (new->max_per_pipe_ib[i] >
+                                               old->max_per_pipe_ib[i]))) ||
+                           (!params_changed && ((new->bw_ctl[i] <
+                                               old->bw_ctl[i]) ||
+                                 (new->max_per_pipe_ib[i] <
+                                               old->max_per_pipe_ib[i])))) {
+                               DPU_DEBUG(
+                                       "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+                                       crtc->base.id, params_changed,
+                                       new->bw_ctl[i], old->bw_ctl[i]);
+                               old->bw_ctl[i] = new->bw_ctl[i];
+                               old->max_per_pipe_ib[i] =
+                                               new->max_per_pipe_ib[i];
+                               update_bus |= BIT(i);
+                       }
+               }
+
+               if ((params_changed &&
+                               (new->core_clk_rate > old->core_clk_rate)) ||
+                               (!params_changed &&
+                               (new->core_clk_rate < old->core_clk_rate))) {
+                       old->core_clk_rate = new->core_clk_rate;
+                       update_clk = 1;
+               }
+       } else {
+               DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+               memset(old, 0, sizeof(*old));
+               memset(new, 0, sizeof(*new));
+               update_bus = ~0;
+               update_clk = 1;
+       }
+       trace_dpu_perf_crtc_update(crtc->base.id,
+                               new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+                               new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+                               new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+                               new->core_clk_rate, stop_req,
+                               update_bus, update_clk);
+
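+       /*
+        * update_bus is a bitmask over the DPU_POWER_HANDLE_DBUS_ID_*
+        * buses (MNOC/LLCC/EBI); the disable path above sets it to ~0 so
+        * that every bus vote gets refreshed.
+        */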
+       for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               if (update_bus & BIT(i)) {
+                       ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+                       if (ret) {
+                               DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+                                         crtc->base.id, i);
+                               return ret;
+                       }
+               }
+       }
+
+       /*
+        * Update the clock after bandwidth vote to ensure
+        * bandwidth is available before clock rate is increased.
+        */
+       if (update_clk) {
+               clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+               trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+               ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+               if (ret) {
+                       DPU_ERROR("failed to set %s clock rate %llu\n",
+                                       kms->perf.core_clk->clk_name, clk_rate);
+                       return ret;
+               }
+
+               kms->perf.core_clk_rate = clk_rate;
+               DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+       }
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+                   const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct dpu_core_perf *perf = file->private_data;
+       struct dpu_perf_cfg *cfg;
+       u32 perf_mode = 0;
+       char buf[10];
+
+       if (!perf)
+               return -ENODEV;
+
+       cfg = &perf->catalog->perf;
+
+       if (count >= sizeof(buf))
+               return -EFAULT;
+
+       if (copy_from_user(buf, user_buf, count))
+               return -EFAULT;
+
+       buf[count] = 0; /* end of string */
+
+       if (kstrtouint(buf, 0, &perf_mode))
+               return -EFAULT;
+
+       if (perf_mode >= DPU_PERF_MODE_MAX)
+               return -EFAULT;
+
+       if (perf_mode == DPU_PERF_MODE_FIXED) {
+               DRM_INFO("fixed performance mode\n");
+       } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+               /* run the driver with max clk and BW vote */
+               perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+               perf->perf_tune.min_bus_vote =
+                               (u64) cfg->max_bw_high * 1000;
+               DRM_INFO("minimum performance mode\n");
+       } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+               /* reset the perf tune params to 0 */
+               perf->perf_tune.min_core_clk = 0;
+               perf->perf_tune.min_bus_vote = 0;
+               DRM_INFO("normal performance mode\n");
+       }
+       perf->perf_tune.mode = perf_mode;
+
+       return count;
+}
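+
+/*
+ * A sketch of expected usage, assuming debugfs is mounted in the usual
+ * place and the enum order DPU_PERF_MODE_NORMAL=0, MINIMUM=1, FIXED=2:
+ *
+ *   echo 1 > /sys/kernel/debug/dri/0/core_perf/perf_mode
+ *
+ * would select DPU_PERF_MODE_MINIMUM (which pins the minimum clock and
+ * bus votes to their maximum values); the exact path depends on the
+ * parent dentry passed to dpu_core_perf_debugfs_init() below.
+ */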
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+                       char __user *buff, size_t count, loff_t *ppos)
+{
+       struct dpu_core_perf *perf = file->private_data;
+       int len = 0;
+       char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+       if (!perf)
+               return -ENODEV;
+
+       if (*ppos)
+               return 0;       /* the end */
+
+       len = snprintf(buf, sizeof(buf),
+                       "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+                       perf->perf_tune.mode,
+                       perf->perf_tune.min_core_clk,
+                       perf->perf_tune.min_bus_vote);
+       if (len < 0 || len >= sizeof(buf))
+               return 0;
+
+       if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+               return -EFAULT;
+
+       *ppos += len;   /* increase offset */
+
+       return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+       .open = simple_open,
+       .read = _dpu_core_perf_mode_read,
+       .write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+       debugfs_remove_recursive(perf->debugfs_root);
+       perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+               struct dentry *parent)
+{
+       struct dpu_mdss_cfg *catalog = perf->catalog;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       priv = perf->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid KMS reference\n");
+               return -EINVAL;
+       }
+
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+       if (!perf->debugfs_root) {
+               DPU_ERROR("failed to create core perf debugfs\n");
+               return -EINVAL;
+       }
+
+       debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+                       &perf->max_core_clk_rate);
+       debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+                       &perf->core_clk_rate);
+       debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+                       (u32 *)&perf->enable_bw_release);
+       debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+                       (u32 *)&catalog->perf.max_bw_low);
+       debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+                       (u32 *)&catalog->perf.max_bw_high);
+       debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+                       (u32 *)&catalog->perf.min_core_ib);
+       debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+                       (u32 *)&catalog->perf.min_llcc_ib);
+       debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+                       (u32 *)&catalog->perf.min_dram_ib);
+       debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+                       (u32 *)perf, &dpu_core_perf_mode_fops);
+       debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+                       &perf->fix_core_clk_rate);
+       debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+                       &perf->fix_core_ib_vote);
+       debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+                       &perf->fix_core_ab_vote);
+
+       return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+               struct dentry *parent)
+{
+       return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+       if (!perf) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       dpu_core_perf_debugfs_destroy(perf);
+       perf->max_core_clk_rate = 0;
+       perf->core_clk = NULL;
+       perf->phandle = NULL;
+       perf->catalog = NULL;
+       perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+               struct drm_device *dev,
+               struct dpu_mdss_cfg *catalog,
+               struct dpu_power_handle *phandle,
+               struct dss_clk *core_clk)
+{
+       perf->dev = dev;
+       perf->catalog = catalog;
+       perf->phandle = phandle;
+       perf->core_clk = core_clk;
+
+       perf->max_core_clk_rate = core_clk->max_rate;
+       if (!perf->max_core_clk_rate) {
+               DPU_DEBUG("max core clk rate not set, using default\n");
+               perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644 (file)
index 0000000..fbcbe0c
--- /dev/null
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define        DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE      412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+       u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+       u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+       u64 core_clk_rate;
+};
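+
+/*
+ * In the fields above, bw_ctl carries the arbitrated ("ab") bandwidth
+ * vote and max_per_pipe_ib the instantaneous ("ib") floor, one entry per
+ * DPU_POWER_HANDLE_DBUS_ID_* data bus; the same ab/ib naming shows up in
+ * the fix_core_ab_vote/fix_core_ib_vote debug overrides below.
+ */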
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+       u32 mode;
+       u64 min_core_clk;
+       u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+       struct drm_device *dev;
+       struct dentry *debugfs_root;
+       struct dpu_mdss_cfg *catalog;
+       struct dpu_power_handle *phandle;
+       struct dss_clk *core_clk;
+       u64 core_clk_rate;
+       u64 max_core_clk_rate;
+       struct dpu_core_perf_tune perf_tune;
+       u32 enable_bw_release;
+       u64 fix_core_clk_rate;
+       u64 fix_core_ib_vote;
+       u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+               struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+               int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+               struct drm_device *dev,
+               struct dpu_mdss_cfg *catalog,
+               struct dpu_power_handle *phandle,
+               struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+               struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644 (file)
index 0000000..80cbf75
--- /dev/null
@@ -0,0 +1,2138 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
+#define DPU_DRM_BLEND_OP_OPAQUE         1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
+#define DPU_DRM_BLEND_OP_COVERAGE       3
+#define DPU_DRM_BLEND_OP_MAX            4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE                 256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+       struct msm_drm_private *priv;
+
+       if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid crtc\n");
+               return NULL;
+       }
+       priv = crtc->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid kms\n");
+               return NULL;
+       }
+
+       return to_dpu_kms(priv->kms);
+}
+
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+       struct drm_crtc *crtc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!dpu_crtc) {
+               DPU_ERROR("invalid dpu crtc\n");
+               return -EINVAL;
+       }
+
+       crtc = &dpu_crtc->base;
+       if (!crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid drm device\n");
+               return -EINVAL;
+       }
+
+       priv = crtc->dev->dev_private;
+       if (!priv->kms) {
+               DPU_ERROR("invalid kms\n");
+               return -EINVAL;
+       }
+
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       if (enable)
+               pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       else
+               pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+       if (!rp)
+               return NULL;
+
+       return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+       struct dpu_crtc_res *res, *next;
+       struct drm_crtc *crtc;
+
+       crtc = _dpu_crtc_rp_to_crtc(rp);
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+                       force ? "destroy" : "free_unused");
+
+       list_for_each_entry_safe(res, next, &rp->res_list, list) {
+               if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+                       continue;
+               DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+                               crtc->base.id, rp->sequence_id,
+                               res->type, res->tag, res->val,
+                               atomic_read(&res->refcount));
+               list_del(&res->list);
+               if (res->ops.put)
+                       res->ops.put(res->val);
+               kfree(res);
+       }
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resources in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+       mutex_lock(rp->rp_lock);
+       _dpu_crtc_rp_reclaim(rp, false);
+       mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+       mutex_lock(rp->rp_lock);
+       list_del_init(&rp->rp_list);
+       _dpu_crtc_rp_reclaim(rp, true);
+       mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+       DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+       return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+       DPU_DEBUG("res://%pK\n", val);
+       dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+               struct dpu_crtc_respool *dup_rp)
+{
+       struct dpu_crtc_res *res, *dup_res;
+       struct drm_crtc *crtc;
+
+       if (!rp || !dup_rp || !rp->rp_head) {
+               DPU_ERROR("invalid resource pool\n");
+               return;
+       }
+
+       crtc = _dpu_crtc_rp_to_crtc(rp);
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+       mutex_lock(rp->rp_lock);
+       dup_rp->sequence_id = rp->sequence_id + 1;
+       INIT_LIST_HEAD(&dup_rp->res_list);
+       dup_rp->ops = rp->ops;
+       list_for_each_entry(res, &rp->res_list, list) {
+               dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+               if (!dup_res) {
+                       mutex_unlock(rp->rp_lock);
+                       return;
+               }
+               INIT_LIST_HEAD(&dup_res->list);
+               atomic_set(&dup_res->refcount, 0);
+               dup_res->type = res->type;
+               dup_res->tag = res->tag;
+               dup_res->val = res->val;
+               dup_res->ops = res->ops;
+               dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+               DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+                               crtc->base.id, dup_rp->sequence_id,
+                               dup_res->type, dup_res->tag, dup_res->val,
+                               atomic_read(&dup_res->refcount));
+               list_add_tail(&dup_res->list, &dup_rp->res_list);
+               if (dup_res->ops.get)
+                       dup_res->ops.get(dup_res->val, 0, -1);
+       }
+
+       dup_rp->rp_lock = rp->rp_lock;
+       dup_rp->rp_head = rp->rp_head;
+       INIT_LIST_HEAD(&dup_rp->rp_list);
+       list_add_tail(&dup_rp->rp_list, rp->rp_head);
+       mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+               struct mutex *rp_lock, struct list_head *rp_head)
+{
+       if (!rp || !rp_lock || !rp_head) {
+               DPU_ERROR("invalid resource pool\n");
+               return;
+       }
+
+       mutex_lock(rp_lock);
+       rp->rp_lock = rp_lock;
+       rp->rp_head = rp_head;
+       INIT_LIST_HEAD(&rp->rp_list);
+       rp->sequence_id = 0;
+       INIT_LIST_HEAD(&rp->res_list);
+       rp->ops.get = _dpu_crtc_hw_blk_get;
+       rp->ops.put = _dpu_crtc_hw_blk_put;
+       list_add_tail(&rp->rp_list, rp->rp_head);
+       mutex_unlock(rp_lock);
+}
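+
+/*
+ * Taken together, the respool helpers implement a per-crtc-state resource
+ * pool: _dpu_crtc_rp_reset() initializes a fresh pool,
+ * _dpu_crtc_rp_duplicate() clones it (marking entries FREE) when the
+ * atomic state is duplicated, _dpu_crtc_rp_free_unused() drops entries
+ * nobody re-acquired, and _dpu_crtc_rp_destroy() reclaims everything when
+ * the state is destroyed.
+ */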
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+
+       DPU_DEBUG("\n");
+
+       if (!crtc)
+               return;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       dpu_crtc->phandle = NULL;
+
+       drm_crtc_cleanup(crtc);
+       mutex_destroy(&dpu_crtc->crtc_lock);
+       kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+               struct dpu_plane_state *pstate)
+{
+       struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+       /* default to opaque blending */
+       lm->ops.setup_blend_config(lm, pstate->stage, 0XFF, 0,
+                               DPU_BLEND_FG_ALPHA_FG_CONST |
+                               DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *crtc_state;
+       int lm_idx, lm_horiz_position;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       crtc_state = to_dpu_crtc_state(crtc->state);
+
+       lm_horiz_position = 0;
+       for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+               const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+               struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+               struct dpu_hw_mixer_cfg cfg;
+
+               if (!lm_roi || !drm_rect_visible(lm_roi))
+                       continue;
+
+               cfg.out_width = drm_rect_width(lm_roi);
+               cfg.out_height = drm_rect_height(lm_roi);
+               cfg.right_mixer = lm_horiz_position++;
+               cfg.flags = 0;
+               hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+       }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+       struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+       struct drm_plane *plane;
+       struct drm_framebuffer *fb;
+       struct drm_plane_state *state;
+       struct dpu_crtc_state *cstate;
+       struct dpu_plane_state *pstate = NULL;
+       struct dpu_format *format;
+       struct dpu_hw_ctl *ctl;
+       struct dpu_hw_mixer *lm;
+       struct dpu_hw_stage_cfg *stage_cfg;
+
+       u32 flush_mask;
+       uint32_t stage_idx, lm_idx;
+       int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+       bool bg_alpha_enable = false;
+
+       if (!dpu_crtc || !mixer) {
+               DPU_ERROR("invalid dpu_crtc or mixer\n");
+               return;
+       }
+
+       ctl = mixer->hw_ctl;
+       lm = mixer->hw_lm;
+       stage_cfg = &dpu_crtc->stage_cfg;
+       cstate = to_dpu_crtc_state(crtc->state);
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               state = plane->state;
+               if (!state)
+                       continue;
+
+               pstate = to_dpu_plane_state(state);
+               fb = state->fb;
+
+               dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+               DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+                               crtc->base.id,
+                               pstate->stage,
+                               plane->base.id,
+                               dpu_plane_pipe(plane) - SSPP_VIG0,
+                               state->fb ? state->fb->base.id : -1);
+
+               format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+               if (!format) {
+                       DPU_ERROR("invalid format\n");
+                       return;
+               }
+
+               if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+                       bg_alpha_enable = true;
+
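+               /*
+                * Each blend stage offers a small number of pipe slots
+                * (e.g. for multirect); zpos_cnt tracks how many slots of
+                * this stage are taken, so stage_idx picks the next free
+                * one.
+                */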
+               stage_idx = zpos_cnt[pstate->stage]++;
+               stage_cfg->stage[pstate->stage][stage_idx] =
+                                       dpu_plane_pipe(plane);
+               stage_cfg->multirect_index[pstate->stage][stage_idx] =
+                                       pstate->multirect_index;
+
+               trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+                                          state, pstate, stage_idx,
+                                          dpu_plane_pipe(plane) - SSPP_VIG0,
+                                          format->base.pixel_format,
+                                          fb ? fb->modifier : 0);
+
+               /* blend config update */
+               for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+                       _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+                       mixer[lm_idx].flush_mask |= flush_mask;
+
+                       if (bg_alpha_enable && !format->alpha_enable)
+                               mixer[lm_idx].mixer_op_mode = 0;
+                       else
+                               mixer[lm_idx].mixer_op_mode |=
+                                               1 << pstate->stage;
+               }
+       }
+
+       _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *dpu_crtc_state;
+       struct dpu_crtc_mixer *mixer;
+       struct dpu_hw_ctl *ctl;
+       struct dpu_hw_mixer *lm;
+
+       int i;
+
+       if (!crtc)
+               return;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+       mixer = dpu_crtc->mixers;
+
+       DPU_DEBUG("%s\n", dpu_crtc->name);
+
+       if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+               DPU_ERROR("invalid number of mixers: %d\n", dpu_crtc->num_mixers);
+               return;
+       }
+
+       for (i = 0; i < dpu_crtc->num_mixers; i++) {
+               if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+                       DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+                       return;
+               }
+               mixer[i].mixer_op_mode = 0;
+               mixer[i].flush_mask = 0;
+               if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+                       mixer[i].hw_ctl->ops.clear_all_blendstages(
+                                       mixer[i].hw_ctl);
+       }
+
+       /* initialize stage cfg */
+       memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+       _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
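+       /*
+        * Second pass: with the stage contents known, program the
+        * alpha-out mask and stage configuration on each mixer, and fold
+        * the mixer's own flush bit into the pending CTL flush mask.
+        */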
+       for (i = 0; i < dpu_crtc->num_mixers; i++) {
+               ctl = mixer[i].hw_ctl;
+               lm = mixer[i].hw_lm;
+
+               lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+               mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+                       mixer[i].hw_lm->idx);
+
+               /* stage config flush mask */
+               ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+               DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+                       mixer[i].hw_lm->idx - LM_0,
+                       mixer[i].mixer_op_mode,
+                       ctl->idx - CTL_0,
+                       mixer[i].flush_mask);
+
+               ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+                       &dpu_crtc->stage_cfg);
+       }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Any pending vblank events are added to the vblank_event_list so that
+ * the next vblank interrupt will signal them. However, PAGE_FLIP events
+ * are not handled through the vblank_event_list; this API signals any
+ * pending PAGE_FLIP events requested through DRM_IOCTL_MODE_PAGE_FLIP
+ * that are cached in dpu_crtc->event.
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       if (dpu_crtc->event) {
+               DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+                             dpu_crtc->event);
+               trace_dpu_crtc_complete_flip(DRMID(crtc));
+               drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+               dpu_crtc->event = NULL;
+       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+
+       if (!crtc || !crtc->dev) {
+               DPU_ERROR("invalid crtc\n");
+               return INTF_MODE_NONE;
+       }
+
+       drm_for_each_encoder(encoder, crtc->dev)
+               if (encoder->crtc == crtc)
+                       return dpu_encoder_get_intf_mode(encoder);
+
+       return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+       struct drm_crtc *crtc = (struct drm_crtc *)data;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+       /* keep statistics on vblank callback - with auto reset via debugfs */
+       if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+               dpu_crtc->vblank_cb_time = ktime_get();
+       else
+               dpu_crtc->vblank_cb_count++;
+       _dpu_crtc_complete_flip(crtc);
+       drm_crtc_handle_vblank(crtc);
+       trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+       struct msm_drm_private *priv;
+       struct dpu_crtc_frame_event *fevent;
+       struct drm_crtc *crtc;
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_kms *dpu_kms;
+       unsigned long flags;
+       bool frame_done = false;
+
+       if (!work) {
+               DPU_ERROR("invalid work handle\n");
+               return;
+       }
+
+       fevent = container_of(work, struct dpu_crtc_frame_event, work);
+       if (!fevent->crtc || !fevent->crtc->state) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       crtc = fevent->crtc;
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       dpu_kms = _dpu_crtc_get_kms(crtc);
+       if (!dpu_kms) {
+               DPU_ERROR("invalid kms handle\n");
+               return;
+       }
+       priv = dpu_kms->dev->dev_private;
+       DPU_ATRACE_BEGIN("crtc_frame_event");
+
+       DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+                       ktime_to_ns(fevent->ts));
+
+       if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+                               | DPU_ENCODER_FRAME_EVENT_ERROR
+                               | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
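+               /*
+                * frame_pending counts kickoffs that have not yet signalled
+                * DONE/ERROR; only the last completion releases the
+                * bandwidth vote.
+                */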
+               if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+                       /* this should not happen */
+                       DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+                                       crtc->base.id,
+                                       fevent->event,
+                                       ktime_to_ns(fevent->ts),
+                                       atomic_read(&dpu_crtc->frame_pending));
+               } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+                       /* release bandwidth and other resources */
+                       trace_dpu_crtc_frame_event_done(DRMID(crtc),
+                                                       fevent->event);
+                       dpu_core_perf_crtc_release_bw(crtc);
+               } else {
+                       trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+                                                               fevent->event);
+               }
+
+               if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+                       dpu_core_perf_crtc_update(crtc, 0, false);
+
+               if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+                                       | DPU_ENCODER_FRAME_EVENT_ERROR))
+                       frame_done = true;
+       }
+
+       if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+               DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+                               crtc->base.id, ktime_to_ns(fevent->ts));
+
+       if (frame_done)
+               complete_all(&dpu_crtc->frame_done_comp);
+
+       spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+       list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+       spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+       DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC
+ * module registers this API with the encoder for all frame event
+ * callbacks such as frame_error, frame_done and idle_timeout. The
+ * encoder may invoke it from different contexts - IRQ, user thread,
+ * commit thread, etc. Each event should be carefully reviewed and
+ * processed in the proper task context to avoid scheduling delay or to
+ * properly manage the IRQ context's bottom-half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+       struct drm_crtc *crtc = (struct drm_crtc *)data;
+       struct dpu_crtc *dpu_crtc;
+       struct msm_drm_private *priv;
+       struct dpu_crtc_frame_event *fevent;
+       unsigned long flags;
+       u32 crtc_id;
+
+       if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       /* Nothing to do on idle event */
+       if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+               return;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       priv = crtc->dev->dev_private;
+       crtc_id = drm_crtc_index(crtc);
+
+       trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+       spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+       fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+                       struct dpu_crtc_frame_event, list);
+       if (fevent)
+               list_del_init(&fevent->list);
+       spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+       if (!fevent) {
+               DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+               return;
+       }
+
+       fevent->event = event;
+       fevent->crtc = crtc;
+       fevent->ts = ktime_get();
+       kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+               struct drm_crtc_state *old_state)
+{
+       if (!crtc || !crtc->state) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+       trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+               struct drm_crtc *crtc,
+               struct drm_encoder *enc)
+{
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+       struct dpu_rm *rm = &dpu_kms->rm;
+       struct dpu_crtc_mixer *mixer;
+       struct dpu_hw_ctl *last_valid_ctl = NULL;
+       int i;
+       struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+       dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+       dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+       /* Set up all the mixers and ctls reserved by this encoder */
+       for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+               mixer = &dpu_crtc->mixers[i];
+
+               if (!dpu_rm_get_hw(rm, &lm_iter))
+                       break;
+               mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+               /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+               if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+                       DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+                                       mixer->hw_lm->idx - LM_0);
+                       mixer->hw_ctl = last_valid_ctl;
+               } else {
+                       mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+                       last_valid_ctl = mixer->hw_ctl;
+               }
+
+               /* Shouldn't happen, mixers are always >= ctls */
+               if (!mixer->hw_ctl) {
+                       DPU_ERROR("no valid ctls found for lm %d\n",
+                                       mixer->hw_lm->idx - LM_0);
+                       return;
+               }
+
+               mixer->encoder = enc;
+
+               dpu_crtc->num_mixers++;
+               DPU_DEBUG("setup mixer %d: lm %d\n",
+                               i, mixer->hw_lm->idx - LM_0);
+               DPU_DEBUG("setup mixer %d: ctl %d\n",
+                               i, mixer->hw_ctl->idx - CTL_0);
+       }
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct drm_encoder *enc;
+
+       dpu_crtc->num_mixers = 0;
+       dpu_crtc->mixers_swapped = false;
+       memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+       /* Check for mixers on all encoders attached to this crtc */
+       list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+               if (enc->crtc != crtc)
+                       continue;
+
+               _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+       }
+
+       mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+               struct drm_crtc_state *state)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *cstate;
+       struct drm_display_mode *adj_mode;
+       u32 crtc_split_width;
+       int i;
+
+       if (!crtc || !state) {
+               DPU_ERROR("invalid args\n");
+               return;
+       }
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       cstate = to_dpu_crtc_state(state);
+
+       adj_mode = &state->adjusted_mode;
+       crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
+       for (i = 0; i < dpu_crtc->num_mixers; i++) {
+               struct drm_rect *r = &cstate->lm_bounds[i];
+               r->x1 = crtc_split_width * i;
+               r->y1 = 0;
+               r->x2 = r->x1 + crtc_split_width;
+               r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+               trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+       }
+
+       drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+               struct drm_crtc_state *old_state)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct drm_encoder *encoder;
+       struct drm_device *dev;
+       unsigned long flags;
+       struct dpu_crtc_smmu_state_data *smmu_state;
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       if (!crtc->state->enable) {
+               DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+                               crtc->base.id, crtc->state->enable);
+               return;
+       }
+
+       DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       dev = crtc->dev;
+       smmu_state = &dpu_crtc->smmu_state;
+
+       if (!dpu_crtc->num_mixers) {
+               _dpu_crtc_setup_mixers(crtc);
+               _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+       }
+
+       if (dpu_crtc->event) {
+               WARN_ON(dpu_crtc->event);
+       } else {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               dpu_crtc->event = crtc->state->event;
+               crtc->state->event = NULL;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               /* encoder will trigger pending mask now */
+               dpu_encoder_trigger_kickoff_pending(encoder);
+       }
+
+       /*
+        * If no mixers have been allocated in dpu_crtc_atomic_check(),
+        * it means we are trying to flush a CRTC whose state is disabled:
+        * nothing else needs to be done.
+        */
+       if (unlikely(!dpu_crtc->num_mixers))
+               return;
+
+       _dpu_crtc_blend_setup(crtc);
+
+       /*
+        * PP_DONE irq is only used by command mode for now.
+        * It is better to request pending before FLUSH and START trigger
+        * to make sure no pp_done irq missed.
+        * This is safe because no pp_done will happen before SW trigger
+        * in command mode.
+        */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+               struct drm_crtc_state *old_crtc_state)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct drm_device *dev;
+       struct drm_plane *plane;
+       struct msm_drm_private *priv;
+       struct msm_drm_thread *event_thread;
+       unsigned long flags;
+       struct dpu_crtc_state *cstate;
+
+       if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       if (!crtc->state->enable) {
+               DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+                               crtc->base.id, crtc->state->enable);
+               return;
+       }
+
+       DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       cstate = to_dpu_crtc_state(crtc->state);
+       dev = crtc->dev;
+       priv = dev->dev_private;
+
+       if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+               DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+               return;
+       }
+
+       event_thread = &priv->event_thread[crtc->index];
+
+       if (dpu_crtc->event) {
+               DPU_DEBUG("already received dpu_crtc->event\n");
+       } else {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               dpu_crtc->event = crtc->state->event;
+               crtc->state->event = NULL;
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+
+       /*
+        * If no mixers have been allocated in dpu_crtc_atomic_check(),
+        * it means we are trying to flush a CRTC whose state is disabled:
+        * nothing else needs to be done.
+        */
+       if (unlikely(!dpu_crtc->num_mixers))
+               return;
+
+       /*
+        * For planes without commit update, drm framework will not add
+        * those planes to current state since hardware update is not
+        * required. However, if those planes were power collapsed since
+        * last commit cycle, driver has to restore the hardware state
+        * of those planes explicitly here prior to plane flush.
+        */
+       drm_atomic_crtc_for_each_plane(plane, crtc)
+               dpu_plane_restore(plane);
+
+       /* update performance setting before crtc kickoff */
+       dpu_core_perf_crtc_update(crtc, 1, false);
+
+       /*
+        * Final plane updates: Give each plane a chance to complete all
+        *                      required writes/flushing before crtc's "flush
+        *                      everything" call below.
+        */
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               if (dpu_crtc->smmu_state.transition_error)
+                       dpu_plane_set_error(plane, true);
+               dpu_plane_flush(plane);
+       }
+
+       /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+               struct drm_crtc_state *state)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *cstate;
+
+       if (!crtc || !state) {
+               DPU_ERROR("invalid argument(s)\n");
+               return;
+       }
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       cstate = to_dpu_crtc_state(state);
+
+       DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+       _dpu_crtc_rp_destroy(&cstate->rp);
+
+       __drm_atomic_helper_crtc_destroy_state(state);
+
+       kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       int ret, rc = 0;
+
+       if (!crtc) {
+               DPU_ERROR("invalid argument\n");
+               return -EINVAL;
+       }
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       if (!atomic_read(&dpu_crtc->frame_pending)) {
+               DPU_DEBUG("no frames pending\n");
+               return 0;
+       }
+
+       DPU_ATRACE_BEGIN("frame done completion wait");
+       ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+                       msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+       if (!ret) {
+               DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+               rc = -ETIMEDOUT;
+       }
+       DPU_ATRACE_END("frame done completion wait");
+
+       return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+       struct drm_device *dev;
+       struct dpu_crtc *dpu_crtc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       struct dpu_crtc_state *cstate;
+       int ret;
+
+       if (!crtc) {
+               DPU_ERROR("invalid argument\n");
+               return;
+       }
+       dev = crtc->dev;
+       dpu_crtc = to_dpu_crtc(crtc);
+       dpu_kms = _dpu_crtc_get_kms(crtc);
+
+       if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+               DPU_ERROR("invalid argument\n");
+               return;
+       }
+
+       priv = dpu_kms->dev->dev_private;
+       cstate = to_dpu_crtc_state(crtc->state);
+
+       /*
+        * If no mixers have been allocated in dpu_crtc_atomic_check(),
+        * it means we are trying to start a CRTC whose state is disabled:
+        * nothing else needs to be done.
+        */
+       if (unlikely(!dpu_crtc->num_mixers))
+               return;
+
+       DPU_ATRACE_BEGIN("crtc_commit");
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               struct dpu_encoder_kickoff_params params = { 0 };
+
+               if (encoder->crtc != crtc)
+                       continue;
+
+               /*
+                * Encoder will flush/start now, unless it has a tx pending.
+                * If so, it may delay and flush at an irq event (e.g. ppdone)
+                */
+               dpu_encoder_prepare_for_kickoff(encoder, &params);
+       }
+
+       /* wait for frame_event_done completion */
+       DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+       ret = _dpu_crtc_wait_for_frame_done(crtc);
+       DPU_ATRACE_END("wait_for_frame_done_event");
+       if (ret) {
+               DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+                               crtc->base.id,
+                               atomic_read(&dpu_crtc->frame_pending));
+               goto end;
+       }
+
+       if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+               /* acquire bandwidth and other resources */
+               DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+       } else {
+               DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+       }
+
+       dpu_crtc->play_count++;
+
+       dpu_vbif_clear_errors(dpu_kms);
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               dpu_encoder_kickoff(encoder);
+       }
+
+end:
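+       /* re-arm the frame_done completion for the next wait */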
+       reinit_completion(&dpu_crtc->frame_done_comp);
+       DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * Return: error code
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+               struct dpu_crtc *dpu_crtc, bool enable)
+{
+       struct drm_device *dev;
+       struct drm_crtc *crtc;
+       struct drm_encoder *enc;
+
+       if (!dpu_crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return -EINVAL;
+       }
+
+       crtc = &dpu_crtc->base;
+       dev = crtc->dev;
+
+       if (enable) {
+               int ret;
+
+               /* drop lock since power crtc cb may try to re-acquire lock */
+               mutex_unlock(&dpu_crtc->crtc_lock);
+               ret = _dpu_crtc_power_enable(dpu_crtc, true);
+               mutex_lock(&dpu_crtc->crtc_lock);
+               if (ret)
+                       return ret;
+
+               list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+                       if (enc->crtc != crtc)
+                               continue;
+
+                       trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+                                                    DRMID(enc), enable,
+                                                    dpu_crtc);
+
+                       dpu_encoder_register_vblank_callback(enc,
+                                       dpu_crtc_vblank_cb, (void *)crtc);
+               }
+       } else {
+               list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+                       if (enc->crtc != crtc)
+                               continue;
+
+                       trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+                                                    DRMID(enc), enable,
+                                                    dpu_crtc);
+
+                       dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+               }
+
+               /* drop lock since power crtc cb may try to re-acquire lock */
+               mutex_unlock(&dpu_crtc->crtc_lock);
+               _dpu_crtc_power_enable(dpu_crtc, false);
+               mutex_lock(&dpu_crtc->crtc_lock);
+       }
+
+       return 0;
+}
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       int ret = 0;
+
+       if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+       dpu_crtc = to_dpu_crtc(crtc);
+       priv = crtc->dev->dev_private;
+
+       if (!priv->kms) {
+               DPU_ERROR("invalid crtc kms\n");
+               return;
+       }
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+
+       /*
+        * If the vblank is enabled, release a power reference on suspend
+        * and take it back during resume (if it is still enabled).
+        */
+       trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+       if (dpu_crtc->suspend == enable)
+               DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+                               crtc->base.id, enable);
+       else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+               ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+               if (ret)
+                       DPU_ERROR("%s vblank enable failed: %d\n",
+                                       dpu_crtc->name, ret);
+       }
+
+       dpu_crtc->suspend = enable;
+       mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * Return: Pointer to the new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *cstate, *old_cstate;
+
+       if (!crtc || !crtc->state) {
+               DPU_ERROR("invalid argument(s)\n");
+               return NULL;
+       }
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       old_cstate = to_dpu_crtc_state(crtc->state);
+       cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+       if (!cstate) {
+               DPU_ERROR("failed to allocate state\n");
+               return NULL;
+       }
+
+       /* duplicate base helper */
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+       _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+       return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *cstate;
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+
+       /* revert suspend actions, if necessary */
+       if (dpu_kms_is_suspend_state(crtc->dev))
+               _dpu_crtc_set_suspend(crtc, false);
+
+       /* remove previous state, if present */
+       if (crtc->state) {
+               dpu_crtc_destroy_state(crtc, crtc->state);
+               crtc->state = NULL;
+       }
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+       if (!cstate) {
+               DPU_ERROR("failed to allocate state\n");
+               return;
+       }
+
+       _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+                       &dpu_crtc->rp_head);
+
+       cstate->base.crtc = crtc;
+       crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+       struct drm_crtc *crtc = arg;
+       struct dpu_crtc *dpu_crtc;
+       struct drm_encoder *encoder;
+       struct dpu_crtc_mixer *m;
+       u32 i, misr_status;
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+
+       trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+       switch (event_type) {
+       case DPU_POWER_EVENT_POST_ENABLE:
+               /* restore encoder; crtc will be programmed during commit */
+               drm_for_each_encoder(encoder, crtc->dev) {
+                       if (encoder->crtc != crtc)
+                               continue;
+
+                       dpu_encoder_virt_restore(encoder);
+               }
+
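+               /* re-arm MISR collection on the mixers after power-up */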
+               for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+                       m = &dpu_crtc->mixers[i];
+                       if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+                                       !dpu_crtc->misr_enable)
+                               continue;
+
+                       m->hw_lm->ops.setup_misr(m->hw_lm, true,
+                                       dpu_crtc->misr_frame_count);
+               }
+               break;
+       case DPU_POWER_EVENT_PRE_DISABLE:
+               for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+                       m = &dpu_crtc->mixers[i];
+                       if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+                                       !dpu_crtc->misr_enable)
+                               continue;
+
+                       misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+                       dpu_crtc->misr_data[i] = misr_status ? misr_status :
+                                                       dpu_crtc->misr_data[i];
+               }
+               break;
+       case DPU_POWER_EVENT_POST_DISABLE:
+               /*
+                * Nothing to do. All the planes on the CRTC will be
+                * programmed for every frame.
+                */
+               break;
+       default:
+               DPU_DEBUG("event:%d not handled\n", event_type);
+               break;
+       }
+
+       mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_state *cstate;
+       struct drm_display_mode *mode;
+       struct drm_encoder *encoder;
+       struct msm_drm_private *priv;
+       int ret;
+       unsigned long flags;
+
+       if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+       dpu_crtc = to_dpu_crtc(crtc);
+       cstate = to_dpu_crtc_state(crtc->state);
+       mode = &cstate->base.adjusted_mode;
+       priv = crtc->dev->dev_private;
+
+       DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+       if (dpu_kms_is_suspend_state(crtc->dev))
+               _dpu_crtc_set_suspend(crtc, true);
+
+       /* Disable/save vblank irq handling */
+       drm_crtc_vblank_off(crtc);
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+
+       /* wait for frame_event_done completion */
+       if (_dpu_crtc_wait_for_frame_done(crtc))
+               DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+                               crtc->base.id,
+                               atomic_read(&dpu_crtc->frame_pending));
+
+       trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+       if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+                       dpu_crtc->vblank_requested) {
+               ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+               if (ret)
+                       DPU_ERROR("%s vblank enable failed: %d\n",
+                                       dpu_crtc->name, ret);
+       }
+       dpu_crtc->enabled = false;
+
+       if (atomic_read(&dpu_crtc->frame_pending)) {
+               trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+                                    atomic_read(&dpu_crtc->frame_pending));
+               dpu_core_perf_crtc_release_bw(crtc);
+               atomic_set(&dpu_crtc->frame_pending, 0);
+       }
+
+       dpu_core_perf_crtc_update(crtc, 0, true);
+
+       drm_for_each_encoder(encoder, crtc->dev) {
+               if (encoder->crtc != crtc)
+                       continue;
+               dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+       }
+
+       if (dpu_crtc->power_event)
+               dpu_power_handle_unregister_event(dpu_crtc->phandle,
+                               dpu_crtc->power_event);
+
+       memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+       dpu_crtc->num_mixers = 0;
+       dpu_crtc->mixers_swapped = false;
+
+       /* disable clk & bw control until clk & bw properties are set */
+       cstate->bw_control = false;
+       cstate->bw_split_vote = false;
+
+       mutex_unlock(&dpu_crtc->crtc_lock);
+
+       if (crtc->state->event && !crtc->state->active) {
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               crtc->state->event = NULL;
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+       }
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+               struct drm_crtc_state *old_crtc_state)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct drm_encoder *encoder;
+       struct msm_drm_private *priv;
+       int ret;
+
+       if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+               DPU_ERROR("invalid crtc\n");
+               return;
+       }
+       priv = crtc->dev->dev_private;
+
+       DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       drm_for_each_encoder(encoder, crtc->dev) {
+               if (encoder->crtc != crtc)
+                       continue;
+               dpu_encoder_register_frame_event_callback(encoder,
+                               dpu_crtc_frame_event_cb, (void *)crtc);
+       }
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+       trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+       if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+                       dpu_crtc->vblank_requested) {
+               ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+               if (ret)
+                       DPU_ERROR("%s vblank enable failed: %d\n",
+                                       dpu_crtc->name, ret);
+       }
+       dpu_crtc->enabled = true;
+
+       mutex_unlock(&dpu_crtc->crtc_lock);
+
+       /* Enable/restore vblank irq handling */
+       drm_crtc_vblank_on(crtc);
+
+       dpu_crtc->power_event = dpu_power_handle_register_event(
+               dpu_crtc->phandle,
+               DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+               DPU_POWER_EVENT_PRE_DISABLE,
+               dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+
+}
+
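+/*
+ * Scratch record used by dpu_crtc_atomic_check() to track each staged
+ * plane along with its blend stage and source pipe (SSPP) id.
+ */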
+struct plane_state {
+       struct dpu_plane_state *dpu_pstate;
+       const struct drm_plane_state *drm_pstate;
+       int stage;
+       u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+               struct drm_crtc_state *state)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct plane_state *pstates;
+       struct dpu_crtc_state *cstate;
+
+       const struct drm_plane_state *pstate;
+       struct drm_plane *plane;
+       struct drm_display_mode *mode;
+
+       int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+       struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+       int multirect_count = 0;
+       const struct drm_plane_state *pipe_staged[SSPP_MAX];
+       int left_zpos_cnt = 0, right_zpos_cnt = 0;
+       struct drm_rect crtc_rect = { 0 };
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return -EINVAL;
+       }
+
+       pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
+       if (!pstates)
+               return -ENOMEM;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       cstate = to_dpu_crtc_state(state);
+
+       if (!state->enable || !state->active) {
+               DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+                               crtc->base.id, state->enable, state->active);
+               goto end;
+       }
+
+       mode = &state->adjusted_mode;
+       DPU_DEBUG("%s: check", dpu_crtc->name);
+
+       /* force a full mode set if active state changed */
+       if (state->active_changed)
+               state->mode_changed = true;
+
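+       /* pipe_staged[] remembers the first plane state seen per SSPP */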
+       memset(pipe_staged, 0, sizeof(pipe_staged));
+
+       mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+       _dpu_crtc_setup_lm_bounds(crtc, state);
+
+       crtc_rect.x2 = mode->hdisplay;
+       crtc_rect.y2 = mode->vdisplay;
+
+       /* get plane state for all drm planes associated with crtc state */
+       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+               struct drm_rect dst, clip = crtc_rect;
+
+               if (IS_ERR_OR_NULL(pstate)) {
+                       rc = PTR_ERR(pstate);
+                       DPU_ERROR("%s: failed to get plane%d state, %d\n",
+                                       dpu_crtc->name, plane->base.id, rc);
+                       goto end;
+               }
+               if (cnt >= DPU_STAGE_MAX * 4)
+                       continue;
+
+               pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+               pstates[cnt].drm_pstate = pstate;
+               pstates[cnt].stage = pstate->normalized_zpos;
+               pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
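+               /*
+                * Two plane states mapped to the same SSPP form a
+                * multirect pair (r0 staged first, r1 second); the pair
+                * is validated further below.
+                */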
+               if (pipe_staged[pstates[cnt].pipe_id]) {
+                       multirect_plane[multirect_count].r0 =
+                               pipe_staged[pstates[cnt].pipe_id];
+                       multirect_plane[multirect_count].r1 = pstate;
+                       multirect_count++;
+
+                       pipe_staged[pstates[cnt].pipe_id] = NULL;
+               } else {
+                       pipe_staged[pstates[cnt].pipe_id] = pstate;
+               }
+
+               cnt++;
+
+               dst = drm_plane_state_dest(pstate);
+               if (!drm_rect_intersect(&clip, &dst) ||
+                   !drm_rect_equals(&clip, &dst)) {
+                       DPU_ERROR("invalid vertical/horizontal destination\n");
+                       DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+                                 DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+                                 DRM_RECT_ARG(&dst));
+                       rc = -E2BIG;
+                       goto end;
+               }
+       }
+
+       for (i = 1; i < SSPP_MAX; i++) {
+               if (pipe_staged[i]) {
+                       dpu_plane_clear_multirect(pipe_staged[i]);
+
+                       if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+                               DPU_ERROR(
+                                       "r1 only virt plane:%d not supported\n",
+                                       pipe_staged[i]->plane->base.id);
+                               rc  = -EINVAL;
+                               goto end;
+                       }
+               }
+       }
+
+       z_pos = -1;
+       for (i = 0; i < cnt; i++) {
+               /* reset counts at every new blend stage */
+               if (pstates[i].stage != z_pos) {
+                       left_zpos_cnt = 0;
+                       right_zpos_cnt = 0;
+                       z_pos = pstates[i].stage;
+               }
+
+               /* verify z_pos setting before using it */
+               if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+                       DPU_ERROR("> %d plane stages assigned\n",
+                                       DPU_STAGE_MAX - DPU_STAGE_0);
+                       rc = -EINVAL;
+                       goto end;
+               } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+                       if (left_zpos_cnt == 2) {
+                               DPU_ERROR("> 2 planes @ stage %d on left\n",
+                                       z_pos);
+                               rc = -EINVAL;
+                               goto end;
+                       }
+                       left_zpos_cnt++;
+
+               } else {
+                       if (right_zpos_cnt == 2) {
+                               DPU_ERROR("> 2 planes @ stage %d on right\n",
+                                       z_pos);
+                               rc = -EINVAL;
+                               goto end;
+                       }
+                       right_zpos_cnt++;
+               }
+
+               pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+               DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
+       }
+
+       for (i = 0; i < multirect_count; i++) {
+               if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+                       DPU_ERROR(
+                       "multirect validation failed for planes (%d - %d)\n",
+                                       multirect_plane[i].r0->plane->base.id,
+                                       multirect_plane[i].r1->plane->base.id);
+                       rc = -EINVAL;
+                       goto end;
+               }
+       }
+
+       rc = dpu_core_perf_crtc_check(crtc, state);
+       if (rc) {
+               DPU_ERROR("crtc%d failed performance check %d\n",
+                               crtc->base.id, rc);
+               goto end;
+       }
+
+       /*
+        * Validate source split:
+        * use pstates sorted by stage to check planes on the same stage.
+        * We assume that all pipes are in source split, so it's valid to
+        * compare without taking left/right mixer placement into account.
+        */
+       for (i = 1; i < cnt; i++) {
+               struct plane_state *prv_pstate, *cur_pstate;
+               struct drm_rect left_rect, right_rect;
+               int32_t left_pid, right_pid;
+               int32_t stage;
+
+               prv_pstate = &pstates[i - 1];
+               cur_pstate = &pstates[i];
+               if (prv_pstate->stage != cur_pstate->stage)
+                       continue;
+
+               stage = cur_pstate->stage;
+
+               left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+               left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+               right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+               right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+               if (right_rect.x1 < left_rect.x1) {
+                       swap(left_pid, right_pid);
+                       swap(left_rect, right_rect);
+               }
+
+               /*
+                * - planes are enumerated in pipe-priority order such that
+                *   planes with lower drm_id must be left-most in a shared
+                *   blend-stage when using source split.
+                * - planes in source split must be contiguous in width
+                * - planes in source split must have same dest yoff and height
+                */
+               if (right_pid < left_pid) {
+                       DPU_ERROR(
+                               "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+                               stage, left_pid, right_pid);
+                       rc = -EINVAL;
+                       goto end;
+               } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+                       DPU_ERROR("non-contiguous coordinates for src split. "
+                                 "stage: %d left: " DRM_RECT_FMT " right: "
+                                 DRM_RECT_FMT "\n", stage,
+                                 DRM_RECT_ARG(&left_rect),
+                                 DRM_RECT_ARG(&right_rect));
+                       rc = -EINVAL;
+                       goto end;
+               } else if (left_rect.y1 != right_rect.y1 ||
+                          drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+                       DPU_ERROR("source split at stage: %d. invalid "
+                                 "yoff/height: left: " DRM_RECT_FMT " right: "
+                                 DRM_RECT_FMT "\n", stage,
+                                 DRM_RECT_ARG(&left_rect),
+                                 DRM_RECT_ARG(&right_rect));
+                       rc = -EINVAL;
+                       goto end;
+               }
+       }
+
+end:
+       _dpu_crtc_rp_free_unused(&cstate->rp);
+       kfree(pstates);
+       return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+       struct dpu_crtc *dpu_crtc;
+       int ret;
+
+       if (!crtc) {
+               DPU_ERROR("invalid crtc\n");
+               return -EINVAL;
+       }
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+       trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+       if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+               ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+               if (ret)
+                       DPU_ERROR("%s vblank enable failed: %d\n",
+                                       dpu_crtc->name, ret);
+       }
+       dpu_crtc->vblank_requested = en;
+       mutex_unlock(&dpu_crtc->crtc_lock);
+
+       return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_plane_state *pstate = NULL;
+       struct dpu_crtc_mixer *m;
+
+       struct drm_crtc *crtc;
+       struct drm_plane *plane;
+       struct drm_display_mode *mode;
+       struct drm_framebuffer *fb;
+       struct drm_plane_state *state;
+       struct dpu_crtc_state *cstate;
+
+       int i, out_width;
+
+       if (!s || !s->private)
+               return -EINVAL;
+
+       dpu_crtc = s->private;
+       crtc = &dpu_crtc->base;
+       cstate = to_dpu_crtc_state(crtc->state);
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+       mode = &crtc->state->adjusted_mode;
+       out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+       seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+                               mode->hdisplay, mode->vdisplay);
+
+       seq_puts(s, "\n");
+
+       for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+               m = &dpu_crtc->mixers[i];
+               if (!m->hw_lm)
+                       seq_printf(s, "\tmixer[%d] has no lm\n", i);
+               else if (!m->hw_ctl)
+                       seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+               else
+                       seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+                               m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+                               out_width, mode->vdisplay);
+       }
+
+       seq_puts(s, "\n");
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               pstate = to_dpu_plane_state(plane->state);
+               state = plane->state;
+
+               if (!pstate || !state)
+                       continue;
+
+               seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+                       pstate->stage);
+
+               if (plane->state->fb) {
+                       fb = plane->state->fb;
+
+                       seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+                               fb->base.id, (char *) &fb->format->format,
+                               fb->width, fb->height);
+                       for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+                               seq_printf(s, "cpp[%d]:%u ",
+                                               i, fb->format->cpp[i]);
+                       seq_puts(s, "\n\t");
+
+                       seq_printf(s, "modifier:%8llu ", fb->modifier);
+                       seq_puts(s, "\n");
+
+                       seq_puts(s, "\t");
+                       for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+                               seq_printf(s, "pitches[%d]:%8u ", i,
+                                                       fb->pitches[i]);
+                       seq_puts(s, "\n");
+
+                       seq_puts(s, "\t");
+                       for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+                               seq_printf(s, "offsets[%d]:%8u ", i,
+                                                       fb->offsets[i]);
+                       seq_puts(s, "\n");
+               }
+
+               seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+                       state->src_x, state->src_y, state->src_w, state->src_h);
+
+               seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+                       state->crtc_x, state->crtc_y, state->crtc_w,
+                       state->crtc_h);
+               seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+                       pstate->multirect_mode, pstate->multirect_index);
+
+               seq_puts(s, "\n");
+       }
+       if (dpu_crtc->vblank_cb_count) {
+               ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+               s64 diff_ms = ktime_to_ms(diff);
+               s64 fps = diff_ms ? div_s64(
+                               dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+               seq_printf(s,
+                       "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+                               fps, dpu_crtc->vblank_cb_count,
+                               ktime_to_ms(diff), dpu_crtc->play_count);
+
+               /* reset time & count for next measurement */
+               dpu_crtc->vblank_cb_count = 0;
+               dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+       }
+
+       seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+       mutex_unlock(&dpu_crtc->crtc_lock);
+
+       return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+               const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_mixer *m;
+       int i = 0, rc;
+       char buf[MISR_BUFF_SIZE + 1];
+       u32 frame_count, enable;
+       size_t buff_copy;
+
+       if (!file || !file->private_data)
+               return -EINVAL;
+
+       dpu_crtc = file->private_data;
+       buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+       if (copy_from_user(buf, user_buf, buff_copy)) {
+               DPU_ERROR("buffer copy failed\n");
+               return -EINVAL;
+       }
+
+       buf[buff_copy] = 0; /* end of string */
+
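+       /* expected input: "<enable> <frame_count>" */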
+       if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+               return -EINVAL;
+
+       rc = _dpu_crtc_power_enable(dpu_crtc, true);
+       if (rc)
+               return rc;
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+       dpu_crtc->misr_enable = enable;
+       dpu_crtc->misr_frame_count = frame_count;
+       for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+               dpu_crtc->misr_data[i] = 0;
+               m = &dpu_crtc->mixers[i];
+               if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+                       continue;
+
+               m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+       }
+       mutex_unlock(&dpu_crtc->crtc_lock);
+       _dpu_crtc_power_enable(dpu_crtc, false);
+
+       return count;
+}
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+               char __user *user_buff, size_t count, loff_t *ppos)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc_mixer *m;
+       int i = 0, rc;
+       u32 misr_status;
+       ssize_t len = 0;
+       char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+       if (*ppos)
+               return 0;
+
+       if (!file || !file->private_data)
+               return -EINVAL;
+
+       dpu_crtc = file->private_data;
+       rc = _dpu_crtc_power_enable(dpu_crtc, true);
+       if (rc)
+               return rc;
+
+       mutex_lock(&dpu_crtc->crtc_lock);
+       if (!dpu_crtc->misr_enable) {
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+                       "disabled\n");
+               goto buff_check;
+       }
+
+       for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+               m = &dpu_crtc->mixers[i];
+               if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+                       continue;
+
+               misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+               dpu_crtc->misr_data[i] = misr_status ? misr_status :
+                                                       dpu_crtc->misr_data[i];
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+                                       m->hw_lm->idx - LM_0);
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+                                                       dpu_crtc->misr_data[i]);
+       }
+
+buff_check:
+       if (count <= len) {
+               len = 0;
+               goto end;
+       }
+
+       if (copy_to_user(user_buff, buf, len)) {
+               len = -EFAULT;
+               goto end;
+       }
+
+       *ppos += len;   /* increase offset */
+
+end:
+       mutex_unlock(&dpu_crtc->crtc_lock);
+       _dpu_crtc_power_enable(dpu_crtc, false);
+       return len;
+}
+
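+/*
+ * Generate the single_open() wrapper and file_operations for a seq_file
+ * show function named <__prefix>_show.
+ */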
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
+static int __prefix ## _open(struct inode *inode, struct file *file)   \
+{                                                                      \
+       return single_open(file, __prefix ## _show, inode->i_private);  \
+}                                                                      \
+static const struct file_operations __prefix ## _fops = {              \
+       .owner = THIS_MODULE,                                           \
+       .open = __prefix ## _open,                                      \
+       .release = single_release,                                      \
+       .read = seq_read,                                               \
+       .llseek = seq_lseek,                                            \
+}
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+       struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_crtc_res *res;
+       struct dpu_crtc_respool *rp;
+       int i;
+
+       seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+       seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+       seq_printf(s, "core_clk_rate: %llu\n",
+                       dpu_crtc->cur_perf.core_clk_rate);
+       for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+                       i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               seq_printf(s, "bw_ctl[%s]: %llu\n",
+                               dpu_power_handle_get_dbus_name(i),
+                               dpu_crtc->cur_perf.bw_ctl[i]);
+               seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+                               dpu_power_handle_get_dbus_name(i),
+                               dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+       }
+
+       mutex_lock(&dpu_crtc->rp_lock);
+       list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+               seq_printf(s, "rp.%d: ", rp->sequence_id);
+               list_for_each_entry(res, &rp->res_list, list)
+                       seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+                                       res->type, res->tag, res->val,
+                                       atomic_read(&res->refcount));
+               seq_puts(s, "\n");
+       }
+       mutex_unlock(&dpu_crtc->rp_lock);
+
+       return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+       struct dpu_kms *dpu_kms;
+
+       static const struct file_operations debugfs_status_fops = {
+               .open =         _dpu_debugfs_status_open,
+               .read =         seq_read,
+               .llseek =       seq_lseek,
+               .release =      single_release,
+       };
+       static const struct file_operations debugfs_misr_fops = {
+               .open =         simple_open,
+               .read =         _dpu_crtc_misr_read,
+               .write =        _dpu_crtc_misr_setup,
+       };
+
+       if (!crtc)
+               return -EINVAL;
+       dpu_crtc = to_dpu_crtc(crtc);
+
+       dpu_kms = _dpu_crtc_get_kms(crtc);
+       if (!dpu_kms)
+               return -EINVAL;
+
+       dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+                       crtc->dev->primary->debugfs_root);
+       if (!dpu_crtc->debugfs_root)
+               return -ENOMEM;
+
+       /* don't error check these */
+       debugfs_create_file("status", 0400,
+                       dpu_crtc->debugfs_root,
+                       dpu_crtc, &debugfs_status_fops);
+       debugfs_create_file("state", 0600,
+                       dpu_crtc->debugfs_root,
+                       &dpu_crtc->base,
+                       &dpu_crtc_debugfs_state_fops);
+       debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+                                       dpu_crtc, &debugfs_misr_fops);
+
+       return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+
+       if (!crtc)
+               return;
+       dpu_crtc = to_dpu_crtc(crtc);
+       debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+       return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+       return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+       _dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+       .set_config = drm_atomic_helper_set_config,
+       .destroy = dpu_crtc_destroy,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = dpu_crtc_reset,
+       .atomic_duplicate_state = dpu_crtc_duplicate_state,
+       .atomic_destroy_state = dpu_crtc_destroy_state,
+       .late_register = dpu_crtc_late_register,
+       .early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+       .disable = dpu_crtc_disable,
+       .atomic_enable = dpu_crtc_enable,
+       .atomic_check = dpu_crtc_atomic_check,
+       .atomic_begin = dpu_crtc_atomic_begin,
+       .atomic_flush = dpu_crtc_atomic_flush,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+       struct drm_crtc *crtc = NULL;
+       struct dpu_crtc *dpu_crtc = NULL;
+       struct msm_drm_private *priv = NULL;
+       struct dpu_kms *kms = NULL;
+       int i;
+
+       priv = dev->dev_private;
+       kms = to_dpu_kms(priv->kms);
+
+       dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+       if (!dpu_crtc)
+               return ERR_PTR(-ENOMEM);
+
+       crtc = &dpu_crtc->base;
+       crtc->dev = dev;
+
+       mutex_init(&dpu_crtc->crtc_lock);
+       spin_lock_init(&dpu_crtc->spin_lock);
+       atomic_set(&dpu_crtc->frame_pending, 0);
+
+       mutex_init(&dpu_crtc->rp_lock);
+       INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+       init_completion(&dpu_crtc->frame_done_comp);
+
+       INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
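+       /* pre-allocate frame events and park them on the free list */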
+       for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+               INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+               list_add(&dpu_crtc->frame_events[i].list,
+                               &dpu_crtc->frame_event_list);
+               kthread_init_work(&dpu_crtc->frame_events[i].work,
+                               dpu_crtc_frame_event_work);
+       }
+
+       drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+                               NULL);
+
+       drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+       plane->crtc = crtc;
+
+       /* save user friendly CRTC name for later */
+       snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+       /* initialize event handling */
+       spin_lock_init(&dpu_crtc->event_lock);
+
+       dpu_crtc->phandle = &kms->phandle;
+
+       DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+       return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644 (file)
index 0000000..e87109e
--- /dev/null
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE     12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE      4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ *              voting through apps rsc
+ * @NRT_CLIENT:        Non-RealTime client like WB display
+ *              voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+       RT_CLIENT,
+       NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state:   smmu state
+ * @ATTACHED:   all the context banks are attached.
+ * @DETACHED:   all the context banks are detached.
+ * @ATTACH_ALL_REQ:     transient state of attaching context banks.
+ * @DETACH_ALL_REQ:     transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+       ATTACHED = 0,
+       DETACHED,
+       ATTACH_ALL_REQ,
+       DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+       NONE,
+       PRE_COMMIT,
+       POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+       uint32_t state;
+       uint32_t transition_type;
+       uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm:     LM HW Driver context
+ * @hw_ctl:    CTL Path HW driver context
+ * @encoder:   Encoder attached to this lm & ctl
+ * @mixer_op_mode:     mixer blending operation mode
+ * @flush_mask:        mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+       struct dpu_hw_mixer *hw_lm;
+       struct dpu_hw_ctl *hw_ctl;
+       struct drm_encoder *encoder;
+       u32 mixer_op_mode;
+       u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work:      base work structure
+ * @crtc:      Pointer to crtc handling this event
+ * @list:      event list
+ * @ts:                timestamp at queue entry
+ * @event:     event identifier
+ */
+struct dpu_crtc_frame_event {
+       struct kthread_work work;
+       struct drm_crtc *crtc;
+       struct list_head list;
+       ktime_t ts;
+       u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT       16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base          : Base drm crtc structure
+ * @name          : ASCII description of this crtc
+ * @num_ctls      : Number of ctl paths in use
+ * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ *                  especially in the case of DSC Merge.
+ * @mixers        : List of active mixers
+ * @event         : Pointer to last received drm vblank event. If there is a
+ *                  pending vblank event, this will be non-null.
+ * @vsync_count   : Running count of received vsync events
+ * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
+ * @property_info : Opaque structure for generic property support
+ * @property_defaults : Array of default values for generic property support
+ * @stage_cfg     : H/w mixer stage configuration
+ * @debugfs_root  : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count    : frame count between crtc enable and disable
+ * @vblank_cb_time  : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend         : whether or not a suspend operation is in progress
+ * @enabled       : whether the DPU CRTC is currently enabled. Updated in the
+ *                  commit-thread, not at state-swap time (which is earlier),
+ *                  so it is safe to base decisions on during VBLANK on/off work
+ * @feature_list  : list of color processing features supported on a crtc
+ * @active_list   : list of color processing features that are active
+ * @dirty_list    : list of color processing features that are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock     : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events  : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp    : for frame_event_done synchronization
+ * @event_thread  : Pointer to event handler thread
+ * @event_worker  : Event worker queue
+ * @event_lock    : Spinlock around event handling code
+ * @misr_enable   : boolean flag indicating MISR enable/disable status.
+ * @misr_frame_count  : misr frame count provided by client
+ * @misr_data     : store misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event   : registered power event handle
+ * @cur_perf      : current performance committed to clock/bandwidth driver
+ * @rp_lock       : serialization lock for resource pool
+ * @rp_head       : list of active resource pool
+ * @scl3_cfg_lut  : qseed3 lut config
+ */
+struct dpu_crtc {
+       struct drm_crtc base;
+       char name[DPU_CRTC_NAME_SIZE];
+
+       /* HW Resources reserved for the crtc */
+       u32 num_ctls;
+       u32 num_mixers;
+       bool mixers_swapped;
+       struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+       struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+       struct drm_pending_vblank_event *event;
+       u32 vsync_count;
+
+       struct dpu_hw_stage_cfg stage_cfg;
+       struct dentry *debugfs_root;
+
+       u32 vblank_cb_count;
+       u64 play_count;
+       ktime_t vblank_cb_time;
+       bool vblank_requested;
+       bool suspend;
+       bool enabled;
+
+       struct list_head feature_list;
+       struct list_head active_list;
+       struct list_head dirty_list;
+       struct list_head ad_dirty;
+       struct list_head ad_active;
+
+       struct mutex crtc_lock;
+
+       atomic_t frame_pending;
+       struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+       struct list_head frame_event_list;
+       spinlock_t spin_lock;
+       struct completion frame_done_comp;
+
+       /* for handling internal event thread */
+       spinlock_t event_lock;
+       bool misr_enable;
+       u32 misr_frame_count;
+       u32 misr_data[CRTC_DUAL_MIXERS];
+
+       struct dpu_power_handle *phandle;
+       struct dpu_power_event *power_event;
+
+       struct dpu_core_perf_params cur_perf;
+
+       struct mutex rp_lock;
+       struct list_head rp_head;
+
+       struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+       void *(*get)(void *val, u32 type, u64 tag);
+       void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE         BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+       struct list_head list;
+       u32 type;
+       u64 tag;
+       atomic_t refcount;
+       struct dpu_crtc_res_ops ops;
+       void *val;
+       u32 flags;
+};
+
+/**
+ * dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+       struct mutex *rp_lock;
+       struct list_head *rp_head;
+       struct list_head rp_list;
+       u32 sequence_id;
+       struct list_head res_list;
+       struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
+ *                  Origin top left of CRTC.
+ * @property_state: Local storage for msm_prop properties
+ * @property_values: Current crtc property values
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ */
+struct dpu_crtc_state {
+       struct drm_crtc_state base;
+
+       bool bw_control;
+       bool bw_split_vote;
+
+       bool is_ppsplit;
+       struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+       uint64_t input_fence_timeout_ns;
+
+       struct dpu_core_perf_params new_perf;
+       struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+       container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split).
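+ * For example, a 1920 px wide mode split across two mixers gives 960 px each.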
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+       struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+       u32 mixer_width;
+
+       if (!dpu_crtc || !cstate || !mode)
+               return 0;
+
+       mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+                       mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+       return mixer_width;
+}
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height.
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+               struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+       if (!dpu_crtc || !cstate || !mode)
+               return 0;
+
+       return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+       struct dpu_crtc *dpu_crtc;
+
+       if (!crtc)
+               return -EINVAL;
+
+       dpu_crtc = to_dpu_crtc(crtc);
+       return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+               struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+               struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc client type (RT, NRT, etc.)
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+                                               struct drm_crtc *crtc)
+{
+       struct dpu_crtc_state *cstate =
+                       crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+       if (!cstate)
+               return NRT_CLIENT;
+
+       return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+       return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644 (file)
index 0000000..ae2aee7
--- /dev/null
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU     DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT  DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN      80
+
+#define DBGBUS_FLAGS_DSPP      BIT(0)
+#define DBGBUS_DSPP_STATUS     0x34C
+
+#define DBGBUS_NAME_DPU                "dpu"
+#define DBGBUS_NAME_VBIF_RT    "vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0   0x188
+#define DBGBUS_AXI_INTF        0x194
+#define DBGBUS_SSPP1   0x298
+#define DBGBUS_DSPP    0x348
+#define DBGBUS_PERIPH  0x418
+
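+/*
+ * Compose a debug bus test mask: block id from bit 4 upward, test
+ * point from bit 1, bit 0 enables the test point.
+ */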
+#define TEST_MASK(id, tp)      ((id << 4) | (tp << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON                        0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL    0x210
+#define MMSS_VBIF_TEST_BUS_OUT         0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR              0x190
+#define MMSS_VBIF_SRC_ERR              0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1       0x204
+#define MMSS_VBIF_ERR_INFO             0x1a0
+#define MMSS_VBIF_ERR_INFO_1           0x1a4
+#define MMSS_VBIF_CLIENT_NUM           14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ *     may have sub-ranges: sub-ranges are used for dumping
+ *     or may not have sub-ranges: dumping is base -> max_offset
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+       struct list_head reg_base_head;
+       char name[REG_BASE_NAME_LEN];
+       void __iomem *base;
+       size_t off;
+       size_t cnt;
+       size_t max_offset;
+       char *buf;
+       size_t buf_len;
+       void (*cb)(void *ptr);
+       void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+       u32 wr_addr;
+       u32 block_id;
+       u32 test_id;
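+       /* optional hook to interpret the sampled value and log anomalies */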
+       void (*analyzer)(void __iomem *mem_base,
+                               struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+       u32 disable_bus_addr;
+       u32 block_bus_addr;
+       u32 bit_offset;
+       u32 block_cnt;
+       u32 test_pnt_start;
+       u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+       char *name;
+       u32 enable_mask;
+       bool include_in_deferred_work;
+       u32 flags;
+       u32 entries_size;
+       u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+       struct dpu_dbg_debug_bus_common cmn;
+       struct dpu_debug_bus_entry *entries;
+       u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+       struct dpu_dbg_debug_bus_common cmn;
+       struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+       struct list_head reg_base_list;
+       struct device *dev;
+
+       struct work_struct dump_work;
+
+       struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+       struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+               struct dpu_debug_bus_entry *entry, u32 val)
+{
+       dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+                       entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+               struct dpu_debug_bus_entry *entry, u32 val)
+{
+       if (!(val & 0xFFF000))
+               return;
+
+       dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+                       entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+               struct dpu_debug_bus_entry *entry, u32 val)
+{
+       if (!(val & BIT(15)))
+               return;
+
+       dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+                       entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+               struct dpu_debug_bus_entry *entry, u32 val)
+{
+       if (!(val & BIT(15)))
+               return;
+
+       dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+                       entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
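+/* entries are { wr_addr, block_id, test_id[, analyzer] }; analyzer is optional */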
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+       /* Unpack 0 sspp 0 */
+       { DBGBUS_SSPP0, 50, 2 },
+       { DBGBUS_SSPP0, 60, 2 },
+       { DBGBUS_SSPP0, 70, 2 },
+       { DBGBUS_SSPP0, 85, 2 },
+
+       /* Unpack 0 sspp 1 */
+       { DBGBUS_SSPP1, 50, 2 },
+       { DBGBUS_SSPP1, 60, 2 },
+       { DBGBUS_SSPP1, 70, 2 },
+       { DBGBUS_SSPP1, 85, 2 },
+
+       /* scheduler */
+       { DBGBUS_DSPP, 130, 0 },
+       { DBGBUS_DSPP, 130, 1 },
+       { DBGBUS_DSPP, 130, 2 },
+       { DBGBUS_DSPP, 130, 3 },
+       { DBGBUS_DSPP, 130, 4 },
+       { DBGBUS_DSPP, 130, 5 },
+
+       /* qseed */
+       { DBGBUS_SSPP0, 6, 0},
+       { DBGBUS_SSPP0, 6, 1},
+       { DBGBUS_SSPP0, 26, 0},
+       { DBGBUS_SSPP0, 26, 1},
+       { DBGBUS_SSPP1, 6, 0},
+       { DBGBUS_SSPP1, 6, 1},
+       { DBGBUS_SSPP1, 26, 0},
+       { DBGBUS_SSPP1, 26, 1},
+
+       /* scale */
+       { DBGBUS_SSPP0, 16, 0},
+       { DBGBUS_SSPP0, 16, 1},
+       { DBGBUS_SSPP0, 36, 0},
+       { DBGBUS_SSPP0, 36, 1},
+       { DBGBUS_SSPP1, 16, 0},
+       { DBGBUS_SSPP1, 16, 1},
+       { DBGBUS_SSPP1, 36, 0},
+       { DBGBUS_SSPP1, 36, 1},
+
+       /* fetch sspp0 */
+
+       /* vig 0 */
+       { DBGBUS_SSPP0, 0, 0 },
+       { DBGBUS_SSPP0, 0, 1 },
+       { DBGBUS_SSPP0, 0, 2 },
+       { DBGBUS_SSPP0, 0, 3 },
+       { DBGBUS_SSPP0, 0, 4 },
+       { DBGBUS_SSPP0, 0, 5 },
+       { DBGBUS_SSPP0, 0, 6 },
+       { DBGBUS_SSPP0, 0, 7 },
+
+       { DBGBUS_SSPP0, 1, 0 },
+       { DBGBUS_SSPP0, 1, 1 },
+       { DBGBUS_SSPP0, 1, 2 },
+       { DBGBUS_SSPP0, 1, 3 },
+       { DBGBUS_SSPP0, 1, 4 },
+       { DBGBUS_SSPP0, 1, 5 },
+       { DBGBUS_SSPP0, 1, 6 },
+       { DBGBUS_SSPP0, 1, 7 },
+
+       { DBGBUS_SSPP0, 2, 0 },
+       { DBGBUS_SSPP0, 2, 1 },
+       { DBGBUS_SSPP0, 2, 2 },
+       { DBGBUS_SSPP0, 2, 3 },
+       { DBGBUS_SSPP0, 2, 4 },
+       { DBGBUS_SSPP0, 2, 5 },
+       { DBGBUS_SSPP0, 2, 6 },
+       { DBGBUS_SSPP0, 2, 7 },
+
+       { DBGBUS_SSPP0, 4, 0 },
+       { DBGBUS_SSPP0, 4, 1 },
+       { DBGBUS_SSPP0, 4, 2 },
+       { DBGBUS_SSPP0, 4, 3 },
+       { DBGBUS_SSPP0, 4, 4 },
+       { DBGBUS_SSPP0, 4, 5 },
+       { DBGBUS_SSPP0, 4, 6 },
+       { DBGBUS_SSPP0, 4, 7 },
+
+       { DBGBUS_SSPP0, 5, 0 },
+       { DBGBUS_SSPP0, 5, 1 },
+       { DBGBUS_SSPP0, 5, 2 },
+       { DBGBUS_SSPP0, 5, 3 },
+       { DBGBUS_SSPP0, 5, 4 },
+       { DBGBUS_SSPP0, 5, 5 },
+       { DBGBUS_SSPP0, 5, 6 },
+       { DBGBUS_SSPP0, 5, 7 },
+
+       /* vig 2 */
+       { DBGBUS_SSPP0, 20, 0 },
+       { DBGBUS_SSPP0, 20, 1 },
+       { DBGBUS_SSPP0, 20, 2 },
+       { DBGBUS_SSPP0, 20, 3 },
+       { DBGBUS_SSPP0, 20, 4 },
+       { DBGBUS_SSPP0, 20, 5 },
+       { DBGBUS_SSPP0, 20, 6 },
+       { DBGBUS_SSPP0, 20, 7 },
+
+       { DBGBUS_SSPP0, 21, 0 },
+       { DBGBUS_SSPP0, 21, 1 },
+       { DBGBUS_SSPP0, 21, 2 },
+       { DBGBUS_SSPP0, 21, 3 },
+       { DBGBUS_SSPP0, 21, 4 },
+       { DBGBUS_SSPP0, 21, 5 },
+       { DBGBUS_SSPP0, 21, 6 },
+       { DBGBUS_SSPP0, 21, 7 },
+
+       { DBGBUS_SSPP0, 22, 0 },
+       { DBGBUS_SSPP0, 22, 1 },
+       { DBGBUS_SSPP0, 22, 2 },
+       { DBGBUS_SSPP0, 22, 3 },
+       { DBGBUS_SSPP0, 22, 4 },
+       { DBGBUS_SSPP0, 22, 5 },
+       { DBGBUS_SSPP0, 22, 6 },
+       { DBGBUS_SSPP0, 22, 7 },
+
+       { DBGBUS_SSPP0, 24, 0 },
+       { DBGBUS_SSPP0, 24, 1 },
+       { DBGBUS_SSPP0, 24, 2 },
+       { DBGBUS_SSPP0, 24, 3 },
+       { DBGBUS_SSPP0, 24, 4 },
+       { DBGBUS_SSPP0, 24, 5 },
+       { DBGBUS_SSPP0, 24, 6 },
+       { DBGBUS_SSPP0, 24, 7 },
+
+       { DBGBUS_SSPP0, 25, 0 },
+       { DBGBUS_SSPP0, 25, 1 },
+       { DBGBUS_SSPP0, 25, 2 },
+       { DBGBUS_SSPP0, 25, 3 },
+       { DBGBUS_SSPP0, 25, 4 },
+       { DBGBUS_SSPP0, 25, 5 },
+       { DBGBUS_SSPP0, 25, 6 },
+       { DBGBUS_SSPP0, 25, 7 },
+
+       /* dma 2 */
+       { DBGBUS_SSPP0, 30, 0 },
+       { DBGBUS_SSPP0, 30, 1 },
+       { DBGBUS_SSPP0, 30, 2 },
+       { DBGBUS_SSPP0, 30, 3 },
+       { DBGBUS_SSPP0, 30, 4 },
+       { DBGBUS_SSPP0, 30, 5 },
+       { DBGBUS_SSPP0, 30, 6 },
+       { DBGBUS_SSPP0, 30, 7 },
+
+       { DBGBUS_SSPP0, 31, 0 },
+       { DBGBUS_SSPP0, 31, 1 },
+       { DBGBUS_SSPP0, 31, 2 },
+       { DBGBUS_SSPP0, 31, 3 },
+       { DBGBUS_SSPP0, 31, 4 },
+       { DBGBUS_SSPP0, 31, 5 },
+       { DBGBUS_SSPP0, 31, 6 },
+       { DBGBUS_SSPP0, 31, 7 },
+
+       { DBGBUS_SSPP0, 32, 0 },
+       { DBGBUS_SSPP0, 32, 1 },
+       { DBGBUS_SSPP0, 32, 2 },
+       { DBGBUS_SSPP0, 32, 3 },
+       { DBGBUS_SSPP0, 32, 4 },
+       { DBGBUS_SSPP0, 32, 5 },
+       { DBGBUS_SSPP0, 32, 6 },
+       { DBGBUS_SSPP0, 32, 7 },
+
+       { DBGBUS_SSPP0, 33, 0 },
+       { DBGBUS_SSPP0, 33, 1 },
+       { DBGBUS_SSPP0, 33, 2 },
+       { DBGBUS_SSPP0, 33, 3 },
+       { DBGBUS_SSPP0, 33, 4 },
+       { DBGBUS_SSPP0, 33, 5 },
+       { DBGBUS_SSPP0, 33, 6 },
+       { DBGBUS_SSPP0, 33, 7 },
+
+       { DBGBUS_SSPP0, 34, 0 },
+       { DBGBUS_SSPP0, 34, 1 },
+       { DBGBUS_SSPP0, 34, 2 },
+       { DBGBUS_SSPP0, 34, 3 },
+       { DBGBUS_SSPP0, 34, 4 },
+       { DBGBUS_SSPP0, 34, 5 },
+       { DBGBUS_SSPP0, 34, 6 },
+       { DBGBUS_SSPP0, 34, 7 },
+
+       { DBGBUS_SSPP0, 35, 0 },
+       { DBGBUS_SSPP0, 35, 1 },
+       { DBGBUS_SSPP0, 35, 2 },
+       { DBGBUS_SSPP0, 35, 3 },
+
+       /* dma 0 */
+       { DBGBUS_SSPP0, 40, 0 },
+       { DBGBUS_SSPP0, 40, 1 },
+       { DBGBUS_SSPP0, 40, 2 },
+       { DBGBUS_SSPP0, 40, 3 },
+       { DBGBUS_SSPP0, 40, 4 },
+       { DBGBUS_SSPP0, 40, 5 },
+       { DBGBUS_SSPP0, 40, 6 },
+       { DBGBUS_SSPP0, 40, 7 },
+
+       { DBGBUS_SSPP0, 41, 0 },
+       { DBGBUS_SSPP0, 41, 1 },
+       { DBGBUS_SSPP0, 41, 2 },
+       { DBGBUS_SSPP0, 41, 3 },
+       { DBGBUS_SSPP0, 41, 4 },
+       { DBGBUS_SSPP0, 41, 5 },
+       { DBGBUS_SSPP0, 41, 6 },
+       { DBGBUS_SSPP0, 41, 7 },
+
+       { DBGBUS_SSPP0, 42, 0 },
+       { DBGBUS_SSPP0, 42, 1 },
+       { DBGBUS_SSPP0, 42, 2 },
+       { DBGBUS_SSPP0, 42, 3 },
+       { DBGBUS_SSPP0, 42, 4 },
+       { DBGBUS_SSPP0, 42, 5 },
+       { DBGBUS_SSPP0, 42, 6 },
+       { DBGBUS_SSPP0, 42, 7 },
+
+       { DBGBUS_SSPP0, 44, 0 },
+       { DBGBUS_SSPP0, 44, 1 },
+       { DBGBUS_SSPP0, 44, 2 },
+       { DBGBUS_SSPP0, 44, 3 },
+       { DBGBUS_SSPP0, 44, 4 },
+       { DBGBUS_SSPP0, 44, 5 },
+       { DBGBUS_SSPP0, 44, 6 },
+       { DBGBUS_SSPP0, 44, 7 },
+
+       { DBGBUS_SSPP0, 45, 0 },
+       { DBGBUS_SSPP0, 45, 1 },
+       { DBGBUS_SSPP0, 45, 2 },
+       { DBGBUS_SSPP0, 45, 3 },
+       { DBGBUS_SSPP0, 45, 4 },
+       { DBGBUS_SSPP0, 45, 5 },
+       { DBGBUS_SSPP0, 45, 6 },
+       { DBGBUS_SSPP0, 45, 7 },
+
+       /* fetch sspp1 */
+       /* vig 1 */
+       { DBGBUS_SSPP1, 0, 0 },
+       { DBGBUS_SSPP1, 0, 1 },
+       { DBGBUS_SSPP1, 0, 2 },
+       { DBGBUS_SSPP1, 0, 3 },
+       { DBGBUS_SSPP1, 0, 4 },
+       { DBGBUS_SSPP1, 0, 5 },
+       { DBGBUS_SSPP1, 0, 6 },
+       { DBGBUS_SSPP1, 0, 7 },
+
+       { DBGBUS_SSPP1, 1, 0 },
+       { DBGBUS_SSPP1, 1, 1 },
+       { DBGBUS_SSPP1, 1, 2 },
+       { DBGBUS_SSPP1, 1, 3 },
+       { DBGBUS_SSPP1, 1, 4 },
+       { DBGBUS_SSPP1, 1, 5 },
+       { DBGBUS_SSPP1, 1, 6 },
+       { DBGBUS_SSPP1, 1, 7 },
+
+       { DBGBUS_SSPP1, 2, 0 },
+       { DBGBUS_SSPP1, 2, 1 },
+       { DBGBUS_SSPP1, 2, 2 },
+       { DBGBUS_SSPP1, 2, 3 },
+       { DBGBUS_SSPP1, 2, 4 },
+       { DBGBUS_SSPP1, 2, 5 },
+       { DBGBUS_SSPP1, 2, 6 },
+       { DBGBUS_SSPP1, 2, 7 },
+
+       { DBGBUS_SSPP1, 4, 0 },
+       { DBGBUS_SSPP1, 4, 1 },
+       { DBGBUS_SSPP1, 4, 2 },
+       { DBGBUS_SSPP1, 4, 3 },
+       { DBGBUS_SSPP1, 4, 4 },
+       { DBGBUS_SSPP1, 4, 5 },
+       { DBGBUS_SSPP1, 4, 6 },
+       { DBGBUS_SSPP1, 4, 7 },
+
+       { DBGBUS_SSPP1, 5, 0 },
+       { DBGBUS_SSPP1, 5, 1 },
+       { DBGBUS_SSPP1, 5, 2 },
+       { DBGBUS_SSPP1, 5, 3 },
+       { DBGBUS_SSPP1, 5, 4 },
+       { DBGBUS_SSPP1, 5, 5 },
+       { DBGBUS_SSPP1, 5, 6 },
+       { DBGBUS_SSPP1, 5, 7 },
+
+       /* vig 3 */
+       { DBGBUS_SSPP1, 20, 0 },
+       { DBGBUS_SSPP1, 20, 1 },
+       { DBGBUS_SSPP1, 20, 2 },
+       { DBGBUS_SSPP1, 20, 3 },
+       { DBGBUS_SSPP1, 20, 4 },
+       { DBGBUS_SSPP1, 20, 5 },
+       { DBGBUS_SSPP1, 20, 6 },
+       { DBGBUS_SSPP1, 20, 7 },
+
+       { DBGBUS_SSPP1, 21, 0 },
+       { DBGBUS_SSPP1, 21, 1 },
+       { DBGBUS_SSPP1, 21, 2 },
+       { DBGBUS_SSPP1, 21, 3 },
+       { DBGBUS_SSPP1, 21, 4 },
+       { DBGBUS_SSPP1, 21, 5 },
+       { DBGBUS_SSPP1, 21, 6 },
+       { DBGBUS_SSPP1, 21, 7 },
+
+       { DBGBUS_SSPP1, 22, 0 },
+       { DBGBUS_SSPP1, 22, 1 },
+       { DBGBUS_SSPP1, 22, 2 },
+       { DBGBUS_SSPP1, 22, 3 },
+       { DBGBUS_SSPP1, 22, 4 },
+       { DBGBUS_SSPP1, 22, 5 },
+       { DBGBUS_SSPP1, 22, 6 },
+       { DBGBUS_SSPP1, 22, 7 },
+
+       { DBGBUS_SSPP1, 24, 0 },
+       { DBGBUS_SSPP1, 24, 1 },
+       { DBGBUS_SSPP1, 24, 2 },
+       { DBGBUS_SSPP1, 24, 3 },
+       { DBGBUS_SSPP1, 24, 4 },
+       { DBGBUS_SSPP1, 24, 5 },
+       { DBGBUS_SSPP1, 24, 6 },
+       { DBGBUS_SSPP1, 24, 7 },
+
+       { DBGBUS_SSPP1, 25, 0 },
+       { DBGBUS_SSPP1, 25, 1 },
+       { DBGBUS_SSPP1, 25, 2 },
+       { DBGBUS_SSPP1, 25, 3 },
+       { DBGBUS_SSPP1, 25, 4 },
+       { DBGBUS_SSPP1, 25, 5 },
+       { DBGBUS_SSPP1, 25, 6 },
+       { DBGBUS_SSPP1, 25, 7 },
+
+       /* dma 3 */
+       { DBGBUS_SSPP1, 30, 0 },
+       { DBGBUS_SSPP1, 30, 1 },
+       { DBGBUS_SSPP1, 30, 2 },
+       { DBGBUS_SSPP1, 30, 3 },
+       { DBGBUS_SSPP1, 30, 4 },
+       { DBGBUS_SSPP1, 30, 5 },
+       { DBGBUS_SSPP1, 30, 6 },
+       { DBGBUS_SSPP1, 30, 7 },
+
+       { DBGBUS_SSPP1, 31, 0 },
+       { DBGBUS_SSPP1, 31, 1 },
+       { DBGBUS_SSPP1, 31, 2 },
+       { DBGBUS_SSPP1, 31, 3 },
+       { DBGBUS_SSPP1, 31, 4 },
+       { DBGBUS_SSPP1, 31, 5 },
+       { DBGBUS_SSPP1, 31, 6 },
+       { DBGBUS_SSPP1, 31, 7 },
+
+       { DBGBUS_SSPP1, 32, 0 },
+       { DBGBUS_SSPP1, 32, 1 },
+       { DBGBUS_SSPP1, 32, 2 },
+       { DBGBUS_SSPP1, 32, 3 },
+       { DBGBUS_SSPP1, 32, 4 },
+       { DBGBUS_SSPP1, 32, 5 },
+       { DBGBUS_SSPP1, 32, 6 },
+       { DBGBUS_SSPP1, 32, 7 },
+
+       { DBGBUS_SSPP1, 33, 0 },
+       { DBGBUS_SSPP1, 33, 1 },
+       { DBGBUS_SSPP1, 33, 2 },
+       { DBGBUS_SSPP1, 33, 3 },
+       { DBGBUS_SSPP1, 33, 4 },
+       { DBGBUS_SSPP1, 33, 5 },
+       { DBGBUS_SSPP1, 33, 6 },
+       { DBGBUS_SSPP1, 33, 7 },
+
+       { DBGBUS_SSPP1, 34, 0 },
+       { DBGBUS_SSPP1, 34, 1 },
+       { DBGBUS_SSPP1, 34, 2 },
+       { DBGBUS_SSPP1, 34, 3 },
+       { DBGBUS_SSPP1, 34, 4 },
+       { DBGBUS_SSPP1, 34, 5 },
+       { DBGBUS_SSPP1, 34, 6 },
+       { DBGBUS_SSPP1, 34, 7 },
+
+       { DBGBUS_SSPP1, 35, 0 },
+       { DBGBUS_SSPP1, 35, 1 },
+       { DBGBUS_SSPP1, 35, 2 },
+
+       /* dma 1 */
+       { DBGBUS_SSPP1, 40, 0 },
+       { DBGBUS_SSPP1, 40, 1 },
+       { DBGBUS_SSPP1, 40, 2 },
+       { DBGBUS_SSPP1, 40, 3 },
+       { DBGBUS_SSPP1, 40, 4 },
+       { DBGBUS_SSPP1, 40, 5 },
+       { DBGBUS_SSPP1, 40, 6 },
+       { DBGBUS_SSPP1, 40, 7 },
+
+       { DBGBUS_SSPP1, 41, 0 },
+       { DBGBUS_SSPP1, 41, 1 },
+       { DBGBUS_SSPP1, 41, 2 },
+       { DBGBUS_SSPP1, 41, 3 },
+       { DBGBUS_SSPP1, 41, 4 },
+       { DBGBUS_SSPP1, 41, 5 },
+       { DBGBUS_SSPP1, 41, 6 },
+       { DBGBUS_SSPP1, 41, 7 },
+
+       { DBGBUS_SSPP1, 42, 0 },
+       { DBGBUS_SSPP1, 42, 1 },
+       { DBGBUS_SSPP1, 42, 2 },
+       { DBGBUS_SSPP1, 42, 3 },
+       { DBGBUS_SSPP1, 42, 4 },
+       { DBGBUS_SSPP1, 42, 5 },
+       { DBGBUS_SSPP1, 42, 6 },
+       { DBGBUS_SSPP1, 42, 7 },
+
+       { DBGBUS_SSPP1, 44, 0 },
+       { DBGBUS_SSPP1, 44, 1 },
+       { DBGBUS_SSPP1, 44, 2 },
+       { DBGBUS_SSPP1, 44, 3 },
+       { DBGBUS_SSPP1, 44, 4 },
+       { DBGBUS_SSPP1, 44, 5 },
+       { DBGBUS_SSPP1, 44, 6 },
+       { DBGBUS_SSPP1, 44, 7 },
+
+       { DBGBUS_SSPP1, 45, 0 },
+       { DBGBUS_SSPP1, 45, 1 },
+       { DBGBUS_SSPP1, 45, 2 },
+       { DBGBUS_SSPP1, 45, 3 },
+       { DBGBUS_SSPP1, 45, 4 },
+       { DBGBUS_SSPP1, 45, 5 },
+       { DBGBUS_SSPP1, 45, 6 },
+       { DBGBUS_SSPP1, 45, 7 },
+
+       /* cursor 1 */
+       { DBGBUS_SSPP1, 80, 0 },
+       { DBGBUS_SSPP1, 80, 1 },
+       { DBGBUS_SSPP1, 80, 2 },
+       { DBGBUS_SSPP1, 80, 3 },
+       { DBGBUS_SSPP1, 80, 4 },
+       { DBGBUS_SSPP1, 80, 5 },
+       { DBGBUS_SSPP1, 80, 6 },
+       { DBGBUS_SSPP1, 80, 7 },
+
+       { DBGBUS_SSPP1, 81, 0 },
+       { DBGBUS_SSPP1, 81, 1 },
+       { DBGBUS_SSPP1, 81, 2 },
+       { DBGBUS_SSPP1, 81, 3 },
+       { DBGBUS_SSPP1, 81, 4 },
+       { DBGBUS_SSPP1, 81, 5 },
+       { DBGBUS_SSPP1, 81, 6 },
+       { DBGBUS_SSPP1, 81, 7 },
+
+       { DBGBUS_SSPP1, 82, 0 },
+       { DBGBUS_SSPP1, 82, 1 },
+       { DBGBUS_SSPP1, 82, 2 },
+       { DBGBUS_SSPP1, 82, 3 },
+       { DBGBUS_SSPP1, 82, 4 },
+       { DBGBUS_SSPP1, 82, 5 },
+       { DBGBUS_SSPP1, 82, 6 },
+       { DBGBUS_SSPP1, 82, 7 },
+
+       { DBGBUS_SSPP1, 83, 0 },
+       { DBGBUS_SSPP1, 83, 1 },
+       { DBGBUS_SSPP1, 83, 2 },
+       { DBGBUS_SSPP1, 83, 3 },
+       { DBGBUS_SSPP1, 83, 4 },
+       { DBGBUS_SSPP1, 83, 5 },
+       { DBGBUS_SSPP1, 83, 6 },
+       { DBGBUS_SSPP1, 83, 7 },
+
+       { DBGBUS_SSPP1, 84, 0 },
+       { DBGBUS_SSPP1, 84, 1 },
+       { DBGBUS_SSPP1, 84, 2 },
+       { DBGBUS_SSPP1, 84, 3 },
+       { DBGBUS_SSPP1, 84, 4 },
+       { DBGBUS_SSPP1, 84, 5 },
+       { DBGBUS_SSPP1, 84, 6 },
+       { DBGBUS_SSPP1, 84, 7 },
+
+       /* dspp */
+       { DBGBUS_DSPP, 13, 0 },
+       { DBGBUS_DSPP, 19, 0 },
+       { DBGBUS_DSPP, 14, 0 },
+       { DBGBUS_DSPP, 14, 1 },
+       { DBGBUS_DSPP, 14, 3 },
+       { DBGBUS_DSPP, 20, 0 },
+       { DBGBUS_DSPP, 20, 1 },
+       { DBGBUS_DSPP, 20, 3 },
+
+       /* ppb_0 */
+       { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+       { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+       { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+       { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+       /* ppb_1 */
+       { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+       { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+       { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+       { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+       /* lm_lut */
+       { DBGBUS_DSPP, 109, 0 },
+       { DBGBUS_DSPP, 105, 0 },
+       { DBGBUS_DSPP, 103, 0 },
+
+       /* tear-check */
+       { DBGBUS_PERIPH, 63, 0 },
+       { DBGBUS_PERIPH, 64, 0 },
+       { DBGBUS_PERIPH, 65, 0 },
+       { DBGBUS_PERIPH, 73, 0 },
+       { DBGBUS_PERIPH, 74, 0 },
+
+       /* crossbar */
+       { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+       /* rotator */
+       { DBGBUS_DSPP, 9, 0},
+
+       /* blend */
+       /* LM0 */
+       { DBGBUS_DSPP, 63, 0},
+       { DBGBUS_DSPP, 63, 1},
+       { DBGBUS_DSPP, 63, 2},
+       { DBGBUS_DSPP, 63, 3},
+       { DBGBUS_DSPP, 63, 4},
+       { DBGBUS_DSPP, 63, 5},
+       { DBGBUS_DSPP, 63, 6},
+       { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 64, 0},
+       { DBGBUS_DSPP, 64, 1},
+       { DBGBUS_DSPP, 64, 2},
+       { DBGBUS_DSPP, 64, 3},
+       { DBGBUS_DSPP, 64, 4},
+       { DBGBUS_DSPP, 64, 5},
+       { DBGBUS_DSPP, 64, 6},
+       { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 65, 0},
+       { DBGBUS_DSPP, 65, 1},
+       { DBGBUS_DSPP, 65, 2},
+       { DBGBUS_DSPP, 65, 3},
+       { DBGBUS_DSPP, 65, 4},
+       { DBGBUS_DSPP, 65, 5},
+       { DBGBUS_DSPP, 65, 6},
+       { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 66, 0},
+       { DBGBUS_DSPP, 66, 1},
+       { DBGBUS_DSPP, 66, 2},
+       { DBGBUS_DSPP, 66, 3},
+       { DBGBUS_DSPP, 66, 4},
+       { DBGBUS_DSPP, 66, 5},
+       { DBGBUS_DSPP, 66, 6},
+       { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 67, 0},
+       { DBGBUS_DSPP, 67, 1},
+       { DBGBUS_DSPP, 67, 2},
+       { DBGBUS_DSPP, 67, 3},
+       { DBGBUS_DSPP, 67, 4},
+       { DBGBUS_DSPP, 67, 5},
+       { DBGBUS_DSPP, 67, 6},
+       { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 68, 0},
+       { DBGBUS_DSPP, 68, 1},
+       { DBGBUS_DSPP, 68, 2},
+       { DBGBUS_DSPP, 68, 3},
+       { DBGBUS_DSPP, 68, 4},
+       { DBGBUS_DSPP, 68, 5},
+       { DBGBUS_DSPP, 68, 6},
+       { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 69, 0},
+       { DBGBUS_DSPP, 69, 1},
+       { DBGBUS_DSPP, 69, 2},
+       { DBGBUS_DSPP, 69, 3},
+       { DBGBUS_DSPP, 69, 4},
+       { DBGBUS_DSPP, 69, 5},
+       { DBGBUS_DSPP, 69, 6},
+       { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+       /* LM1 */
+       { DBGBUS_DSPP, 70, 0},
+       { DBGBUS_DSPP, 70, 1},
+       { DBGBUS_DSPP, 70, 2},
+       { DBGBUS_DSPP, 70, 3},
+       { DBGBUS_DSPP, 70, 4},
+       { DBGBUS_DSPP, 70, 5},
+       { DBGBUS_DSPP, 70, 6},
+       { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 71, 0},
+       { DBGBUS_DSPP, 71, 1},
+       { DBGBUS_DSPP, 71, 2},
+       { DBGBUS_DSPP, 71, 3},
+       { DBGBUS_DSPP, 71, 4},
+       { DBGBUS_DSPP, 71, 5},
+       { DBGBUS_DSPP, 71, 6},
+       { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 72, 0},
+       { DBGBUS_DSPP, 72, 1},
+       { DBGBUS_DSPP, 72, 2},
+       { DBGBUS_DSPP, 72, 3},
+       { DBGBUS_DSPP, 72, 4},
+       { DBGBUS_DSPP, 72, 5},
+       { DBGBUS_DSPP, 72, 6},
+       { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 73, 0},
+       { DBGBUS_DSPP, 73, 1},
+       { DBGBUS_DSPP, 73, 2},
+       { DBGBUS_DSPP, 73, 3},
+       { DBGBUS_DSPP, 73, 4},
+       { DBGBUS_DSPP, 73, 5},
+       { DBGBUS_DSPP, 73, 6},
+       { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 74, 0},
+       { DBGBUS_DSPP, 74, 1},
+       { DBGBUS_DSPP, 74, 2},
+       { DBGBUS_DSPP, 74, 3},
+       { DBGBUS_DSPP, 74, 4},
+       { DBGBUS_DSPP, 74, 5},
+       { DBGBUS_DSPP, 74, 6},
+       { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 75, 0},
+       { DBGBUS_DSPP, 75, 1},
+       { DBGBUS_DSPP, 75, 2},
+       { DBGBUS_DSPP, 75, 3},
+       { DBGBUS_DSPP, 75, 4},
+       { DBGBUS_DSPP, 75, 5},
+       { DBGBUS_DSPP, 75, 6},
+       { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 76, 0},
+       { DBGBUS_DSPP, 76, 1},
+       { DBGBUS_DSPP, 76, 2},
+       { DBGBUS_DSPP, 76, 3},
+       { DBGBUS_DSPP, 76, 4},
+       { DBGBUS_DSPP, 76, 5},
+       { DBGBUS_DSPP, 76, 6},
+       { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+       /* LM2 */
+       { DBGBUS_DSPP, 77, 0},
+       { DBGBUS_DSPP, 77, 1},
+       { DBGBUS_DSPP, 77, 2},
+       { DBGBUS_DSPP, 77, 3},
+       { DBGBUS_DSPP, 77, 4},
+       { DBGBUS_DSPP, 77, 5},
+       { DBGBUS_DSPP, 77, 6},
+       { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 78, 0},
+       { DBGBUS_DSPP, 78, 1},
+       { DBGBUS_DSPP, 78, 2},
+       { DBGBUS_DSPP, 78, 3},
+       { DBGBUS_DSPP, 78, 4},
+       { DBGBUS_DSPP, 78, 5},
+       { DBGBUS_DSPP, 78, 6},
+       { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 79, 0},
+       { DBGBUS_DSPP, 79, 1},
+       { DBGBUS_DSPP, 79, 2},
+       { DBGBUS_DSPP, 79, 3},
+       { DBGBUS_DSPP, 79, 4},
+       { DBGBUS_DSPP, 79, 5},
+       { DBGBUS_DSPP, 79, 6},
+       { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 80, 0},
+       { DBGBUS_DSPP, 80, 1},
+       { DBGBUS_DSPP, 80, 2},
+       { DBGBUS_DSPP, 80, 3},
+       { DBGBUS_DSPP, 80, 4},
+       { DBGBUS_DSPP, 80, 5},
+       { DBGBUS_DSPP, 80, 6},
+       { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 81, 0},
+       { DBGBUS_DSPP, 81, 1},
+       { DBGBUS_DSPP, 81, 2},
+       { DBGBUS_DSPP, 81, 3},
+       { DBGBUS_DSPP, 81, 4},
+       { DBGBUS_DSPP, 81, 5},
+       { DBGBUS_DSPP, 81, 6},
+       { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 82, 0},
+       { DBGBUS_DSPP, 82, 1},
+       { DBGBUS_DSPP, 82, 2},
+       { DBGBUS_DSPP, 82, 3},
+       { DBGBUS_DSPP, 82, 4},
+       { DBGBUS_DSPP, 82, 5},
+       { DBGBUS_DSPP, 82, 6},
+       { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 83, 0},
+       { DBGBUS_DSPP, 83, 1},
+       { DBGBUS_DSPP, 83, 2},
+       { DBGBUS_DSPP, 83, 3},
+       { DBGBUS_DSPP, 83, 4},
+       { DBGBUS_DSPP, 83, 5},
+       { DBGBUS_DSPP, 83, 6},
+       { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+       /* csc */
+       { DBGBUS_SSPP0, 7, 0},
+       { DBGBUS_SSPP0, 7, 1},
+       { DBGBUS_SSPP0, 27, 0},
+       { DBGBUS_SSPP0, 27, 1},
+       { DBGBUS_SSPP1, 7, 0},
+       { DBGBUS_SSPP1, 7, 1},
+       { DBGBUS_SSPP1, 27, 0},
+       { DBGBUS_SSPP1, 27, 1},
+
+       /* pcc */
+       { DBGBUS_SSPP0, 3,  3},
+       { DBGBUS_SSPP0, 23, 3},
+       { DBGBUS_SSPP0, 33, 3},
+       { DBGBUS_SSPP0, 43, 3},
+       { DBGBUS_SSPP1, 3,  3},
+       { DBGBUS_SSPP1, 23, 3},
+       { DBGBUS_SSPP1, 33, 3},
+       { DBGBUS_SSPP1, 43, 3},
+
+       /* spa */
+       { DBGBUS_SSPP0, 8,  0},
+       { DBGBUS_SSPP0, 28, 0},
+       { DBGBUS_SSPP1, 8,  0},
+       { DBGBUS_SSPP1, 28, 0},
+       { DBGBUS_DSPP, 13, 0},
+       { DBGBUS_DSPP, 19, 0},
+
+       /* igc */
+       { DBGBUS_SSPP0, 9,  0},
+       { DBGBUS_SSPP0, 9,  1},
+       { DBGBUS_SSPP0, 9,  3},
+       { DBGBUS_SSPP0, 29, 0},
+       { DBGBUS_SSPP0, 29, 1},
+       { DBGBUS_SSPP0, 29, 3},
+       { DBGBUS_SSPP0, 17, 0},
+       { DBGBUS_SSPP0, 17, 1},
+       { DBGBUS_SSPP0, 17, 3},
+       { DBGBUS_SSPP0, 37, 0},
+       { DBGBUS_SSPP0, 37, 1},
+       { DBGBUS_SSPP0, 37, 3},
+       { DBGBUS_SSPP0, 46, 0},
+       { DBGBUS_SSPP0, 46, 1},
+       { DBGBUS_SSPP0, 46, 3},
+
+       { DBGBUS_SSPP1, 9,  0},
+       { DBGBUS_SSPP1, 9,  1},
+       { DBGBUS_SSPP1, 9,  3},
+       { DBGBUS_SSPP1, 29, 0},
+       { DBGBUS_SSPP1, 29, 1},
+       { DBGBUS_SSPP1, 29, 3},
+       { DBGBUS_SSPP1, 17, 0},
+       { DBGBUS_SSPP1, 17, 1},
+       { DBGBUS_SSPP1, 17, 3},
+       { DBGBUS_SSPP1, 37, 0},
+       { DBGBUS_SSPP1, 37, 1},
+       { DBGBUS_SSPP1, 37, 3},
+       { DBGBUS_SSPP1, 46, 0},
+       { DBGBUS_SSPP1, 46, 1},
+       { DBGBUS_SSPP1, 46, 3},
+
+       { DBGBUS_DSPP, 14, 0},
+       { DBGBUS_DSPP, 14, 1},
+       { DBGBUS_DSPP, 14, 3},
+       { DBGBUS_DSPP, 20, 0},
+       { DBGBUS_DSPP, 20, 1},
+       { DBGBUS_DSPP, 20, 3},
+
+       { DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+       /* Unpack 0 sspp 0 */
+       { DBGBUS_SSPP0, 50, 2 },
+       { DBGBUS_SSPP0, 60, 2 },
+       { DBGBUS_SSPP0, 70, 2 },
+
+       /* Unpack 0 sspp 1 */
+       { DBGBUS_SSPP1, 50, 2 },
+       { DBGBUS_SSPP1, 60, 2 },
+       { DBGBUS_SSPP1, 70, 2 },
+
+       /* scheduler */
+       { DBGBUS_DSPP, 130, 0 },
+       { DBGBUS_DSPP, 130, 1 },
+       { DBGBUS_DSPP, 130, 2 },
+       { DBGBUS_DSPP, 130, 3 },
+       { DBGBUS_DSPP, 130, 4 },
+       { DBGBUS_DSPP, 130, 5 },
+
+       /* qseed */
+       { DBGBUS_SSPP0, 6, 0},
+       { DBGBUS_SSPP0, 6, 1},
+       { DBGBUS_SSPP0, 26, 0},
+       { DBGBUS_SSPP0, 26, 1},
+       { DBGBUS_SSPP1, 6, 0},
+       { DBGBUS_SSPP1, 6, 1},
+       { DBGBUS_SSPP1, 26, 0},
+       { DBGBUS_SSPP1, 26, 1},
+
+       /* scale */
+       { DBGBUS_SSPP0, 16, 0},
+       { DBGBUS_SSPP0, 16, 1},
+       { DBGBUS_SSPP0, 36, 0},
+       { DBGBUS_SSPP0, 36, 1},
+       { DBGBUS_SSPP1, 16, 0},
+       { DBGBUS_SSPP1, 16, 1},
+       { DBGBUS_SSPP1, 36, 0},
+       { DBGBUS_SSPP1, 36, 1},
+
+       /* fetch sspp0 */
+
+       /* vig 0 */
+       { DBGBUS_SSPP0, 0, 0 },
+       { DBGBUS_SSPP0, 0, 1 },
+       { DBGBUS_SSPP0, 0, 2 },
+       { DBGBUS_SSPP0, 0, 3 },
+       { DBGBUS_SSPP0, 0, 4 },
+       { DBGBUS_SSPP0, 0, 5 },
+       { DBGBUS_SSPP0, 0, 6 },
+       { DBGBUS_SSPP0, 0, 7 },
+
+       { DBGBUS_SSPP0, 1, 0 },
+       { DBGBUS_SSPP0, 1, 1 },
+       { DBGBUS_SSPP0, 1, 2 },
+       { DBGBUS_SSPP0, 1, 3 },
+       { DBGBUS_SSPP0, 1, 4 },
+       { DBGBUS_SSPP0, 1, 5 },
+       { DBGBUS_SSPP0, 1, 6 },
+       { DBGBUS_SSPP0, 1, 7 },
+
+       { DBGBUS_SSPP0, 2, 0 },
+       { DBGBUS_SSPP0, 2, 1 },
+       { DBGBUS_SSPP0, 2, 2 },
+       { DBGBUS_SSPP0, 2, 3 },
+       { DBGBUS_SSPP0, 2, 4 },
+       { DBGBUS_SSPP0, 2, 5 },
+       { DBGBUS_SSPP0, 2, 6 },
+       { DBGBUS_SSPP0, 2, 7 },
+
+       { DBGBUS_SSPP0, 4, 0 },
+       { DBGBUS_SSPP0, 4, 1 },
+       { DBGBUS_SSPP0, 4, 2 },
+       { DBGBUS_SSPP0, 4, 3 },
+       { DBGBUS_SSPP0, 4, 4 },
+       { DBGBUS_SSPP0, 4, 5 },
+       { DBGBUS_SSPP0, 4, 6 },
+       { DBGBUS_SSPP0, 4, 7 },
+
+       { DBGBUS_SSPP0, 5, 0 },
+       { DBGBUS_SSPP0, 5, 1 },
+       { DBGBUS_SSPP0, 5, 2 },
+       { DBGBUS_SSPP0, 5, 3 },
+       { DBGBUS_SSPP0, 5, 4 },
+       { DBGBUS_SSPP0, 5, 5 },
+       { DBGBUS_SSPP0, 5, 6 },
+       { DBGBUS_SSPP0, 5, 7 },
+
+       /* vig 2 */
+       { DBGBUS_SSPP0, 20, 0 },
+       { DBGBUS_SSPP0, 20, 1 },
+       { DBGBUS_SSPP0, 20, 2 },
+       { DBGBUS_SSPP0, 20, 3 },
+       { DBGBUS_SSPP0, 20, 4 },
+       { DBGBUS_SSPP0, 20, 5 },
+       { DBGBUS_SSPP0, 20, 6 },
+       { DBGBUS_SSPP0, 20, 7 },
+
+       { DBGBUS_SSPP0, 21, 0 },
+       { DBGBUS_SSPP0, 21, 1 },
+       { DBGBUS_SSPP0, 21, 2 },
+       { DBGBUS_SSPP0, 21, 3 },
+       { DBGBUS_SSPP0, 21, 4 },
+       { DBGBUS_SSPP0, 21, 5 },
+       { DBGBUS_SSPP0, 21, 6 },
+       { DBGBUS_SSPP0, 21, 7 },
+
+       { DBGBUS_SSPP0, 22, 0 },
+       { DBGBUS_SSPP0, 22, 1 },
+       { DBGBUS_SSPP0, 22, 2 },
+       { DBGBUS_SSPP0, 22, 3 },
+       { DBGBUS_SSPP0, 22, 4 },
+       { DBGBUS_SSPP0, 22, 5 },
+       { DBGBUS_SSPP0, 22, 6 },
+       { DBGBUS_SSPP0, 22, 7 },
+
+       { DBGBUS_SSPP0, 24, 0 },
+       { DBGBUS_SSPP0, 24, 1 },
+       { DBGBUS_SSPP0, 24, 2 },
+       { DBGBUS_SSPP0, 24, 3 },
+       { DBGBUS_SSPP0, 24, 4 },
+       { DBGBUS_SSPP0, 24, 5 },
+       { DBGBUS_SSPP0, 24, 6 },
+       { DBGBUS_SSPP0, 24, 7 },
+
+       { DBGBUS_SSPP0, 25, 0 },
+       { DBGBUS_SSPP0, 25, 1 },
+       { DBGBUS_SSPP0, 25, 2 },
+       { DBGBUS_SSPP0, 25, 3 },
+       { DBGBUS_SSPP0, 25, 4 },
+       { DBGBUS_SSPP0, 25, 5 },
+       { DBGBUS_SSPP0, 25, 6 },
+       { DBGBUS_SSPP0, 25, 7 },
+
+       /* dma 2 */
+       { DBGBUS_SSPP0, 30, 0 },
+       { DBGBUS_SSPP0, 30, 1 },
+       { DBGBUS_SSPP0, 30, 2 },
+       { DBGBUS_SSPP0, 30, 3 },
+       { DBGBUS_SSPP0, 30, 4 },
+       { DBGBUS_SSPP0, 30, 5 },
+       { DBGBUS_SSPP0, 30, 6 },
+       { DBGBUS_SSPP0, 30, 7 },
+
+       { DBGBUS_SSPP0, 31, 0 },
+       { DBGBUS_SSPP0, 31, 1 },
+       { DBGBUS_SSPP0, 31, 2 },
+       { DBGBUS_SSPP0, 31, 3 },
+       { DBGBUS_SSPP0, 31, 4 },
+       { DBGBUS_SSPP0, 31, 5 },
+       { DBGBUS_SSPP0, 31, 6 },
+       { DBGBUS_SSPP0, 31, 7 },
+
+       { DBGBUS_SSPP0, 32, 0 },
+       { DBGBUS_SSPP0, 32, 1 },
+       { DBGBUS_SSPP0, 32, 2 },
+       { DBGBUS_SSPP0, 32, 3 },
+       { DBGBUS_SSPP0, 32, 4 },
+       { DBGBUS_SSPP0, 32, 5 },
+       { DBGBUS_SSPP0, 32, 6 },
+       { DBGBUS_SSPP0, 32, 7 },
+
+       { DBGBUS_SSPP0, 33, 0 },
+       { DBGBUS_SSPP0, 33, 1 },
+       { DBGBUS_SSPP0, 33, 2 },
+       { DBGBUS_SSPP0, 33, 3 },
+       { DBGBUS_SSPP0, 33, 4 },
+       { DBGBUS_SSPP0, 33, 5 },
+       { DBGBUS_SSPP0, 33, 6 },
+       { DBGBUS_SSPP0, 33, 7 },
+
+       { DBGBUS_SSPP0, 34, 0 },
+       { DBGBUS_SSPP0, 34, 1 },
+       { DBGBUS_SSPP0, 34, 2 },
+       { DBGBUS_SSPP0, 34, 3 },
+       { DBGBUS_SSPP0, 34, 4 },
+       { DBGBUS_SSPP0, 34, 5 },
+       { DBGBUS_SSPP0, 34, 6 },
+       { DBGBUS_SSPP0, 34, 7 },
+
+       { DBGBUS_SSPP0, 35, 0 },
+       { DBGBUS_SSPP0, 35, 1 },
+       { DBGBUS_SSPP0, 35, 2 },
+       { DBGBUS_SSPP0, 35, 3 },
+
+       /* dma 0 */
+       { DBGBUS_SSPP0, 40, 0 },
+       { DBGBUS_SSPP0, 40, 1 },
+       { DBGBUS_SSPP0, 40, 2 },
+       { DBGBUS_SSPP0, 40, 3 },
+       { DBGBUS_SSPP0, 40, 4 },
+       { DBGBUS_SSPP0, 40, 5 },
+       { DBGBUS_SSPP0, 40, 6 },
+       { DBGBUS_SSPP0, 40, 7 },
+
+       { DBGBUS_SSPP0, 41, 0 },
+       { DBGBUS_SSPP0, 41, 1 },
+       { DBGBUS_SSPP0, 41, 2 },
+       { DBGBUS_SSPP0, 41, 3 },
+       { DBGBUS_SSPP0, 41, 4 },
+       { DBGBUS_SSPP0, 41, 5 },
+       { DBGBUS_SSPP0, 41, 6 },
+       { DBGBUS_SSPP0, 41, 7 },
+
+       { DBGBUS_SSPP0, 42, 0 },
+       { DBGBUS_SSPP0, 42, 1 },
+       { DBGBUS_SSPP0, 42, 2 },
+       { DBGBUS_SSPP0, 42, 3 },
+       { DBGBUS_SSPP0, 42, 4 },
+       { DBGBUS_SSPP0, 42, 5 },
+       { DBGBUS_SSPP0, 42, 6 },
+       { DBGBUS_SSPP0, 42, 7 },
+
+       { DBGBUS_SSPP0, 44, 0 },
+       { DBGBUS_SSPP0, 44, 1 },
+       { DBGBUS_SSPP0, 44, 2 },
+       { DBGBUS_SSPP0, 44, 3 },
+       { DBGBUS_SSPP0, 44, 4 },
+       { DBGBUS_SSPP0, 44, 5 },
+       { DBGBUS_SSPP0, 44, 6 },
+       { DBGBUS_SSPP0, 44, 7 },
+
+       { DBGBUS_SSPP0, 45, 0 },
+       { DBGBUS_SSPP0, 45, 1 },
+       { DBGBUS_SSPP0, 45, 2 },
+       { DBGBUS_SSPP0, 45, 3 },
+       { DBGBUS_SSPP0, 45, 4 },
+       { DBGBUS_SSPP0, 45, 5 },
+       { DBGBUS_SSPP0, 45, 6 },
+       { DBGBUS_SSPP0, 45, 7 },
+
+       /* fetch sspp1 */
+       /* vig 1 */
+       { DBGBUS_SSPP1, 0, 0 },
+       { DBGBUS_SSPP1, 0, 1 },
+       { DBGBUS_SSPP1, 0, 2 },
+       { DBGBUS_SSPP1, 0, 3 },
+       { DBGBUS_SSPP1, 0, 4 },
+       { DBGBUS_SSPP1, 0, 5 },
+       { DBGBUS_SSPP1, 0, 6 },
+       { DBGBUS_SSPP1, 0, 7 },
+
+       { DBGBUS_SSPP1, 1, 0 },
+       { DBGBUS_SSPP1, 1, 1 },
+       { DBGBUS_SSPP1, 1, 2 },
+       { DBGBUS_SSPP1, 1, 3 },
+       { DBGBUS_SSPP1, 1, 4 },
+       { DBGBUS_SSPP1, 1, 5 },
+       { DBGBUS_SSPP1, 1, 6 },
+       { DBGBUS_SSPP1, 1, 7 },
+
+       { DBGBUS_SSPP1, 2, 0 },
+       { DBGBUS_SSPP1, 2, 1 },
+       { DBGBUS_SSPP1, 2, 2 },
+       { DBGBUS_SSPP1, 2, 3 },
+       { DBGBUS_SSPP1, 2, 4 },
+       { DBGBUS_SSPP1, 2, 5 },
+       { DBGBUS_SSPP1, 2, 6 },
+       { DBGBUS_SSPP1, 2, 7 },
+
+       { DBGBUS_SSPP1, 4, 0 },
+       { DBGBUS_SSPP1, 4, 1 },
+       { DBGBUS_SSPP1, 4, 2 },
+       { DBGBUS_SSPP1, 4, 3 },
+       { DBGBUS_SSPP1, 4, 4 },
+       { DBGBUS_SSPP1, 4, 5 },
+       { DBGBUS_SSPP1, 4, 6 },
+       { DBGBUS_SSPP1, 4, 7 },
+
+       { DBGBUS_SSPP1, 5, 0 },
+       { DBGBUS_SSPP1, 5, 1 },
+       { DBGBUS_SSPP1, 5, 2 },
+       { DBGBUS_SSPP1, 5, 3 },
+       { DBGBUS_SSPP1, 5, 4 },
+       { DBGBUS_SSPP1, 5, 5 },
+       { DBGBUS_SSPP1, 5, 6 },
+       { DBGBUS_SSPP1, 5, 7 },
+
+       /* vig 3 */
+       { DBGBUS_SSPP1, 20, 0 },
+       { DBGBUS_SSPP1, 20, 1 },
+       { DBGBUS_SSPP1, 20, 2 },
+       { DBGBUS_SSPP1, 20, 3 },
+       { DBGBUS_SSPP1, 20, 4 },
+       { DBGBUS_SSPP1, 20, 5 },
+       { DBGBUS_SSPP1, 20, 6 },
+       { DBGBUS_SSPP1, 20, 7 },
+
+       { DBGBUS_SSPP1, 21, 0 },
+       { DBGBUS_SSPP1, 21, 1 },
+       { DBGBUS_SSPP1, 21, 2 },
+       { DBGBUS_SSPP1, 21, 3 },
+       { DBGBUS_SSPP1, 21, 4 },
+       { DBGBUS_SSPP1, 21, 5 },
+       { DBGBUS_SSPP1, 21, 6 },
+       { DBGBUS_SSPP1, 21, 7 },
+
+       { DBGBUS_SSPP1, 22, 0 },
+       { DBGBUS_SSPP1, 22, 1 },
+       { DBGBUS_SSPP1, 22, 2 },
+       { DBGBUS_SSPP1, 22, 3 },
+       { DBGBUS_SSPP1, 22, 4 },
+       { DBGBUS_SSPP1, 22, 5 },
+       { DBGBUS_SSPP1, 22, 6 },
+       { DBGBUS_SSPP1, 22, 7 },
+
+       { DBGBUS_SSPP1, 24, 0 },
+       { DBGBUS_SSPP1, 24, 1 },
+       { DBGBUS_SSPP1, 24, 2 },
+       { DBGBUS_SSPP1, 24, 3 },
+       { DBGBUS_SSPP1, 24, 4 },
+       { DBGBUS_SSPP1, 24, 5 },
+       { DBGBUS_SSPP1, 24, 6 },
+       { DBGBUS_SSPP1, 24, 7 },
+
+       { DBGBUS_SSPP1, 25, 0 },
+       { DBGBUS_SSPP1, 25, 1 },
+       { DBGBUS_SSPP1, 25, 2 },
+       { DBGBUS_SSPP1, 25, 3 },
+       { DBGBUS_SSPP1, 25, 4 },
+       { DBGBUS_SSPP1, 25, 5 },
+       { DBGBUS_SSPP1, 25, 6 },
+       { DBGBUS_SSPP1, 25, 7 },
+
+       /* dma 3 */
+       { DBGBUS_SSPP1, 30, 0 },
+       { DBGBUS_SSPP1, 30, 1 },
+       { DBGBUS_SSPP1, 30, 2 },
+       { DBGBUS_SSPP1, 30, 3 },
+       { DBGBUS_SSPP1, 30, 4 },
+       { DBGBUS_SSPP1, 30, 5 },
+       { DBGBUS_SSPP1, 30, 6 },
+       { DBGBUS_SSPP1, 30, 7 },
+
+       { DBGBUS_SSPP1, 31, 0 },
+       { DBGBUS_SSPP1, 31, 1 },
+       { DBGBUS_SSPP1, 31, 2 },
+       { DBGBUS_SSPP1, 31, 3 },
+       { DBGBUS_SSPP1, 31, 4 },
+       { DBGBUS_SSPP1, 31, 5 },
+       { DBGBUS_SSPP1, 31, 6 },
+       { DBGBUS_SSPP1, 31, 7 },
+
+       { DBGBUS_SSPP1, 32, 0 },
+       { DBGBUS_SSPP1, 32, 1 },
+       { DBGBUS_SSPP1, 32, 2 },
+       { DBGBUS_SSPP1, 32, 3 },
+       { DBGBUS_SSPP1, 32, 4 },
+       { DBGBUS_SSPP1, 32, 5 },
+       { DBGBUS_SSPP1, 32, 6 },
+       { DBGBUS_SSPP1, 32, 7 },
+
+       { DBGBUS_SSPP1, 33, 0 },
+       { DBGBUS_SSPP1, 33, 1 },
+       { DBGBUS_SSPP1, 33, 2 },
+       { DBGBUS_SSPP1, 33, 3 },
+       { DBGBUS_SSPP1, 33, 4 },
+       { DBGBUS_SSPP1, 33, 5 },
+       { DBGBUS_SSPP1, 33, 6 },
+       { DBGBUS_SSPP1, 33, 7 },
+
+       { DBGBUS_SSPP1, 34, 0 },
+       { DBGBUS_SSPP1, 34, 1 },
+       { DBGBUS_SSPP1, 34, 2 },
+       { DBGBUS_SSPP1, 34, 3 },
+       { DBGBUS_SSPP1, 34, 4 },
+       { DBGBUS_SSPP1, 34, 5 },
+       { DBGBUS_SSPP1, 34, 6 },
+       { DBGBUS_SSPP1, 34, 7 },
+
+       { DBGBUS_SSPP1, 35, 0 },
+       { DBGBUS_SSPP1, 35, 1 },
+       { DBGBUS_SSPP1, 35, 2 },
+
+       /* dma 1 */
+       { DBGBUS_SSPP1, 40, 0 },
+       { DBGBUS_SSPP1, 40, 1 },
+       { DBGBUS_SSPP1, 40, 2 },
+       { DBGBUS_SSPP1, 40, 3 },
+       { DBGBUS_SSPP1, 40, 4 },
+       { DBGBUS_SSPP1, 40, 5 },
+       { DBGBUS_SSPP1, 40, 6 },
+       { DBGBUS_SSPP1, 40, 7 },
+
+       { DBGBUS_SSPP1, 41, 0 },
+       { DBGBUS_SSPP1, 41, 1 },
+       { DBGBUS_SSPP1, 41, 2 },
+       { DBGBUS_SSPP1, 41, 3 },
+       { DBGBUS_SSPP1, 41, 4 },
+       { DBGBUS_SSPP1, 41, 5 },
+       { DBGBUS_SSPP1, 41, 6 },
+       { DBGBUS_SSPP1, 41, 7 },
+
+       { DBGBUS_SSPP1, 42, 0 },
+       { DBGBUS_SSPP1, 42, 1 },
+       { DBGBUS_SSPP1, 42, 2 },
+       { DBGBUS_SSPP1, 42, 3 },
+       { DBGBUS_SSPP1, 42, 4 },
+       { DBGBUS_SSPP1, 42, 5 },
+       { DBGBUS_SSPP1, 42, 6 },
+       { DBGBUS_SSPP1, 42, 7 },
+
+       { DBGBUS_SSPP1, 44, 0 },
+       { DBGBUS_SSPP1, 44, 1 },
+       { DBGBUS_SSPP1, 44, 2 },
+       { DBGBUS_SSPP1, 44, 3 },
+       { DBGBUS_SSPP1, 44, 4 },
+       { DBGBUS_SSPP1, 44, 5 },
+       { DBGBUS_SSPP1, 44, 6 },
+       { DBGBUS_SSPP1, 44, 7 },
+
+       { DBGBUS_SSPP1, 45, 0 },
+       { DBGBUS_SSPP1, 45, 1 },
+       { DBGBUS_SSPP1, 45, 2 },
+       { DBGBUS_SSPP1, 45, 3 },
+       { DBGBUS_SSPP1, 45, 4 },
+       { DBGBUS_SSPP1, 45, 5 },
+       { DBGBUS_SSPP1, 45, 6 },
+       { DBGBUS_SSPP1, 45, 7 },
+
+       /* dspp */
+       { DBGBUS_DSPP, 13, 0 },
+       { DBGBUS_DSPP, 19, 0 },
+       { DBGBUS_DSPP, 14, 0 },
+       { DBGBUS_DSPP, 14, 1 },
+       { DBGBUS_DSPP, 14, 3 },
+       { DBGBUS_DSPP, 20, 0 },
+       { DBGBUS_DSPP, 20, 1 },
+       { DBGBUS_DSPP, 20, 3 },
+
+       /* ppb_0 */
+       { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+       { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+       { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+       { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+       /* ppb_1 */
+       { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+       { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+       { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+       { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+       /* lm_lut */
+       { DBGBUS_DSPP, 109, 0 },
+       { DBGBUS_DSPP, 105, 0 },
+       { DBGBUS_DSPP, 103, 0 },
+
+       /* crossbar */
+       { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+       /* rotator */
+       { DBGBUS_DSPP, 9, 0},
+
+       /* blend */
+       /* LM0 */
+       { DBGBUS_DSPP, 63, 1},
+       { DBGBUS_DSPP, 63, 2},
+       { DBGBUS_DSPP, 63, 3},
+       { DBGBUS_DSPP, 63, 4},
+       { DBGBUS_DSPP, 63, 5},
+       { DBGBUS_DSPP, 63, 6},
+       { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 64, 1},
+       { DBGBUS_DSPP, 64, 2},
+       { DBGBUS_DSPP, 64, 3},
+       { DBGBUS_DSPP, 64, 4},
+       { DBGBUS_DSPP, 64, 5},
+       { DBGBUS_DSPP, 64, 6},
+       { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 65, 1},
+       { DBGBUS_DSPP, 65, 2},
+       { DBGBUS_DSPP, 65, 3},
+       { DBGBUS_DSPP, 65, 4},
+       { DBGBUS_DSPP, 65, 5},
+       { DBGBUS_DSPP, 65, 6},
+       { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 66, 1},
+       { DBGBUS_DSPP, 66, 2},
+       { DBGBUS_DSPP, 66, 3},
+       { DBGBUS_DSPP, 66, 4},
+       { DBGBUS_DSPP, 66, 5},
+       { DBGBUS_DSPP, 66, 6},
+       { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 67, 1},
+       { DBGBUS_DSPP, 67, 2},
+       { DBGBUS_DSPP, 67, 3},
+       { DBGBUS_DSPP, 67, 4},
+       { DBGBUS_DSPP, 67, 5},
+       { DBGBUS_DSPP, 67, 6},
+       { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 68, 1},
+       { DBGBUS_DSPP, 68, 2},
+       { DBGBUS_DSPP, 68, 3},
+       { DBGBUS_DSPP, 68, 4},
+       { DBGBUS_DSPP, 68, 5},
+       { DBGBUS_DSPP, 68, 6},
+       { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 69, 1},
+       { DBGBUS_DSPP, 69, 2},
+       { DBGBUS_DSPP, 69, 3},
+       { DBGBUS_DSPP, 69, 4},
+       { DBGBUS_DSPP, 69, 5},
+       { DBGBUS_DSPP, 69, 6},
+       { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 84, 1},
+       { DBGBUS_DSPP, 84, 2},
+       { DBGBUS_DSPP, 84, 3},
+       { DBGBUS_DSPP, 84, 4},
+       { DBGBUS_DSPP, 84, 5},
+       { DBGBUS_DSPP, 84, 6},
+       { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 85, 1},
+       { DBGBUS_DSPP, 85, 2},
+       { DBGBUS_DSPP, 85, 3},
+       { DBGBUS_DSPP, 85, 4},
+       { DBGBUS_DSPP, 85, 5},
+       { DBGBUS_DSPP, 85, 6},
+       { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 86, 1},
+       { DBGBUS_DSPP, 86, 2},
+       { DBGBUS_DSPP, 86, 3},
+       { DBGBUS_DSPP, 86, 4},
+       { DBGBUS_DSPP, 86, 5},
+       { DBGBUS_DSPP, 86, 6},
+       { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 87, 1},
+       { DBGBUS_DSPP, 87, 2},
+       { DBGBUS_DSPP, 87, 3},
+       { DBGBUS_DSPP, 87, 4},
+       { DBGBUS_DSPP, 87, 5},
+       { DBGBUS_DSPP, 87, 6},
+       { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+       /* LM1 */
+       { DBGBUS_DSPP, 70, 1},
+       { DBGBUS_DSPP, 70, 2},
+       { DBGBUS_DSPP, 70, 3},
+       { DBGBUS_DSPP, 70, 4},
+       { DBGBUS_DSPP, 70, 5},
+       { DBGBUS_DSPP, 70, 6},
+       { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 71, 1},
+       { DBGBUS_DSPP, 71, 2},
+       { DBGBUS_DSPP, 71, 3},
+       { DBGBUS_DSPP, 71, 4},
+       { DBGBUS_DSPP, 71, 5},
+       { DBGBUS_DSPP, 71, 6},
+       { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 72, 1},
+       { DBGBUS_DSPP, 72, 2},
+       { DBGBUS_DSPP, 72, 3},
+       { DBGBUS_DSPP, 72, 4},
+       { DBGBUS_DSPP, 72, 5},
+       { DBGBUS_DSPP, 72, 6},
+       { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 73, 1},
+       { DBGBUS_DSPP, 73, 2},
+       { DBGBUS_DSPP, 73, 3},
+       { DBGBUS_DSPP, 73, 4},
+       { DBGBUS_DSPP, 73, 5},
+       { DBGBUS_DSPP, 73, 6},
+       { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 74, 1},
+       { DBGBUS_DSPP, 74, 2},
+       { DBGBUS_DSPP, 74, 3},
+       { DBGBUS_DSPP, 74, 4},
+       { DBGBUS_DSPP, 74, 5},
+       { DBGBUS_DSPP, 74, 6},
+       { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 75, 1},
+       { DBGBUS_DSPP, 75, 2},
+       { DBGBUS_DSPP, 75, 3},
+       { DBGBUS_DSPP, 75, 4},
+       { DBGBUS_DSPP, 75, 5},
+       { DBGBUS_DSPP, 75, 6},
+       { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 76, 1},
+       { DBGBUS_DSPP, 76, 2},
+       { DBGBUS_DSPP, 76, 3},
+       { DBGBUS_DSPP, 76, 4},
+       { DBGBUS_DSPP, 76, 5},
+       { DBGBUS_DSPP, 76, 6},
+       { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 88, 1},
+       { DBGBUS_DSPP, 88, 2},
+       { DBGBUS_DSPP, 88, 3},
+       { DBGBUS_DSPP, 88, 4},
+       { DBGBUS_DSPP, 88, 5},
+       { DBGBUS_DSPP, 88, 6},
+       { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 89, 1},
+       { DBGBUS_DSPP, 89, 2},
+       { DBGBUS_DSPP, 89, 3},
+       { DBGBUS_DSPP, 89, 4},
+       { DBGBUS_DSPP, 89, 5},
+       { DBGBUS_DSPP, 89, 6},
+       { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 90, 1},
+       { DBGBUS_DSPP, 90, 2},
+       { DBGBUS_DSPP, 90, 3},
+       { DBGBUS_DSPP, 90, 4},
+       { DBGBUS_DSPP, 90, 5},
+       { DBGBUS_DSPP, 90, 6},
+       { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 91, 1},
+       { DBGBUS_DSPP, 91, 2},
+       { DBGBUS_DSPP, 91, 3},
+       { DBGBUS_DSPP, 91, 4},
+       { DBGBUS_DSPP, 91, 5},
+       { DBGBUS_DSPP, 91, 6},
+       { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+       /* LM2 */
+       { DBGBUS_DSPP, 77, 0},
+       { DBGBUS_DSPP, 77, 1},
+       { DBGBUS_DSPP, 77, 2},
+       { DBGBUS_DSPP, 77, 3},
+       { DBGBUS_DSPP, 77, 4},
+       { DBGBUS_DSPP, 77, 5},
+       { DBGBUS_DSPP, 77, 6},
+       { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 78, 0},
+       { DBGBUS_DSPP, 78, 1},
+       { DBGBUS_DSPP, 78, 2},
+       { DBGBUS_DSPP, 78, 3},
+       { DBGBUS_DSPP, 78, 4},
+       { DBGBUS_DSPP, 78, 5},
+       { DBGBUS_DSPP, 78, 6},
+       { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 79, 0},
+       { DBGBUS_DSPP, 79, 1},
+       { DBGBUS_DSPP, 79, 2},
+       { DBGBUS_DSPP, 79, 3},
+       { DBGBUS_DSPP, 79, 4},
+       { DBGBUS_DSPP, 79, 5},
+       { DBGBUS_DSPP, 79, 6},
+       { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 80, 0},
+       { DBGBUS_DSPP, 80, 1},
+       { DBGBUS_DSPP, 80, 2},
+       { DBGBUS_DSPP, 80, 3},
+       { DBGBUS_DSPP, 80, 4},
+       { DBGBUS_DSPP, 80, 5},
+       { DBGBUS_DSPP, 80, 6},
+       { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 81, 0},
+       { DBGBUS_DSPP, 81, 1},
+       { DBGBUS_DSPP, 81, 2},
+       { DBGBUS_DSPP, 81, 3},
+       { DBGBUS_DSPP, 81, 4},
+       { DBGBUS_DSPP, 81, 5},
+       { DBGBUS_DSPP, 81, 6},
+       { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 82, 0},
+       { DBGBUS_DSPP, 82, 1},
+       { DBGBUS_DSPP, 82, 2},
+       { DBGBUS_DSPP, 82, 3},
+       { DBGBUS_DSPP, 82, 4},
+       { DBGBUS_DSPP, 82, 5},
+       { DBGBUS_DSPP, 82, 6},
+       { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 83, 0},
+       { DBGBUS_DSPP, 83, 1},
+       { DBGBUS_DSPP, 83, 2},
+       { DBGBUS_DSPP, 83, 3},
+       { DBGBUS_DSPP, 83, 4},
+       { DBGBUS_DSPP, 83, 5},
+       { DBGBUS_DSPP, 83, 6},
+       { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 92, 1},
+       { DBGBUS_DSPP, 92, 2},
+       { DBGBUS_DSPP, 92, 3},
+       { DBGBUS_DSPP, 92, 4},
+       { DBGBUS_DSPP, 92, 5},
+       { DBGBUS_DSPP, 92, 6},
+       { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 93, 1},
+       { DBGBUS_DSPP, 93, 2},
+       { DBGBUS_DSPP, 93, 3},
+       { DBGBUS_DSPP, 93, 4},
+       { DBGBUS_DSPP, 93, 5},
+       { DBGBUS_DSPP, 93, 6},
+       { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 94, 1},
+       { DBGBUS_DSPP, 94, 2},
+       { DBGBUS_DSPP, 94, 3},
+       { DBGBUS_DSPP, 94, 4},
+       { DBGBUS_DSPP, 94, 5},
+       { DBGBUS_DSPP, 94, 6},
+       { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 95, 1},
+       { DBGBUS_DSPP, 95, 2},
+       { DBGBUS_DSPP, 95, 3},
+       { DBGBUS_DSPP, 95, 4},
+       { DBGBUS_DSPP, 95, 5},
+       { DBGBUS_DSPP, 95, 6},
+       { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+       /* LM5 */
+       { DBGBUS_DSPP, 110, 1},
+       { DBGBUS_DSPP, 110, 2},
+       { DBGBUS_DSPP, 110, 3},
+       { DBGBUS_DSPP, 110, 4},
+       { DBGBUS_DSPP, 110, 5},
+       { DBGBUS_DSPP, 110, 6},
+       { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 111, 1},
+       { DBGBUS_DSPP, 111, 2},
+       { DBGBUS_DSPP, 111, 3},
+       { DBGBUS_DSPP, 111, 4},
+       { DBGBUS_DSPP, 111, 5},
+       { DBGBUS_DSPP, 111, 6},
+       { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 112, 1},
+       { DBGBUS_DSPP, 112, 2},
+       { DBGBUS_DSPP, 112, 3},
+       { DBGBUS_DSPP, 112, 4},
+       { DBGBUS_DSPP, 112, 5},
+       { DBGBUS_DSPP, 112, 6},
+       { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 113, 1},
+       { DBGBUS_DSPP, 113, 2},
+       { DBGBUS_DSPP, 113, 3},
+       { DBGBUS_DSPP, 113, 4},
+       { DBGBUS_DSPP, 113, 5},
+       { DBGBUS_DSPP, 113, 6},
+       { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 114, 1},
+       { DBGBUS_DSPP, 114, 2},
+       { DBGBUS_DSPP, 114, 3},
+       { DBGBUS_DSPP, 114, 4},
+       { DBGBUS_DSPP, 114, 5},
+       { DBGBUS_DSPP, 114, 6},
+       { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 115, 1},
+       { DBGBUS_DSPP, 115, 2},
+       { DBGBUS_DSPP, 115, 3},
+       { DBGBUS_DSPP, 115, 4},
+       { DBGBUS_DSPP, 115, 5},
+       { DBGBUS_DSPP, 115, 6},
+       { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 116, 1},
+       { DBGBUS_DSPP, 116, 2},
+       { DBGBUS_DSPP, 116, 3},
+       { DBGBUS_DSPP, 116, 4},
+       { DBGBUS_DSPP, 116, 5},
+       { DBGBUS_DSPP, 116, 6},
+       { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 117, 1},
+       { DBGBUS_DSPP, 117, 2},
+       { DBGBUS_DSPP, 117, 3},
+       { DBGBUS_DSPP, 117, 4},
+       { DBGBUS_DSPP, 117, 5},
+       { DBGBUS_DSPP, 117, 6},
+       { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 118, 1},
+       { DBGBUS_DSPP, 118, 2},
+       { DBGBUS_DSPP, 118, 3},
+       { DBGBUS_DSPP, 118, 4},
+       { DBGBUS_DSPP, 118, 5},
+       { DBGBUS_DSPP, 118, 6},
+       { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 119, 1},
+       { DBGBUS_DSPP, 119, 2},
+       { DBGBUS_DSPP, 119, 3},
+       { DBGBUS_DSPP, 119, 4},
+       { DBGBUS_DSPP, 119, 5},
+       { DBGBUS_DSPP, 119, 6},
+       { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+       { DBGBUS_DSPP, 120, 1},
+       { DBGBUS_DSPP, 120, 2},
+       { DBGBUS_DSPP, 120, 3},
+       { DBGBUS_DSPP, 120, 4},
+       { DBGBUS_DSPP, 120, 5},
+       { DBGBUS_DSPP, 120, 6},
+       { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+       /* csc */
+       { DBGBUS_SSPP0, 7, 0},
+       { DBGBUS_SSPP0, 7, 1},
+       { DBGBUS_SSPP0, 27, 0},
+       { DBGBUS_SSPP0, 27, 1},
+       { DBGBUS_SSPP1, 7, 0},
+       { DBGBUS_SSPP1, 7, 1},
+       { DBGBUS_SSPP1, 27, 0},
+       { DBGBUS_SSPP1, 27, 1},
+
+       /* pcc */
+       { DBGBUS_SSPP0, 3,  3},
+       { DBGBUS_SSPP0, 23, 3},
+       { DBGBUS_SSPP0, 33, 3},
+       { DBGBUS_SSPP0, 43, 3},
+       { DBGBUS_SSPP1, 3,  3},
+       { DBGBUS_SSPP1, 23, 3},
+       { DBGBUS_SSPP1, 33, 3},
+       { DBGBUS_SSPP1, 43, 3},
+
+       /* spa */
+       { DBGBUS_SSPP0, 8,  0},
+       { DBGBUS_SSPP0, 28, 0},
+       { DBGBUS_SSPP1, 8,  0},
+       { DBGBUS_SSPP1, 28, 0},
+       { DBGBUS_DSPP, 13, 0},
+       { DBGBUS_DSPP, 19, 0},
+
+       /* igc */
+       { DBGBUS_SSPP0, 17, 0},
+       { DBGBUS_SSPP0, 17, 1},
+       { DBGBUS_SSPP0, 17, 3},
+       { DBGBUS_SSPP0, 37, 0},
+       { DBGBUS_SSPP0, 37, 1},
+       { DBGBUS_SSPP0, 37, 3},
+       { DBGBUS_SSPP0, 46, 0},
+       { DBGBUS_SSPP0, 46, 1},
+       { DBGBUS_SSPP0, 46, 3},
+
+       { DBGBUS_SSPP1, 17, 0},
+       { DBGBUS_SSPP1, 17, 1},
+       { DBGBUS_SSPP1, 17, 3},
+       { DBGBUS_SSPP1, 37, 0},
+       { DBGBUS_SSPP1, 37, 1},
+       { DBGBUS_SSPP1, 37, 3},
+       { DBGBUS_SSPP1, 46, 0},
+       { DBGBUS_SSPP1, 46, 1},
+       { DBGBUS_SSPP1, 46, 3},
+
+       { DBGBUS_DSPP, 14, 0},
+       { DBGBUS_DSPP, 14, 1},
+       { DBGBUS_DSPP, 14, 3},
+       { DBGBUS_DSPP, 20, 0},
+       { DBGBUS_DSPP, 20, 1},
+       { DBGBUS_DSPP, 20, 3},
+
+       /* intf0-3 */
+       { DBGBUS_PERIPH, 0, 0},
+       { DBGBUS_PERIPH, 1, 0},
+       { DBGBUS_PERIPH, 2, 0},
+       { DBGBUS_PERIPH, 3, 0},
+
+       /* te counter wrapper */
+       { DBGBUS_PERIPH, 60, 0},
+
+       /* dsc0 */
+       { DBGBUS_PERIPH, 47, 0},
+       { DBGBUS_PERIPH, 47, 1},
+       { DBGBUS_PERIPH, 47, 2},
+       { DBGBUS_PERIPH, 47, 3},
+       { DBGBUS_PERIPH, 47, 4},
+       { DBGBUS_PERIPH, 47, 5},
+       { DBGBUS_PERIPH, 47, 6},
+       { DBGBUS_PERIPH, 47, 7},
+
+       /* dsc1 */
+       { DBGBUS_PERIPH, 48, 0},
+       { DBGBUS_PERIPH, 48, 1},
+       { DBGBUS_PERIPH, 48, 2},
+       { DBGBUS_PERIPH, 48, 3},
+       { DBGBUS_PERIPH, 48, 4},
+       { DBGBUS_PERIPH, 48, 5},
+       { DBGBUS_PERIPH, 48, 6},
+       { DBGBUS_PERIPH, 48, 7},
+
+       /* dsc2 */
+       { DBGBUS_PERIPH, 51, 0},
+       { DBGBUS_PERIPH, 51, 1},
+       { DBGBUS_PERIPH, 51, 2},
+       { DBGBUS_PERIPH, 51, 3},
+       { DBGBUS_PERIPH, 51, 4},
+       { DBGBUS_PERIPH, 51, 5},
+       { DBGBUS_PERIPH, 51, 6},
+       { DBGBUS_PERIPH, 51, 7},
+
+       /* dsc3 */
+       { DBGBUS_PERIPH, 52, 0},
+       { DBGBUS_PERIPH, 52, 1},
+       { DBGBUS_PERIPH, 52, 2},
+       { DBGBUS_PERIPH, 52, 3},
+       { DBGBUS_PERIPH, 52, 4},
+       { DBGBUS_PERIPH, 52, 5},
+       { DBGBUS_PERIPH, 52, 6},
+       { DBGBUS_PERIPH, 52, 7},
+
+       /* tear-check */
+       { DBGBUS_PERIPH, 63, 0 },
+       { DBGBUS_PERIPH, 64, 0 },
+       { DBGBUS_PERIPH, 65, 0 },
+       { DBGBUS_PERIPH, 73, 0 },
+       { DBGBUS_PERIPH, 74, 0 },
+
+       /* cdwn */
+       { DBGBUS_PERIPH, 80, 0},
+       { DBGBUS_PERIPH, 80, 1},
+       { DBGBUS_PERIPH, 80, 2},
+
+       { DBGBUS_PERIPH, 81, 0},
+       { DBGBUS_PERIPH, 81, 1},
+       { DBGBUS_PERIPH, 81, 2},
+
+       { DBGBUS_PERIPH, 82, 0},
+       { DBGBUS_PERIPH, 82, 1},
+       { DBGBUS_PERIPH, 82, 2},
+       { DBGBUS_PERIPH, 82, 3},
+       { DBGBUS_PERIPH, 82, 4},
+       { DBGBUS_PERIPH, 82, 5},
+       { DBGBUS_PERIPH, 82, 6},
+       { DBGBUS_PERIPH, 82, 7},
+
+       /* hdmi */
+       { DBGBUS_PERIPH, 68, 0},
+       { DBGBUS_PERIPH, 68, 1},
+       { DBGBUS_PERIPH, 68, 2},
+       { DBGBUS_PERIPH, 68, 3},
+       { DBGBUS_PERIPH, 68, 4},
+       { DBGBUS_PERIPH, 68, 5},
+
+       /* edp */
+       { DBGBUS_PERIPH, 69, 0},
+       { DBGBUS_PERIPH, 69, 1},
+       { DBGBUS_PERIPH, 69, 2},
+       { DBGBUS_PERIPH, 69, 3},
+       { DBGBUS_PERIPH, 69, 4},
+       { DBGBUS_PERIPH, 69, 5},
+
+       /* dsi0 */
+       { DBGBUS_PERIPH, 70, 0},
+       { DBGBUS_PERIPH, 70, 1},
+       { DBGBUS_PERIPH, 70, 2},
+       { DBGBUS_PERIPH, 70, 3},
+       { DBGBUS_PERIPH, 70, 4},
+       { DBGBUS_PERIPH, 70, 5},
+
+       /* dsi1 */
+       { DBGBUS_PERIPH, 71, 0},
+       { DBGBUS_PERIPH, 71, 1},
+       { DBGBUS_PERIPH, 71, 2},
+       { DBGBUS_PERIPH, 71, 3},
+       { DBGBUS_PERIPH, 71, 4},
+       { DBGBUS_PERIPH, 71, 5},
+};
+
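+/* entry fields, in order: disable_bus_addr, block_bus_addr, bit_offset,
+ * block_cnt, test_pnt_start, test_pnt_cnt
+ */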
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+       {0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
+       {0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
+       {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+       {0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
+       {0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
+       {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+       {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _dpu_dbg_enable_power - turn power on/off for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+       if (enable)
+               pm_runtime_get_sync(dpu_dbg_base.dev);
+       else
+               pm_runtime_put_sync(dpu_dbg_base.dev);
+}
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+       bool in_log, in_mem;
+       u32 **dump_mem = NULL;
+       u32 *dump_addr = NULL;
+       u32 status = 0;
+       struct dpu_debug_bus_entry *head;
+       phys_addr_t phys = 0;
+       int list_size;
+       int i;
+       u32 offset;
+       void __iomem *mem_base = NULL;
+       struct dpu_dbg_reg_base *reg_base;
+
+       if (!bus || !bus->cmn.entries_size)
+               return;
+
+       list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+                       reg_base_head)
+               if (strlen(reg_base->name) &&
+                       !strcmp(reg_base->name, bus->cmn.name))
+                       mem_base = reg_base->base + bus->top_blk_off;
+
+       if (!mem_base) {
+               pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+               return;
+       }
+
+       dump_mem = &bus->cmn.dumped_content;
+
+       /* each entry is dumped as four u32 words: wr_addr, block_id, test_id, status */
+       list_size = (bus->cmn.entries_size * 4 * 4);
+
+       in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+       in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+       if (!in_log && !in_mem)
+               return;
+
+       dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+                       bus->cmn.name);
+
+       if (in_mem) {
+               if (!(*dump_mem))
+                       *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+                               list_size, &phys, GFP_KERNEL);
+
+               if (*dump_mem) {
+                       dump_addr = *dump_mem;
+                       dev_info(dpu_dbg_base.dev,
+                               "%s: start_addr:0x%pK len:0x%x\n",
+                               __func__, dump_addr, list_size);
+               } else {
+                       in_mem = false;
+                       pr_err("dump_mem: allocation fails\n");
+               }
+       }
+
+       _dpu_dbg_enable_power(true);
+       for (i = 0; i < bus->cmn.entries_size; i++) {
+               head = bus->entries + i;
+               writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+                               mem_base + head->wr_addr);
+               wmb(); /* make sure test bits were written */
+
+               if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+                       offset = DBGBUS_DSPP_STATUS;
+                       /* keep DSPP test point enabled */
+                       if (head->wr_addr != DBGBUS_DSPP)
+                               writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+               } else {
+                       offset = head->wr_addr + 0x4;
+               }
+
+               status = readl_relaxed(mem_base + offset);
+
+               if (in_log)
+                       dev_info(dpu_dbg_base.dev,
+                                       "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+                                       head->wr_addr, head->block_id,
+                                       head->test_id, status);
+
+               if (dump_addr && in_mem) {
+                       dump_addr[i*4]     = head->wr_addr;
+                       dump_addr[i*4 + 1] = head->block_id;
+                       dump_addr[i*4 + 2] = head->test_id;
+                       dump_addr[i*4 + 3] = status;
+               }
+
+               if (head->analyzer)
+                       head->analyzer(mem_base, head, status);
+
+               /* Disable debug bus once we are done */
+               writel_relaxed(0, mem_base + head->wr_addr);
+               if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+                                               head->wr_addr != DBGBUS_DSPP)
+                       writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+       }
+       _dpu_dbg_enable_power(false);
+
+       dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+                       bus->cmn.name);
+}
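+
+/*
+ * Layout note: with DPU_DBG_DUMP_IN_MEM set, the function above stores one
+ * 4-dword record per bus entry in the coherent buffer:
+ *
+ *   dump_addr[4*i + 0] = wr_addr
+ *   dump_addr[4*i + 1] = block_id
+ *   dump_addr[4*i + 2] = test_id
+ *   dump_addr[4*i + 3] = status
+ *
+ * so a (hypothetical) post-processor can walk the buffer as, e.g.:
+ *
+ *   for (i = 0; i < entries_size; i++)
+ *           pr_info("waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ *                   dump[4*i], dump[4*i+1], dump[4*i+2], dump[4*i+3]);
+ */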
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+               struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+               u32 *dump_addr, bool in_log)
+{
+       int i, j;
+       u32 val;
+
+       if (!dump_addr && !in_log)
+               return;
+
+       for (i = 0; i < head->block_cnt; i++) {
+               writel_relaxed(1 << (i + head->bit_offset),
+                               mem_base + head->block_bus_addr);
+               /* make sure the current bus block is enabled */
+               wmb();
+               for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+                       writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+                       /* make sure that test point is enabled */
+                       wmb();
+                       val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+                       if (dump_addr) {
+                               *dump_addr++ = head->block_bus_addr;
+                               *dump_addr++ = i;
+                               *dump_addr++ = j;
+                               *dump_addr++ = val;
+                       }
+                       if (in_log)
+                               dev_info(dpu_dbg_base.dev,
+                                       "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+                                       head->block_bus_addr, i, j, val);
+               }
+       }
+}
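+
+/*
+ * Each (block, test point) pair above emits a 4-dword record:
+ * { block_bus_addr, block index, test point, value }. The caller below
+ * reserves block_cnt * test_pnt_cnt records per bus entry when sizing the
+ * dump buffer and advancing dump_addr, so when test_pnt_start > 0 the
+ * tail of an entry's region is simply left unwritten.
+ */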
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+       bool in_log, in_mem;
+       u32 **dump_mem = NULL;
+       u32 *dump_addr = NULL;
+       u32 value, d0, d1;
+       unsigned long reg, reg1, reg2;
+       struct vbif_debug_bus_entry *head;
+       phys_addr_t phys = 0;
+       int i, list_size = 0;
+       void __iomem *mem_base = NULL;
+       struct vbif_debug_bus_entry *dbg_bus;
+       u32 bus_size;
+       struct dpu_dbg_reg_base *reg_base;
+
+       if (!bus || !bus->cmn.entries_size)
+               return;
+
+       list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+                       reg_base_head)
+               if (strlen(reg_base->name) &&
+                       !strcmp(reg_base->name, bus->cmn.name))
+                       mem_base = reg_base->base;
+
+       if (!mem_base) {
+               pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+               return;
+       }
+
+       dbg_bus = bus->entries;
+       bus_size = bus->cmn.entries_size;
+       list_size = bus->cmn.entries_size;
+       dump_mem = &bus->cmn.dumped_content;
+
+       dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+                       bus->cmn.name);
+
+       if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+               return;
+
+       /* allocate memory for each test point */
+       for (i = 0; i < bus_size; i++) {
+               head = dbg_bus + i;
+               list_size += (head->block_cnt * head->test_pnt_cnt);
+       }
+
+       /* 4 bytes * 4 entries for each test point */
+       list_size *= 16;
+
+       in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+       in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+       if (!in_log && !in_mem)
+               return;
+
+       if (in_mem) {
+               if (!(*dump_mem))
+                       *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+                               list_size, &phys, GFP_KERNEL);
+
+               if (*dump_mem) {
+                       dump_addr = *dump_mem;
+                       dev_info(dpu_dbg_base.dev,
+                               "%s: start_addr:0x%pK len:0x%x\n",
+                               __func__, dump_addr, list_size);
+               } else {
+                       in_mem = false;
+                       pr_err("dump_mem: allocation fails\n");
+               }
+       }
+
+       _dpu_dbg_enable_power(true);
+
+       value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+       writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+       /* make sure that vbif core is on */
+       wmb();
+
+       /*
+        * Extract VBIF error info based on XIN halt and error status.
+        * If the XIN client is not in HALT state, or an error is detected,
+        * then retrieve the VBIF error info for it.
+        */
+       reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+       reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+       reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+       dev_err(dpu_dbg_base.dev,
+                       "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+                       reg, reg1, reg2);
+       reg >>= 16;
+       reg &= ~(reg1 | reg2);
+       for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+               if (!test_bit(0, &reg)) {
+                       writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+                       /* make sure reg write goes through */
+                       wmb();
+
+                       d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+                       d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+                       dev_err(dpu_dbg_base.dev,
+                                       "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+                                       i, d0, d1);
+               }
+               reg >>= 1;
+       }
+
+       for (i = 0; i < bus_size; i++) {
+               head = dbg_bus + i;
+
+               writel_relaxed(0, mem_base + head->disable_bus_addr);
+               writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+               /* make sure that other bus is off */
+               wmb();
+
+               _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+                               in_log);
+               if (dump_addr)
+                       dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+       }
+
+       _dpu_dbg_enable_power(false);
+
+       dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+                       bus->cmn.name);
+}
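+
+/*
+ * Worked example for the XIN halt/error masking above: with
+ * XIN HALT:0x30000, PND ERR:0x1 and SRC ERR:0x0, "reg >>= 16" yields 0x3
+ * (clients 0 and 1 halted) and "reg &= ~(reg1 | reg2)" clears bit 0, so
+ * client 0 (halted, but with a pending error) and the non-halted clients
+ * get their MMSS_VBIF_ERR_INFO/_1 dumped, while client 1 (halted, no
+ * error) is skipped.
+ */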
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+                           bool dump_dbgbus_vbif_rt)
+{
+       if (dump_dbgbus_dpu)
+               _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+       if (dump_dbgbus_vbif_rt)
+               _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+       _dpu_dump_array("dpudump_workitem",
+               dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+               dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+                 bool dump_dbgbus_vbif_rt)
+{
+       if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+               return;
+
+       if (!queue_work) {
+               _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+               return;
+       }
+
+       /* schedule work to dump later */
+       dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+       dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+                       dump_dbgbus_vbif_rt;
+       schedule_work(&dpu_dbg_base.dump_work);
+}
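+
+/*
+ * Illustrative usage (call sites and origin strings are examples, not
+ * part of this file): from interrupt or atomic context, defer via the
+ * work item; from process context the dump may run synchronously:
+ *
+ *   dpu_dbg_dump(true, "frame_done_irq", true, false);    (deferred)
+ *   dpu_dbg_dump(false, "commit_timeout", true, true);    (immediate)
+ */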
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+       /* non-seekable */
+       file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       _dpu_dump_array("dump_debugfs", true, true);
+       return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+       .open = dpu_dbg_debugfs_open,
+       .write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+       static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+       char debug_name[80] = "";
+
+       if (!debugfs_root)
+               return -EINVAL;
+
+       debugfs_create_file("dump", 0600, debugfs_root, NULL,
+                       &dpu_dbg_dump_fops);
+
+       if (dbg->dbgbus_dpu.entries) {
+               dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+               snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+                               dbg->dbgbus_dpu.cmn.name);
+               dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+               debugfs_create_u32(debug_name, 0600, debugfs_root,
+                               &dbg->dbgbus_dpu.cmn.enable_mask);
+       }
+
+       if (dbg->dbgbus_vbif_rt.entries) {
+               dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+               snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+                               dbg->dbgbus_vbif_rt.cmn.name);
+               dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+               debugfs_create_u32(debug_name, 0600, debugfs_root,
+                               &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+       }
+
+       return 0;
+}
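+
+/*
+ * Example (paths assume the usual DRM debugfs root, e.g.
+ * /sys/kernel/debug/dri/0/, and the bus file name depends on
+ * DBGBUS_NAME_DPU/DBGBUS_NAME_VBIF_RT):
+ *
+ *   echo 1 > .../dump            any write triggers a full dump
+ *   echo 2 > .../<name>_dbgbus   set enable_mask to DPU_DBG_DUMP_IN_MEM
+ */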
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+       static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+       memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+       memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+       if (IS_MSM8998_TARGET(hwversion)) {
+               dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+               dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+               dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+               dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+               dbg->dbgbus_vbif_rt.cmn.entries_size =
+                               ARRAY_SIZE(vbif_dbg_bus_msm8998);
+       } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+               dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+               dbg->dbgbus_dpu.cmn.entries_size =
+                               ARRAY_SIZE(dbg_bus_dpu_sdm845);
+               dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+               /* vbif is unchanged vs 8998 */
+               dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+               dbg->dbgbus_vbif_rt.cmn.entries_size =
+                               ARRAY_SIZE(vbif_dbg_bus_msm8998);
+       } else {
+               pr_err("unsupported chipset id %X\n", hwversion);
+       }
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+       if (!dev) {
+               pr_err("invalid params\n");
+               return -EINVAL;
+       }
+
+       INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+       dpu_dbg_base.dev = dev;
+
+       INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+       return 0;
+}
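+
+/*
+ * Sketch of the expected bring-up order for these facilities (caller
+ * names and values assumed, not defined here):
+ *
+ *   ret = dpu_dbg_init(dev);
+ *   dpu_dbg_init_dbg_buses(hwversion);
+ *   dpu_dbg_set_dpu_top_offset(top_blk_off);
+ *   ret = dpu_dbg_debugfs_register(debugfs_root);
+ */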
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+       _dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+       dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644 (file)
index 0000000..1e6fa94
--- /dev/null
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+       DPU_DBG_DUMP_IN_LOG = BIT(0),
+       DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion:         Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev:               device handle
+ * Returns:            0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root:      debugfs root in which to create dpu debug entries
+ * Returns:    0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns:    none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work:          whether to queue the dumping work to the work_struct
+ * @name:        string indicating origin of dump
+ * @dump_dbgbus_dpu:     dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt bus
+ * Returns:    none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+                 bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ *     address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+       return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+       return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+                               bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644 (file)
index 0000000..0bd3eda
--- /dev/null
@@ -0,0 +1,2498 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+               (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+               (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+               (p) ? (p)->parent->base.id : -1, \
+               (p) ? (p)->intf_idx - INTF_0 : -1, \
+               (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+               ##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+               (p) ? (p)->parent->base.id : -1, \
+               (p) ? (p)->intf_idx - INTF_0 : -1, \
+               (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+               ##__VA_ARGS__)
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching
+ * plan is to create all possible physical encoder types, and switch between
+ * them at runtime
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+       (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE                 256
+
+#define IDLE_SHORT_TIMEOUT     1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ *     This event happens at NORMAL priority.
+ *     Event that signals the start of the transfer. When this event is
+ *     received, enable MDP/DSI core clocks. Regardless of the previous
+ *     state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ *     This event happens at INTERRUPT level.
+ *     Event signals the end of the data transfer after the PP FRAME_DONE
+ *     event. At the end of this event, a delayed work is scheduled to go to
+ *     IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ *     This event happens at NORMAL priority.
+ *     This event, when received during the ON state, leaves the RC state
+ *     in the PRE_OFF state. It should be followed by the STOP event as
+ *     part of encoder disable.
+ *     If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ *     This event happens at NORMAL priority.
+ *     When this event is received, disable all the MDP/DSI core clocks, and
+ *     disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ *     IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ *     PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ *     Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ *     This event happens at NORMAL priority from a work item.
+ *     Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ *     This would disable MDP/DSI core clocks and change the resource state
+ *     to IDLE.
+ */
+enum dpu_enc_rc_events {
+       DPU_ENC_RC_EVENT_KICKOFF = 1,
+       DPU_ENC_RC_EVENT_FRAME_DONE,
+       DPU_ENC_RC_EVENT_PRE_STOP,
+       DPU_ENC_RC_EVENT_STOP,
+       DPU_ENC_RC_EVENT_ENTER_IDLE
+};
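+
+/*
+ * Typical sequence, per the event documentation above: KICKOFF moves the
+ * resource state to ON; FRAME_DONE schedules the delayed idle work;
+ * ENTER_IDLE moves ON to IDLE (a later KICKOFF returns to ON); on encoder
+ * disable, PRE_STOP (ON -> PRE_OFF, a no-op from IDLE/OFF) is followed by
+ * STOP, which lands in OFF.
+ */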
+
+/*
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_MODESET: Resource is in modeset state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+       DPU_ENC_RC_STATE_OFF,
+       DPU_ENC_RC_STATE_PRE_OFF,
+       DPU_ENC_RC_STATE_ON,
+       DPU_ENC_RC_STATE_IDLE
+};
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ *     encoders. Virtual encoder manages one "logical" display. Physical
+ *     encoders manage one intf block, tied to a specific panel/sub-panel.
+ *     Virtual encoder defers as much as possible to the physical encoders.
+ *     Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base:              drm_encoder base class for registration with DRM
+ * @enc_spin_lock:     Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client:        Client handle to the bus scaling interface
+ * @num_phys_encs:     Actual number of physical encoders contained.
+ * @phys_encs:         Container of physical encoders managed.
+ * @cur_master:                Pointer to the current master in this mode. An
+ *                     optimization; only valid after enable, cleared at disable.
+ * @hw_pp:             Handles to the pingpong blocks used for the display. The
+ *                     number of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped:     Whether or not the phys_enc interfaces have been swapped
+ *                     for partial update right-only cases, such as pingpong
+ *                     split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb:    Callback into the upper layer / CRTC for
+ *                     notification of the VBLANK
+ * @crtc_vblank_cb_data:       Data from upper layer for VBLANK notification
+ * @crtc_kickoff_cb:           Callback into CRTC that will flush & start
+ *                             all CTL paths
+ * @crtc_kickoff_cb_data:      Opaque user data given to crtc_kickoff_cb
+ * @debugfs_root:              Debug file system root file node
+ * @enc_lock:                  Lock around physical encoder create/destroy and
+                               access.
+ * @frame_busy_mask:           Bitmask tracking which phys_encs are still
+ *                             busy processing the current command.
+ *                             Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb:       callback handler for frame event
+ * @crtc_frame_event_cb_data:  callback handler private data
+ * @frame_done_timeout:                frame done timeout in Hz
+ * @frame_done_timer:          watchdog timer for frame done event
+ * @vsync_event_timer:         vsync timer
+ * @disp_info:                 local copy of msm_display_info struct
+ * @misr_enable:               misr enable/disable status
+ * @misr_frame_count:          misr frame count before start capturing the data
+ * @idle_pc_supported:         indicate if idle power collapse is supported
+ * @rc_lock:                   resource control mutex lock to protect
+ *                             virt encoder over various state changes
+ * @rc_state:                  resource controller state
+ * @delayed_off_work:          delayed worker to schedule disabling of
+ *                             clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work:          worker to handle vsync event for autorefresh
+ * @topology:                   topology of the display
+ * @mode_set_complete:          flag to indicate modeset completion
+ * @idle_timeout:              idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+       struct drm_encoder base;
+       spinlock_t enc_spinlock;
+       uint32_t bus_scaling_client;
+
+       uint32_t display_num_of_h_tiles;
+
+       unsigned int num_phys_encs;
+       struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+       struct dpu_encoder_phys *cur_master;
+       struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+       bool intfs_swapped;
+
+       void (*crtc_vblank_cb)(void *);
+       void *crtc_vblank_cb_data;
+
+       struct dentry *debugfs_root;
+       struct mutex enc_lock;
+       DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+       void (*crtc_frame_event_cb)(void *, u32 event);
+       void *crtc_frame_event_cb_data;
+
+       atomic_t frame_done_timeout;
+       struct timer_list frame_done_timer;
+       struct timer_list vsync_event_timer;
+
+       struct msm_display_info disp_info;
+       bool misr_enable;
+       u32 misr_frame_count;
+
+       bool idle_pc_supported;
+       struct mutex rc_lock;
+       enum dpu_enc_rc_states rc_state;
+       struct kthread_delayed_work delayed_off_work;
+       struct kthread_work vsync_event_work;
+       struct msm_display_topology topology;
+       bool mode_set_complete;
+
+       u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+                                                               bool enable)
+{
+       struct drm_encoder *drm_enc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!dpu_enc) {
+               DPU_ERROR("invalid dpu enc\n");
+               return -EINVAL;
+       }
+
+       drm_enc = &dpu_enc->base;
+       if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+               DPU_ERROR("drm device invalid\n");
+               return -EINVAL;
+       }
+
+       priv = drm_enc->dev->dev_private;
+       if (!priv->kms) {
+               DPU_ERROR("invalid kms\n");
+               return -EINVAL;
+       }
+
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       if (enable)
+               pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       else
+               pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       return 0;
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx)
+{
+       DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+                 DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+                 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+       if (phys_enc->parent_ops->handle_frame_done)
+               phys_enc->parent_ops->handle_frame_done(
+                               phys_enc->parent, phys_enc,
+                               DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+               int32_t hw_id, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx,
+               struct dpu_encoder_wait_info *wait_info)
+{
+       struct dpu_encoder_irq *irq;
+       u32 irq_status;
+       int ret;
+
+       if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+       irq = &phys_enc->irq[intr_idx];
+
+       /* note: do master / slave checking outside */
+
+       /* return EWOULDBLOCK since we know the wait isn't necessary */
+       if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+               DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+                         DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                         irq->irq_idx);
+               return -EWOULDBLOCK;
+       }
+
+       if (irq->irq_idx < 0) {
+               DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+                             DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                             irq->name);
+               return 0;
+       }
+
+       DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+                     DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                     irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+                     atomic_read(wait_info->atomic_cnt));
+
+       ret = dpu_encoder_helper_wait_event_timeout(
+                       DRMID(phys_enc->parent),
+                       irq->hw_idx,
+                       wait_info);
+
+       if (ret <= 0) {
+               irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+                               irq->irq_idx, true);
+               if (irq_status) {
+                       unsigned long flags;
+
+                       DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+                                     "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+                                     DRMID(phys_enc->parent), intr_idx,
+                                     irq->hw_idx, irq->irq_idx,
+                                     phys_enc->hw_pp->idx - PINGPONG_0,
+                                     atomic_read(wait_info->atomic_cnt));
+                       local_irq_save(flags);
+                       irq->cb.func(phys_enc, irq->irq_idx);
+                       local_irq_restore(flags);
+                       ret = 0;
+               } else {
+                       ret = -ETIMEDOUT;
+                       DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+                                     "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+                                     DRMID(phys_enc->parent), intr_idx,
+                                     irq->hw_idx, irq->irq_idx,
+                                     phys_enc->hw_pp->idx - PINGPONG_0,
+                                     atomic_read(wait_info->atomic_cnt));
+               }
+       } else {
+               ret = 0;
+               trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+                       intr_idx, irq->hw_idx, irq->irq_idx,
+                       phys_enc->hw_pp->idx - PINGPONG_0,
+                       atomic_read(wait_info->atomic_cnt));
+       }
+
+       return ret;
+}
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx)
+{
+       struct dpu_encoder_irq *irq;
+       int ret = 0;
+
+       if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+               DPU_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+       irq = &phys_enc->irq[intr_idx];
+
+       if (irq->irq_idx >= 0) {
+               DPU_DEBUG_PHYS(phys_enc,
+                               "skipping already registered irq %s type %d\n",
+                               irq->name, irq->intr_type);
+               return 0;
+       }
+
+       irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+                       irq->intr_type, irq->hw_idx);
+       if (irq->irq_idx < 0) {
+               DPU_ERROR_PHYS(phys_enc,
+                       "failed to lookup IRQ index for %s type:%d\n",
+                       irq->name, irq->intr_type);
+               return -EINVAL;
+       }
+
+       ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+                       &irq->cb);
+       if (ret) {
+               DPU_ERROR_PHYS(phys_enc,
+                       "failed to register IRQ callback for %s\n",
+                       irq->name);
+               irq->irq_idx = -EINVAL;
+               return ret;
+       }
+
+       ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+       if (ret) {
+               DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+                         DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                         irq->irq_idx);
+               dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+                               irq->irq_idx, &irq->cb);
+               irq->irq_idx = -EINVAL;
+               return ret;
+       }
+
+       trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+                               irq->hw_idx, irq->irq_idx);
+
+       return ret;
+}
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx)
+{
+       struct dpu_encoder_irq *irq;
+       int ret;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return -EINVAL;
+       }
+       irq = &phys_enc->irq[intr_idx];
+
+       /* skip irqs that weren't registered, noting the duplicate unregister */
+       if (irq->irq_idx < 0) {
+               DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+                         DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                         irq->irq_idx);
+               return 0;
+       }
+
+       ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+       if (ret) {
+               DRM_ERROR("diable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+                         DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                         irq->irq_idx, ret);
+       }
+
+       ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+                       &irq->cb);
+       if (ret) {
+               DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+                         DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+                         irq->irq_idx, ret);
+       }
+
+       trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+                                            irq->hw_idx, irq->irq_idx);
+
+       irq->irq_idx = -EINVAL;
+
+       return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+               struct dpu_encoder_hw_resources *hw_res,
+               struct drm_connector_state *conn_state)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int i = 0;
+
+       if (!hw_res || !drm_enc || !conn_state) {
+               DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+                               drm_enc != 0, hw_res != 0, conn_state != 0);
+               return;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       /* Query resources used by phys encs, expected to be without overlap */
+       memset(hw_res, 0, sizeof(*hw_res));
+       hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.get_hw_resources)
+                       phys->ops.get_hw_resources(phys, hw_res, conn_state);
+       }
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int i = 0;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       mutex_lock(&dpu_enc->enc_lock);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.destroy) {
+                       phys->ops.destroy(phys);
+                       --dpu_enc->num_phys_encs;
+                       dpu_enc->phys_encs[i] = NULL;
+               }
+       }
+
+       if (dpu_enc->num_phys_encs)
+               DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+                               dpu_enc->num_phys_encs);
+       dpu_enc->num_phys_encs = 0;
+       mutex_unlock(&dpu_enc->enc_lock);
+
+       drm_encoder_cleanup(drm_enc);
+       mutex_destroy(&dpu_enc->enc_lock);
+
+       kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+               struct dpu_encoder_phys *phys_enc,
+               enum dpu_intf interface)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct split_pipe_cfg cfg = { 0 };
+       struct dpu_hw_mdp *hw_mdptop;
+       struct msm_display_info *disp_info;
+
+       if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+               DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+               return;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+       hw_mdptop = phys_enc->hw_mdptop;
+       disp_info = &dpu_enc->disp_info;
+
+       if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+               return;
+
+       /*
+        * disable split modes since the encoder will be operating as the only
+        * encoder, either for the entire use case in the case of, for example,
+        * single DSI, or for this frame in the case of left/right only partial
+        * update.
+        */
+       if (phys_enc->split_role == ENC_ROLE_SOLO) {
+               if (hw_mdptop->ops.setup_split_pipe)
+                       hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+               return;
+       }
+
+       cfg.en = true;
+       cfg.mode = phys_enc->intf_mode;
+       cfg.intf = interface;
+
+       if (cfg.en && phys_enc->ops.needs_single_flush &&
+                       phys_enc->ops.needs_single_flush(phys_enc))
+               cfg.split_flush_en = true;
+
+       if (phys_enc->split_role == ENC_ROLE_MASTER) {
+               DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+               if (hw_mdptop->ops.setup_split_pipe)
+                       hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+       }
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+               struct drm_display_mode *adj_mode)
+{
+       struct drm_display_mode *cur_mode;
+
+       if (!connector || !adj_mode)
+               return;
+
+       list_for_each_entry(cur_mode, &connector->modes, head) {
+               if (cur_mode->vdisplay == adj_mode->vdisplay &&
+                       cur_mode->hdisplay == adj_mode->hdisplay &&
+                       cur_mode->vrefresh == adj_mode->vrefresh) {
+                       adj_mode->private = cur_mode->private;
+                       adj_mode->private_flags |= cur_mode->private_flags;
+               }
+       }
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+                       struct dpu_encoder_virt *dpu_enc,
+                       struct dpu_kms *dpu_kms,
+                       struct drm_display_mode *mode)
+{
+       struct msm_display_topology topology;
+       int i, intf_count = 0;
+
+       for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+               if (dpu_enc->phys_encs[i])
+                       intf_count++;
+
+       /* Use split topology for vdisplay > MAX_VDISPLAY_SPLIT */
+       topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+       topology.num_enc = 0;
+       topology.num_intf = intf_count;
+
+       return topology;
+}
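+
+/*
+ * Example for dpu_encoder_get_topology() above: a 1920x1080 mode keeps a
+ * single layer mixer (num_lm = 1), while a mode with vdisplay 1440
+ * exceeds MAX_VDISPLAY_SPLIT (1080) and is split across two mixers
+ * (num_lm = 2).
+ */
+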
+static int dpu_encoder_virt_atomic_check(
+               struct drm_encoder *drm_enc,
+               struct drm_crtc_state *crtc_state,
+               struct drm_connector_state *conn_state)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       const struct drm_display_mode *mode;
+       struct drm_display_mode *adj_mode;
+       struct msm_display_topology topology;
+       int i = 0;
+       int ret = 0;
+
+       if (!drm_enc || !crtc_state || !conn_state) {
+               DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+                               drm_enc != 0, crtc_state != 0, conn_state != 0);
+               return -EINVAL;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       priv = drm_enc->dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+       mode = &crtc_state->mode;
+       adj_mode = &crtc_state->adjusted_mode;
+       trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+       /*
+        * display drivers may populate private fields of the drm display mode
+        * structure while registering possible modes of a connector with DRM.
+        * These private fields are not populated back while DRM invokes
+        * the mode_set callbacks. This module retrieves and populates the
+        * private fields of the given mode.
+        */
+       _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+       /* perform atomic check on the first physical encoder (master) */
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.atomic_check)
+                       ret = phys->ops.atomic_check(phys, crtc_state,
+                                       conn_state);
+               else if (phys && phys->ops.mode_fixup)
+                       if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+                               ret = -EINVAL;
+
+               if (ret) {
+                       DPU_ERROR_ENC(dpu_enc,
+                                       "mode unsupported, phys idx %d\n", i);
+                       break;
+               }
+       }
+
+       topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+       /* Reserve dynamic resources now. Indicating AtomicTest phase */
+       if (!ret) {
+               /*
+                * Avoid reserving resources when mode set is pending. Topology
+                * info may not be available to complete reservation.
+                */
+               if (drm_atomic_crtc_needs_modeset(crtc_state)
+                               && dpu_enc->mode_set_complete) {
+                       ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+                               conn_state, topology, true);
+                       dpu_enc->mode_set_complete = false;
+               }
+       }
+
+       if (!ret)
+               drm_mode_set_crtcinfo(adj_mode, 0);
+
+       trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+                       adj_mode->private_flags);
+
+       return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+                       struct msm_display_info *disp_info)
+{
+       struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       struct dpu_hw_mdp *hw_mdptop;
+       struct drm_encoder *drm_enc;
+       int i;
+
+       if (!dpu_enc || !disp_info) {
+               DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+                                       dpu_enc != NULL, disp_info != NULL);
+               return;
+       } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+               DPU_ERROR("invalid num phys enc %d/%d\n",
+                               dpu_enc->num_phys_encs,
+                               (int) ARRAY_SIZE(dpu_enc->hw_pp));
+               return;
+       }
+
+       drm_enc = &dpu_enc->base;
+       /* these pointers are checked in virt_enable_helper */
+       priv = drm_enc->dev->dev_private;
+
+       dpu_kms = to_dpu_kms(priv->kms);
+       if (!dpu_kms) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return;
+       }
+
+       hw_mdptop = dpu_kms->hw_mdp;
+       if (!hw_mdptop) {
+               DPU_ERROR("invalid mdptop\n");
+               return;
+       }
+
+       if (hw_mdptop->ops.setup_vsync_source &&
+                       disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+               for (i = 0; i < dpu_enc->num_phys_encs; i++)
+                       vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+               vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+               if (disp_info->is_te_using_watchdog_timer)
+                       vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+               else
+                       vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+               hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+       }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       int i;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.irq_control)
+                       phys->ops.irq_control(phys, enable);
+       }
+
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+               bool enable)
+{
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       struct dpu_encoder_virt *dpu_enc;
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       priv = drm_enc->dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+       if (!dpu_enc->cur_master) {
+               DPU_ERROR("encoder master not set\n");
+               return;
+       }
+
+       if (enable) {
+               /* enable DPU core clks */
+               pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+               /* enable all the irq */
+               _dpu_encoder_irq_control(drm_enc, true);
+
+       } else {
+               /* disable all the irq */
+               _dpu_encoder_irq_control(drm_enc, false);
+
+               /* disable DPU core clks */
+               pm_runtime_put_sync(&dpu_kms->pdev->dev);
+       }
+
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+               u32 sw_event)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct msm_drm_private *priv;
+       struct msm_drm_thread *disp_thread;
+       bool is_vid_mode = false;
+
+       if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+                       !drm_enc->crtc) {
+               DPU_ERROR("invalid parameters\n");
+               return -EINVAL;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       priv = drm_enc->dev->dev_private;
+       is_vid_mode = dpu_enc->disp_info.capabilities &
+                                               MSM_DISPLAY_CAP_VID_MODE;
+
+       if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+               DPU_ERROR("invalid crtc index\n");
+               return -EINVAL;
+       }
+       disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+       /*
+        * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+        * STOP events and return early for other events (i.e. writeback display).
+        */
+       if (!dpu_enc->idle_pc_supported &&
+                       (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+                       sw_event != DPU_ENC_RC_EVENT_STOP &&
+                       sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+               return 0;
+
+       trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+                        dpu_enc->rc_state, "begin");
+
+       switch (sw_event) {
+       case DPU_ENC_RC_EVENT_KICKOFF:
+               /* cancel delayed off work, if any */
+               if (kthread_cancel_delayed_work_sync(
+                               &dpu_enc->delayed_off_work))
+                       DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+                                       sw_event);
+
+               mutex_lock(&dpu_enc->rc_lock);
+
+               /* return if the resource control is already in ON state */
+               if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+                       DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+                                     DRMID(drm_enc), sw_event);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return 0;
+               } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+                               dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+                       DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+                                     DRMID(drm_enc), sw_event,
+                                     dpu_enc->rc_state);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return -EINVAL;
+               }
+
+               if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+                       _dpu_encoder_irq_control(drm_enc, true);
+               else
+                       _dpu_encoder_resource_control_helper(drm_enc, true);
+
+               dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+               trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                                dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                                "kickoff");
+
+               mutex_unlock(&dpu_enc->rc_lock);
+               break;
+
+       case DPU_ENC_RC_EVENT_FRAME_DONE:
+               /*
+                * mutex lock is not used as this event happens at interrupt
+                * context. And locking is not required as, the other events
+                * like KICKOFF and STOP does a wait-for-idle before executing
+                * the resource_control
+                */
+               if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+                       DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+                                     DRMID(drm_enc), sw_event,
+                                     dpu_enc->rc_state);
+                       return -EINVAL;
+               }
+
+               /*
+                * schedule off work item only when there are no
+                * frames pending
+                */
+               if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+                       DRM_DEBUG_KMS("id:%d skip schedule work\n",
+                                     DRMID(drm_enc));
+                       return 0;
+               }
+
+               kthread_queue_delayed_work(
+                       &disp_thread->worker,
+                       &dpu_enc->delayed_off_work,
+                       msecs_to_jiffies(dpu_enc->idle_timeout));
+
+               trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                                dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                                "frame done");
+               break;
+
+       case DPU_ENC_RC_EVENT_PRE_STOP:
+               /* cancel delayed off work, if any */
+               if (kthread_cancel_delayed_work_sync(
+                               &dpu_enc->delayed_off_work))
+                       DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+                                       sw_event);
+
+               mutex_lock(&dpu_enc->rc_lock);
+
+               if (is_vid_mode &&
+                         dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+                       _dpu_encoder_irq_control(drm_enc, true);
+               }
+               /* skip if is already OFF or IDLE, resources are off already */
+               else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+                               dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+                       DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+                                     DRMID(drm_enc), sw_event,
+                                     dpu_enc->rc_state);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return 0;
+               }
+
+               dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+               trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                                dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                                "pre stop");
+
+               mutex_unlock(&dpu_enc->rc_lock);
+               break;
+
+       case DPU_ENC_RC_EVENT_STOP:
+               mutex_lock(&dpu_enc->rc_lock);
+
+               /* return if the resource control is already in OFF state */
+               if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+                       DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+                                     DRMID(drm_enc), sw_event);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return 0;
+               } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+                       DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+                                 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return -EINVAL;
+               }
+
+               /*
+                * expect to arrive here only from the PRE_OFF or IDLE states;
+                * in IDLE the resources are already disabled
+                */
+               if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+                       _dpu_encoder_resource_control_helper(drm_enc, false);
+
+               dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+               trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                                dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                                "stop");
+
+               mutex_unlock(&dpu_enc->rc_lock);
+               break;
+
+       case DPU_ENC_RC_EVENT_ENTER_IDLE:
+               mutex_lock(&dpu_enc->rc_lock);
+
+               if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+                       DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+                                 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return 0;
+               }
+
+               /*
+                * if we are in ON but a frame was just kicked off,
+                * ignore the IDLE event, it's probably a stale timer event
+                */
+               if (dpu_enc->frame_busy_mask[0]) {
+                       DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+                                 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+                       mutex_unlock(&dpu_enc->rc_lock);
+                       return 0;
+               }
+
+               if (is_vid_mode)
+                       _dpu_encoder_irq_control(drm_enc, false);
+               else
+                       _dpu_encoder_resource_control_helper(drm_enc, false);
+
+               dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+               trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                                dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                                "idle");
+
+               mutex_unlock(&dpu_enc->rc_lock);
+               break;
+
+       default:
+               DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+                         sw_event);
+               trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                                dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                                "error");
+               break;
+       }
+
+       trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+                        dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+                        "end");
+       return 0;
+}
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+                                     struct drm_display_mode *mode,
+                                     struct drm_display_mode *adj_mode)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       struct list_head *connector_list;
+       struct drm_connector *conn = NULL, *conn_iter;
+       struct dpu_rm_hw_iter pp_iter;
+       struct msm_display_topology topology;
+       enum dpu_rm_topology_name topology_name;
+       int i = 0, ret;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       priv = drm_enc->dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+       connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+       trace_dpu_enc_mode_set(DRMID(drm_enc));
+
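+       /* find the connector currently attached to this encoder */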
+       list_for_each_entry(conn_iter, connector_list, head)
+               if (conn_iter->encoder == drm_enc)
+                       conn = conn_iter;
+
+       if (!conn) {
+               DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+               return;
+       } else if (!conn->state) {
+               DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+               return;
+       }
+
+       topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+       /* Reserve dynamic resources now; this is the non-atomic-test phase */
+       ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+                       conn->state, topology, false);
+       if (ret) {
+               DPU_ERROR_ENC(dpu_enc,
+                               "failed to reserve hw resources, %d\n", ret);
+               return;
+       }
+
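+       /* collect the pingpong blocks the resource manager just assigned */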
+       dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+       for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+               dpu_enc->hw_pp[i] = NULL;
+               if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+                       break;
+               dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+       }
+
+       topology_name = dpu_rm_get_topology_name(topology);
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys) {
+                       if (!dpu_enc->hw_pp[i]) {
+                               DPU_ERROR_ENC(dpu_enc,
+                                   "invalid pingpong block for the encoder\n");
+                               return;
+                       }
+                       phys->hw_pp = dpu_enc->hw_pp[i];
+                       phys->connector = conn->state->connector;
+                       phys->topology_name = topology_name;
+                       if (phys->ops.mode_set)
+                               phys->ops.mode_set(phys, mode, adj_mode);
+               }
+       }
+
+       dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       priv = drm_enc->dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+       if (!dpu_kms) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       if (!dpu_enc || !dpu_enc->cur_master) {
+               DPU_ERROR("invalid dpu encoder/master\n");
+               return;
+       }
+
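+       /* for DisplayPort, select this interface as the audio source */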
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+           dpu_enc->cur_master->hw_mdptop &&
+           dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+               dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+                                       dpu_enc->cur_master->hw_mdptop);
+
+       if (dpu_enc->cur_master->hw_mdptop &&
+                       dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+               dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+                               dpu_enc->cur_master->hw_mdptop,
+                               dpu_kms->catalog);
+
+       _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int i;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+                       phys->ops.restore(phys);
+       }
+
+       if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+               dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+       _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int i, ret = 0;
+       struct drm_display_mode *cur_mode = NULL;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+       trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+                            cur_mode->vdisplay);
+
+       dpu_enc->cur_master = NULL;
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+                       DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+                       dpu_enc->cur_master = phys;
+                       break;
+               }
+       }
+
+       if (!dpu_enc->cur_master) {
+               DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+               return;
+       }
+
+       ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+       if (ret) {
+               DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+                               ret);
+               return;
+       }
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (!phys)
+                       continue;
+
+               if (phys != dpu_enc->cur_master) {
+                       if (phys->ops.enable)
+                               phys->ops.enable(phys);
+               }
+
+               if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+                    MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+                       phys->ops.setup_misr(phys, true,
+                                               dpu_enc->misr_frame_count);
+       }
+
+       if (dpu_enc->cur_master->ops.enable)
+               dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+       _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       struct drm_display_mode *mode;
+       int i = 0;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       } else if (!drm_enc->dev) {
+               DPU_ERROR("invalid dev\n");
+               return;
+       } else if (!drm_enc->dev->dev_private) {
+               DPU_ERROR("invalid dev_private\n");
+               return;
+       }
+
+       mode = &drm_enc->crtc->state->adjusted_mode;
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       priv = drm_enc->dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       trace_dpu_enc_disable(DRMID(drm_enc));
+
+       /* wait for idle */
+       dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+       dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.disable)
+                       phys->ops.disable(phys);
+       }
+
+       /* after phys waits for frame-done, should be no more frames pending */
+       if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+               DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+               del_timer_sync(&dpu_enc->frame_done_timer);
+       }
+
+       dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               if (dpu_enc->phys_encs[i])
+                       dpu_enc->phys_encs[i]->connector = NULL;
+       }
+
+       dpu_enc->cur_master = NULL;
+
+       DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+       dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
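+/*
+ * Map an interface type and controller id to the matching interface block
+ * in the hardware catalog; returns INTF_MAX if no match is found.
+ */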
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+               enum dpu_intf_type type, u32 controller_id)
+{
+       int i = 0;
+
+       for (i = 0; i < catalog->intf_count; i++) {
+               if (catalog->intf[i].type == type
+                   && catalog->intf[i].controller_id == controller_id) {
+                       return catalog->intf[i].id;
+               }
+       }
+
+       return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+               struct dpu_encoder_phys *phy_enc)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       unsigned long lock_flags;
+
+       if (!drm_enc || !phy_enc)
+               return;
+
+       DPU_ATRACE_BEGIN("encoder_vblank_callback");
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+       if (dpu_enc->crtc_vblank_cb)
+               dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+       spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+       atomic_inc(&phy_enc->vsync_cnt);
+       DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+               struct dpu_encoder_phys *phy_enc)
+{
+       if (!phy_enc)
+               return;
+
+       DPU_ATRACE_BEGIN("encoder_underrun_callback");
+       atomic_inc(&phy_enc->underrun_cnt);
+       trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+                                 atomic_read(&phy_enc->underrun_cnt));
+       DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+               void (*vbl_cb)(void *), void *vbl_data)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       unsigned long lock_flags;
+       bool enable;
+       int i;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       enable = vbl_cb ? true : false;
+
+       trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+       spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+       dpu_enc->crtc_vblank_cb = vbl_cb;
+       dpu_enc->crtc_vblank_cb_data = vbl_data;
+       spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->ops.control_vblank_irq)
+                       phys->ops.control_vblank_irq(phys, enable);
+       }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+               void (*frame_event_cb)(void *, u32 event),
+               void *frame_event_cb_data)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       unsigned long lock_flags;
+       bool enable;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       enable = frame_event_cb ? true : false;
+
+       trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+       spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+       dpu_enc->crtc_frame_event_cb = frame_event_cb;
+       dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+       spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+               struct drm_encoder *drm_enc,
+               struct dpu_encoder_phys *ready_phys, u32 event)
+{
+       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+       unsigned int i;
+
+       if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+                       | DPU_ENCODER_FRAME_EVENT_ERROR
+                       | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+               if (!dpu_enc->frame_busy_mask[0]) {
+                       /*
+                        * suppress frame_done without a waiter;
+                        * likely autorefresh
+                        */
+                       trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+                                       event, ready_phys->intf_idx);
+                       return;
+               }
+
+               /* One of the physical encoders has become idle */
+               for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+                       if (dpu_enc->phys_encs[i] == ready_phys) {
+                               clear_bit(i, dpu_enc->frame_busy_mask);
+                               trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+                                               dpu_enc->frame_busy_mask[0]);
+                       }
+               }
+
+               if (!dpu_enc->frame_busy_mask[0]) {
+                       atomic_set(&dpu_enc->frame_done_timeout, 0);
+                       del_timer(&dpu_enc->frame_done_timer);
+
+                       dpu_encoder_resource_control(drm_enc,
+                                       DPU_ENC_RC_EVENT_FRAME_DONE);
+
+                       if (dpu_enc->crtc_frame_event_cb)
+                               dpu_enc->crtc_frame_event_cb(
+                                       dpu_enc->crtc_frame_event_cb_data,
+                                       event);
+               }
+       } else {
+               if (dpu_enc->crtc_frame_event_cb)
+                       dpu_enc->crtc_frame_event_cb(
+                               dpu_enc->crtc_frame_event_cb_data, event);
+       }
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+       struct dpu_encoder_virt *dpu_enc = container_of(work,
+                       struct dpu_encoder_virt, delayed_off_work.work);
+
+       if (!dpu_enc) {
+               DPU_ERROR("invalid dpu encoder\n");
+               return;
+       }
+
+       dpu_encoder_resource_control(&dpu_enc->base,
+                                               DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+       dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+                               DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+               struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+       struct dpu_hw_ctl *ctl;
+       int pending_kickoff_cnt;
+       u32 ret = UINT_MAX;
+
+       if (!drm_enc || !phys) {
+               DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+                               drm_enc != NULL, phys != NULL);
+               return;
+       }
+
+       if (!phys->hw_pp) {
+               DPU_ERROR("invalid pingpong hw\n");
+               return;
+       }
+
+       ctl = phys->hw_ctl;
+       if (!ctl || !ctl->ops.trigger_flush) {
+               DPU_ERROR("missing trigger cb\n");
+               return;
+       }
+
+       pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+       if (extra_flush_bits && ctl->ops.update_pending_flush)
+               ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+       ctl->ops.trigger_flush(ctl);
+
+       if (ctl->ops.get_pending_flush)
+               ret = ctl->ops.get_pending_flush(ctl);
+
+       trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+                                   pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+       if (!phys) {
+               DPU_ERROR("invalid argument(s)\n");
+               return;
+       }
+
+       if (!phys->hw_pp) {
+               DPU_ERROR("invalid pingpong hw\n");
+               return;
+       }
+
+       if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+               phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_hw_ctl *ctl;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       ctl = phys_enc->hw_ctl;
+       if (ctl && ctl->ops.trigger_start) {
+               ctl->ops.trigger_start(ctl);
+               trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+       }
+}
+
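+/*
+ * Wait until the atomic counter reaches zero, retrying on spurious or early
+ * wakeups until the wall-clock budget in info->timeout_ms is exhausted.
+ */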
+static int dpu_encoder_helper_wait_event_timeout(
+               int32_t drm_id,
+               int32_t hw_id,
+               struct dpu_encoder_wait_info *info)
+{
+       int rc = 0;
+       s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+       s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
+       s64 time;
+
+       do {
+               rc = wait_event_timeout(*(info->wq),
+                               atomic_read(info->atomic_cnt) == 0,
+                               wait_jiffies);
+               time = ktime_to_ms(ktime_get());
+
+               trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+                                                expected_time,
+                                                atomic_read(info->atomic_cnt));
+       /* If we timed out, counter is valid and time is less, wait again */
+       } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+                       (time < expected_time));
+
+       return rc;
+}
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_hw_ctl *ctl;
+       int rc;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+       ctl = phys_enc->hw_ctl;
+
+       if (!ctl || !ctl->ops.reset)
+               return;
+
+       DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+                     ctl->idx);
+
+       rc = ctl->ops.reset(ctl);
+       if (rc) {
+               DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
+               dpu_dbg_dump(false, __func__, true, true);
+       }
+
+       phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * @dpu_enc: Pointer to virtual encoder structure
+ *
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than in the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+       struct dpu_hw_ctl *ctl;
+       uint32_t i, pending_flush;
+       unsigned long lock_flags;
+
+       if (!dpu_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       pending_flush = 0x0;
+
+       /* update pending counts and trigger kickoff ctl flush atomically */
+       spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+       /* don't perform flush/start operations for slave encoders */
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+                       continue;
+
+               ctl = phys->hw_ctl;
+               if (!ctl)
+                       continue;
+
+               if (phys->split_role != ENC_ROLE_SLAVE)
+                       set_bit(i, dpu_enc->frame_busy_mask);
+               if (!phys->ops.needs_single_flush ||
+                               !phys->ops.needs_single_flush(phys))
+                       _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+               else if (ctl->ops.get_pending_flush)
+                       pending_flush |= ctl->ops.get_pending_flush(ctl);
+       }
+
+       /* for split flush, combine pending flush masks and send to master */
+       if (pending_flush && dpu_enc->cur_master) {
+               _dpu_encoder_trigger_flush(
+                               &dpu_enc->base,
+                               dpu_enc->cur_master,
+                               pending_flush);
+       }
+
+       _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+       spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_phys *phys;
+       unsigned int i;
+       struct dpu_hw_ctl *ctl;
+       struct msm_display_info *disp_info;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       disp_info = &dpu_enc->disp_info;
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               phys = dpu_enc->phys_encs[i];
+
+               if (phys && phys->hw_ctl) {
+                       ctl = phys->hw_ctl;
+                       if (ctl->ops.clear_pending_flush)
+                               ctl->ops.clear_pending_flush(ctl);
+
+                       /* update only for command mode primary ctl */
+                       if ((phys == dpu_enc->cur_master) &&
+                          (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+                           && ctl->ops.trigger_pending)
+                               ctl->ops.trigger_pending(ctl);
+               }
+       }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+               struct drm_display_mode *mode)
+{
+       u64 pclk_rate;
+       u32 pclk_period;
+       u32 line_time;
+
+       /*
+        * For linetime calculation, only operate on master encoder.
+        */
+       if (!dpu_enc->cur_master)
+               return 0;
+
+       if (!dpu_enc->cur_master->ops.get_line_count) {
+               DPU_ERROR("get_line_count function not defined\n");
+               return 0;
+       }
+
+       pclk_rate = mode->clock; /* pixel clock in kHz */
+       if (pclk_rate == 0) {
+               DPU_ERROR("pclk is 0, cannot calculate line time\n");
+               return 0;
+       }
+
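+       /* mode->clock is in kHz, so 10^9 / rate yields the pixel period in ps */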
+       pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+       if (pclk_period == 0) {
+               DPU_ERROR("pclk period is 0\n");
+               return 0;
+       }
+
+       /*
+        * Line time calculation based on Pixel clock and HTOTAL.
+        * Final unit is in ns.
+        */
+       line_time = (pclk_period * mode->htotal) / 1000;
+       if (line_time == 0) {
+               DPU_ERROR("line time calculation is 0\n");
+               return 0;
+       }
+
+       DPU_DEBUG_ENC(dpu_enc,
+                       "clk_rate=%llukHz, clk_period=%u, linetime=%uns\n",
+                       pclk_rate, pclk_period, line_time);
+
+       return line_time;
+}
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+               ktime_t *wakeup_time)
+{
+       struct drm_display_mode *mode;
+       struct dpu_encoder_virt *dpu_enc;
+       u32 cur_line;
+       u32 line_time;
+       u32 vtotal, time_to_vsync;
+       ktime_t cur_time;
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       if (!drm_enc->crtc || !drm_enc->crtc->state) {
+               DPU_ERROR("crtc/crtc state object is NULL\n");
+               return -EINVAL;
+       }
+       mode = &drm_enc->crtc->state->adjusted_mode;
+
+       line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+       if (!line_time)
+               return -EINVAL;
+
+       cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+       vtotal = mode->vtotal;
+       if (cur_line >= vtotal)
+               time_to_vsync = line_time * vtotal;
+       else
+               time_to_vsync = line_time * (vtotal - cur_line);
+
+       if (time_to_vsync == 0) {
+               DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+                               vtotal);
+               return -EINVAL;
+       }
+
+       cur_time = ktime_get();
+       *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+       DPU_DEBUG_ENC(dpu_enc,
+                       "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+                       cur_line, vtotal, time_to_vsync,
+                       ktime_to_ms(cur_time),
+                       ktime_to_ms(*wakeup_time));
+       return 0;
+}
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+       struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+                       vsync_event_timer);
+       struct drm_encoder *drm_enc = &dpu_enc->base;
+       struct msm_drm_private *priv;
+       struct msm_drm_thread *event_thread;
+
+       if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+                       !drm_enc->crtc) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       priv = drm_enc->dev->dev_private;
+
+       if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+               DPU_ERROR("invalid crtc index\n");
+               return;
+       }
+       event_thread = &priv->event_thread[drm_enc->crtc->index];
+       if (!event_thread) {
+               DPU_ERROR("event_thread not found for crtc:%d\n",
+                               drm_enc->crtc->index);
+               return;
+       }
+
+       del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+       struct dpu_encoder_virt *dpu_enc = container_of(work,
+                       struct dpu_encoder_virt, vsync_event_work);
+       ktime_t wakeup_time;
+
+       if (!dpu_enc) {
+               DPU_ERROR("invalid dpu encoder\n");
+               return;
+       }
+
+       if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+               return;
+
+       trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+       mod_timer(&dpu_enc->vsync_event_timer,
+                       nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+               struct dpu_encoder_kickoff_params *params)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_phys *phys;
+       bool needs_hw_reset = false;
+       unsigned int i;
+
+       if (!drm_enc || !params) {
+               DPU_ERROR("invalid args\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+       /* prepare for next kickoff, may include waiting on previous kickoff */
+       DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               phys = dpu_enc->phys_encs[i];
+               if (phys) {
+                       if (phys->ops.prepare_for_kickoff)
+                               phys->ops.prepare_for_kickoff(phys, params);
+                       if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+                               needs_hw_reset = true;
+               }
+       }
+       DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+       dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+       /* if any phys needs reset, reset all phys, in-order */
+       if (needs_hw_reset) {
+               trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+               for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+                       phys = dpu_enc->phys_encs[i];
+                       if (phys && phys->ops.hw_reset)
+                               phys->ops.hw_reset(phys);
+               }
+       }
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_phys *phys;
+       ktime_t wakeup_time;
+       unsigned int i;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       DPU_ATRACE_BEGIN("encoder_kickoff");
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       trace_dpu_enc_kickoff(DRMID(drm_enc));
+
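+       /* arm the frame-done watchdog, scaled to the current refresh rate */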
+       atomic_set(&dpu_enc->frame_done_timeout,
+                       DPU_FRAME_DONE_TIMEOUT * 1000 /
+                       drm_enc->crtc->state->adjusted_mode.vrefresh);
+       mod_timer(&dpu_enc->frame_done_timer, jiffies +
+               ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+       /* All phys encs are ready to go, trigger the kickoff */
+       _dpu_encoder_kickoff_phys(dpu_enc);
+
+       /* allow phys encs to handle any post-kickoff business */
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               phys = dpu_enc->phys_encs[i];
+               if (phys && phys->ops.handle_post_kickoff)
+                       phys->ops.handle_post_kickoff(phys);
+       }
+
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+                       !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+               trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+                                           ktime_to_ms(wakeup_time));
+               mod_timer(&dpu_enc->vsync_event_timer,
+                               nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+       }
+
+       DPU_ATRACE_END("encoder_kickoff");
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_phys *phys;
+       int i;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               phys = dpu_enc->phys_encs[i];
+               if (phys && phys->ops.prepare_commit)
+                       phys->ops.prepare_commit(phys);
+       }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       int i;
+
+       if (!s || !s->private)
+               return -EINVAL;
+
+       dpu_enc = s->private;
+
+       mutex_lock(&dpu_enc->enc_lock);
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (!phys)
+                       continue;
+
+               seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
+                               phys->intf_idx - INTF_0,
+                               atomic_read(&phys->vsync_cnt),
+                               atomic_read(&phys->underrun_cnt));
+
+               switch (phys->intf_mode) {
+               case INTF_MODE_VIDEO:
+                       seq_puts(s, "mode: video\n");
+                       break;
+               case INTF_MODE_CMD:
+                       seq_puts(s, "mode: command\n");
+                       break;
+               default:
+                       seq_puts(s, "mode: ???\n");
+                       break;
+               }
+       }
+       mutex_unlock(&dpu_enc->enc_lock);
+
+       return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+               struct file *file)
+{
+       return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+               const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       int i = 0, rc;
+       char buf[MISR_BUFF_SIZE + 1];
+       size_t buff_copy;
+       u32 frame_count, enable;
+
+       if (!file || !file->private_data)
+               return -EINVAL;
+
+       dpu_enc = file->private_data;
+
+       buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+       if (copy_from_user(buf, user_buf, buff_copy))
+               return -EINVAL;
+
+       buf[buff_copy] = 0; /* end of string */
+
+       if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+               return -EINVAL;
+
+       rc = _dpu_encoder_power_enable(dpu_enc, true);
+       if (rc)
+               return rc;
+
+       mutex_lock(&dpu_enc->enc_lock);
+       dpu_enc->misr_enable = enable;
+       dpu_enc->misr_frame_count = frame_count;
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (!phys || !phys->ops.setup_misr)
+                       continue;
+
+               phys->ops.setup_misr(phys, enable, frame_count);
+       }
+       mutex_unlock(&dpu_enc->enc_lock);
+       _dpu_encoder_power_enable(dpu_enc, false);
+
+       return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+               char __user *user_buff, size_t count, loff_t *ppos)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       int i = 0, len = 0;
+       char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+       int rc;
+
+       if (*ppos)
+               return 0;
+
+       if (!file || !file->private_data)
+               return -EINVAL;
+
+       dpu_enc = file->private_data;
+
+       rc = _dpu_encoder_power_enable(dpu_enc, true);
+       if (rc)
+               return rc;
+
+       mutex_lock(&dpu_enc->enc_lock);
+       if (!dpu_enc->misr_enable) {
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+                       "disabled\n");
+               goto buff_check;
+       } else if (!(dpu_enc->disp_info.capabilities &
+                                               MSM_DISPLAY_CAP_VID_MODE)) {
+               /* MISR collection is only wired up for video mode */
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+                       "unsupported\n");
+               goto buff_check;
+       }
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (!phys || !phys->ops.collect_misr)
+                       continue;
+
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+                       "Intf idx:%d\n", phys->intf_idx - INTF_0);
+               len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+                                       phys->ops.collect_misr(phys));
+       }
+
+buff_check:
+       if (count <= len) {
+               len = 0;
+               goto end;
+       }
+
+       if (copy_to_user(user_buff, buf, len)) {
+               len = -EFAULT;
+               goto end;
+       }
+
+       *ppos += len;   /* increase offset */
+
+end:
+       mutex_unlock(&dpu_enc->enc_lock);
+       _dpu_encoder_power_enable(dpu_enc, false);
+       return len;
+}
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       int i;
+
+       static const struct file_operations debugfs_status_fops = {
+               .open =         _dpu_encoder_debugfs_status_open,
+               .read =         seq_read,
+               .llseek =       seq_lseek,
+               .release =      single_release,
+       };
+
+       static const struct file_operations debugfs_misr_fops = {
+               .open = simple_open,
+               .read = _dpu_encoder_misr_read,
+               .write = _dpu_encoder_misr_setup,
+       };
+
+       char name[DPU_NAME_SIZE];
+
+       if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+               DPU_ERROR("invalid encoder or kms\n");
+               return -EINVAL;
+       }
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       priv = drm_enc->dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+       /* create overall sub-directory for the encoder */
+       dpu_enc->debugfs_root = debugfs_create_dir(name,
+                       drm_enc->dev->primary->debugfs_root);
+       if (!dpu_enc->debugfs_root)
+               return -ENOMEM;
+
+       /* don't error check these */
+       debugfs_create_file("status", 0600,
+               dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+       debugfs_create_file("misr_data", 0600,
+               dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++)
+               if (dpu_enc->phys_encs[i] &&
+                               dpu_enc->phys_encs[i]->ops.late_register)
+                       dpu_enc->phys_encs[i]->ops.late_register(
+                                       dpu_enc->phys_encs[i],
+                                       dpu_enc->debugfs_root);
+
+       return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+
+       if (!drm_enc)
+               return;
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+       return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+       return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+       _dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+               u32 display_caps,
+               struct dpu_encoder_virt *dpu_enc,
+               struct dpu_enc_phys_init_params *params)
+{
+       struct dpu_encoder_phys *enc = NULL;
+
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       /*
+        * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+        * in this function, check up-front.
+        */
+       if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+                       ARRAY_SIZE(dpu_enc->phys_encs)) {
+               DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+                         dpu_enc->num_phys_encs);
+               return -EINVAL;
+       }
+
+       if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+               enc = dpu_encoder_phys_vid_init(params);
+
+               if (IS_ERR_OR_NULL(enc)) {
+                       DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+                               PTR_ERR(enc));
+                       return !enc ? -EINVAL : PTR_ERR(enc);
+               }
+
+               dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+               ++dpu_enc->num_phys_encs;
+       }
+
+       if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+               enc = dpu_encoder_phys_cmd_init(params);
+
+               if (IS_ERR_OR_NULL(enc)) {
+                       DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+                               PTR_ERR(enc));
+                       return !enc ? -EINVAL : PTR_ERR(enc);
+               }
+
+               dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+               ++dpu_enc->num_phys_encs;
+       }
+
+       return 0;
+}
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+       .handle_vblank_virt = dpu_encoder_vblank_callback,
+       .handle_underrun_virt = dpu_encoder_underrun_callback,
+       .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
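+/*
+ * Derive the drm encoder mode from the display interface type and create
+ * the physical encoders for each horizontal tile of the display.
+ */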
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+                                struct dpu_kms *dpu_kms,
+                                struct msm_display_info *disp_info,
+                                int *drm_enc_mode)
+{
+       int ret = 0;
+       int i = 0;
+       enum dpu_intf_type intf_type;
+       struct dpu_enc_phys_init_params phys_params;
+
+       if (!dpu_enc || !dpu_kms) {
+               DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+                               dpu_enc != NULL, dpu_kms != NULL);
+               return -EINVAL;
+       }
+
+       memset(&phys_params, 0, sizeof(phys_params));
+       phys_params.dpu_kms = dpu_kms;
+       phys_params.parent = &dpu_enc->base;
+       phys_params.parent_ops = &dpu_encoder_parent_ops;
+       phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+       DPU_DEBUG("\n");
+
+       if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+               *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+               intf_type = INTF_DSI;
+       } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+               *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+               intf_type = INTF_HDMI;
+       } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+               *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+               intf_type = INTF_DP;
+       } else {
+               DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+               return -EINVAL;
+       }
+
+       WARN_ON(disp_info->num_of_h_tiles < 1);
+
+       dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+       DPU_DEBUG("disp_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+       if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+           (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+               dpu_enc->idle_pc_supported =
+                               dpu_kms->catalog->caps->has_idle_pc;
+
+       mutex_lock(&dpu_enc->enc_lock);
+       for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+               /*
+                * Left-most tile is at index 0, content is controller id
+                * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+                * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+                */
+               u32 controller_id = disp_info->h_tile_instance[i];
+
+               if (disp_info->num_of_h_tiles > 1) {
+                       if (i == 0)
+                               phys_params.split_role = ENC_ROLE_MASTER;
+                       else
+                               phys_params.split_role = ENC_ROLE_SLAVE;
+               } else {
+                       phys_params.split_role = ENC_ROLE_SOLO;
+               }
+
+               DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+                               i, controller_id, phys_params.split_role);
+
+               phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+                                                           intf_type,
+                                                           controller_id);
+               if (phys_params.intf_idx == INTF_MAX) {
+                       DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+                                                 intf_type, controller_id);
+                       ret = -EINVAL;
+               }
+
+               if (!ret) {
+                       ret = dpu_encoder_virt_add_phys_encs(
+                                       disp_info->capabilities,
+                                       dpu_enc, &phys_params);
+                       if (ret)
+                               DPU_ERROR_ENC(dpu_enc,
+                                               "failed to add phys encs\n");
+               }
+       }
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys) {
+                       atomic_set(&phys->vsync_cnt, 0);
+                       atomic_set(&phys->underrun_cnt, 0);
+               }
+       }
+       mutex_unlock(&dpu_enc->enc_lock);
+
+       return ret;
+}
+
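+/*
+ * Fires when a kicked-off frame has not signalled frame-done within the
+ * watchdog period armed in dpu_encoder_kickoff(); reports a frame error.
+ */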
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+       struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+                       frame_done_timer);
+       struct drm_encoder *drm_enc = &dpu_enc->base;
+       struct msm_drm_private *priv;
+       u32 event;
+
+       if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+       priv = drm_enc->dev->dev_private;
+
+       if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+               DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+                             DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+               return;
+       } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+               DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+               return;
+       }
+
+       DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+       event = DPU_ENCODER_FRAME_EVENT_ERROR;
+       trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+       dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+       .mode_set = dpu_encoder_virt_mode_set,
+       .disable = dpu_encoder_virt_disable,
+       .enable = dpu_kms_encoder_enable,
+       .atomic_check = dpu_encoder_virt_atomic_check,
+
+       /* This is called by dpu_kms_encoder_enable */
+       .commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+               .destroy = dpu_encoder_destroy,
+               .late_register = dpu_encoder_late_register,
+               .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+               struct msm_display_info *disp_info)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+       struct drm_encoder *drm_enc = NULL;
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+       int ret = 0;
+
+       dpu_enc = to_dpu_encoder_virt(enc);
+
+       mutex_init(&dpu_enc->enc_lock);
+       ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+                       &drm_enc_mode);
+       if (ret)
+               goto fail;
+
+       dpu_enc->cur_master = NULL;
+       spin_lock_init(&dpu_enc->enc_spinlock);
+
+       atomic_set(&dpu_enc->frame_done_timeout, 0);
+       timer_setup(&dpu_enc->frame_done_timer,
+                       dpu_encoder_frame_done_timeout, 0);
+
+       if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+               timer_setup(&dpu_enc->vsync_event_timer,
+                               dpu_encoder_vsync_event_handler, 0);
+
+       mutex_init(&dpu_enc->rc_lock);
+       kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+                       dpu_encoder_off_work);
+       dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+       kthread_init_work(&dpu_enc->vsync_event_work,
+                       dpu_encoder_vsync_event_work_handler);
+
+       memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+       DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+       return ret;
+
+fail:
+       DPU_ERROR("failed to create encoder\n");
+       if (drm_enc)
+               dpu_encoder_destroy(drm_enc);
+
+       return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+               int drm_enc_mode)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int rc = 0;
+
+       dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+       if (!dpu_enc)
+               return ERR_PTR(-ENOMEM);
+
+       rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+                       drm_enc_mode, NULL);
+       if (rc) {
+               devm_kfree(dev->dev, dpu_enc);
+               return ERR_PTR(rc);
+       }
+
+       drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+       return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+       enum msm_event_wait event)
+{
+       int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int i, ret = 0;
+
+       if (!drm_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return -EINVAL;
+       }
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       DPU_DEBUG_ENC(dpu_enc, "\n");
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               /* validate phys before its ops are dereferenced below */
+               if (!phys)
+                       continue;
+
+               switch (event) {
+               case MSM_ENC_COMMIT_DONE:
+                       fn_wait = phys->ops.wait_for_commit_done;
+                       break;
+               case MSM_ENC_TX_COMPLETE:
+                       fn_wait = phys->ops.wait_for_tx_complete;
+                       break;
+               case MSM_ENC_VBLANK:
+                       fn_wait = phys->ops.wait_for_vblank;
+                       break;
+               default:
+                       DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+                                       event);
+                       return -EINVAL;
+               }
+
+               if (fn_wait) {
+                       DPU_ATRACE_BEGIN("wait_for_completion_event");
+                       ret = fn_wait(phys);
+                       DPU_ATRACE_END("wait_for_completion_event");
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+       struct dpu_encoder_virt *dpu_enc = NULL;
+       int i;
+
+       if (!encoder) {
+               DPU_ERROR("invalid encoder\n");
+               return INTF_MODE_NONE;
+       }
+       dpu_enc = to_dpu_encoder_virt(encoder);
+
+       if (dpu_enc->cur_master)
+               return dpu_enc->cur_master->intf_mode;
+
+       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+               if (phys)
+                       return phys->intf_mode;
+       }
+
+       return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644 (file)
index 0000000..60f809f
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE                   BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR                  BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD             BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE                   BIT(3)
+
+#define IDLE_TIMEOUT   (66 - 16/2)
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources an encoder requires
+ * @intfs:     Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ *                          interface
+ */
+struct dpu_encoder_hw_resources {
+       enum dpu_intf_mode intfs[INTF_MAX];
+       bool needs_cdm;
+       u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays:  bitmask, bit set means the ROI of the commit lies within
+ *                      the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+       unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder:   encoder pointer
+ * @hw_res:    resource table to populate with encoder required resources
+ * @conn_state:        report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+               struct dpu_encoder_hw_resources *hw_res,
+               struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ *     will be called on the next vblank.
+ * @encoder:   encoder pointer
+ * @cb:                callback pointer, provide NULL to deregister and disable IRQs
+ * @data:      user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+               void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ *     will be called after the request is complete, or other events.
+ * @encoder:   encoder pointer
+ * @cb:                callback pointer, provide NULL to deregister
+ * @data:      user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+               void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ *     path (i.e. ctl flush and start) at next appropriate time.
+ *     Immediately: if no previous commit is outstanding.
+ *     Delayed: Block until next trigger can be issued.
+ * @encoder:   encoder pointer
+ * @params:    kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+               struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ *        kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder:   encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ *     (i.e. ctl flush and start) immediately.
+ * @encoder:   encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @drm_encoder: encoder pointer
+ * @event:      event to wait for
+ * MSM_ENC_COMMIT_DONE -  Wait for the hardware to flush the current pending
+ *                        frames at a vblank or ctl_start.
+ *                        Encoders will map this differently depending on the
+ *                        panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
+ *                        the panel. Encoders will map this differently
+ *                        depending on the panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+                                               enum msm_event_wait event);
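+
+/*
+ * A minimal sketch of how an atomic commit path might sequence the kickoff
+ * entry points above; error handling is omitted and the surrounding code
+ * (drm_enc, params) is illustrative only:
+ *
+ *	dpu_encoder_prepare_for_kickoff(drm_enc, &params);
+ *	dpu_encoder_kickoff(drm_enc);
+ *	dpu_encoder_wait_for_event(drm_enc, MSM_ENC_COMMIT_DONE);
+ */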
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder:   encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev:        Pointer to drm device structure
+ * @drm_enc_mode: Corresponding drm_encoder mode (e.g. DRM_MODE_ENCODER_DSI)
+ * Returns:     Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+               struct drm_device *dev,
+               int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev:               Pointer to drm device structure
+ * @enc:               Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+               struct msm_display_info *disp_info);
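+
+/*
+ * Typical bring-up pairs the two calls above. This is a sketch; the encoder
+ * mode, the error-pointer handling and the msm_display_info instance are
+ * illustrative:
+ *
+ *	struct drm_encoder *enc;
+ *
+ *	enc = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
+ *	if (IS_ERR(enc))
+ *		return PTR_ERR(enc);
+ *	dpu_encoder_setup(dev, enc, &disp_info);
+ */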
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ *     atomic commit, before any registers are written
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ *                    and command mode encoders.
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ * @idle_timeout:    idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+                                                       u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644 (file)
index 0000000..c7df8aa
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX   16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS             84
+#define KICKOFF_TIMEOUT_JIFFIES                msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
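+
+/*
+ * The 84 ms above follows from the 24 Hz worst case: one frame takes
+ * 1000 / 24 ~= 41.7 ms, so two frames take ~83.3 ms, rounded up to 84 ms.
+ */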
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ *     split-panel configuration, where one panel is the master and the
+ *     others are slaves.
+ *     Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO:     This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER:   This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE:    This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+       ENC_ROLE_SOLO,
+       ENC_ROLE_MASTER,
+       ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ *                     Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED:  Encoder is disabled
+ * @DPU_ENC_ENABLING:  Encoder transitioning to enabled
+ *                     Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED:   Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET:        Encoder is enabled, but requires a hw_reset
+ *                             to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+       DPU_ENC_DISABLING,
+       DPU_ENC_DISABLED,
+       DPU_ENC_ENABLING,
+       DPU_ENC_ENABLED,
+       DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ *     provides for the physical encoders to use to callback.
+ * @handle_vblank_virt:        Notify virtual encoder of vblank IRQ reception
+ *                     Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ *                     Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder has
+ *                     completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+       void (*handle_vblank_virt)(struct drm_encoder *,
+                       struct dpu_encoder_phys *phys);
+       void (*handle_underrun_virt)(struct drm_encoder *,
+                       struct dpu_encoder_phys *phys);
+       void (*handle_frame_done)(struct drm_encoder *,
+                       struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ *     the containing virtual encoder.
+ * @late_register:             DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit:            MSM Atomic Call, start of atomic commit sequence
+ * @is_master:                 Whether this phys_enc is the current master
+ *                             encoder. Can be switched at enable time. Based
+ *                             on split_role and current mode (CMD/VID).
+ * @mode_fixup:                        DRM Call. Fixup a DRM mode.
+ * @mode_set:                  DRM Call. Set a DRM mode.
+ *                             This likely caches the mode, for use at enable.
+ * @enable:                    DRM Call. Enable a DRM mode.
+ * @disable:                   DRM Call. Disable mode.
+ * @atomic_check:              DRM Call. Atomic check new DRM state.
+ * @destroy:                   DRM Call. Destroy and release resources.
+ * @get_hw_resources:          Populate the structure with the hardware
+ *                             resources that this phys_enc is using.
+ *                             Expect no overlap between phys_encs.
+ * @control_vblank_irq:        Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done:      Wait for hardware to have flushed the
+ *                             current pending frames to hardware
+ * @wait_for_tx_complete:      Wait for hardware to transfer the pixels
+ *                             to the panel
+ * @wait_for_vblank:           Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff:       Do any work necessary prior to a kickoff
+ *                             For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:       Do any necessary post-kickoff work
+ * @trigger_start:             Process start event on physical encoder
+ * @needs_single_flush:                Whether encoder slaves need to be flushed
+ * @setup_misr:                Sets up MISR; enables and disables it based on sysfs
+ * @collect_misr:              Collects MISR data on frame update
+ * @hw_reset:                  Issue HW recovery such as CTL reset and clear
+ *                             DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control:               Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc:           phys encoder can update the vsync_enable status
+ *                              on idle power collapse prepare
+ * @restore:                   Restore all the encoder configs.
+ * @get_line_count:            Obtain current vertical line count
+ */
+struct dpu_encoder_phys_ops {
+       int (*late_register)(struct dpu_encoder_phys *encoder,
+                       struct dentry *debugfs_root);
+       void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+       bool (*is_master)(struct dpu_encoder_phys *encoder);
+       bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+       void (*mode_set)(struct dpu_encoder_phys *encoder,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+       void (*enable)(struct dpu_encoder_phys *encoder);
+       void (*disable)(struct dpu_encoder_phys *encoder);
+       int (*atomic_check)(struct dpu_encoder_phys *encoder,
+                           struct drm_crtc_state *crtc_state,
+                           struct drm_connector_state *conn_state);
+       void (*destroy)(struct dpu_encoder_phys *encoder);
+       void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+                       struct dpu_encoder_hw_resources *hw_res,
+                       struct drm_connector_state *conn_state);
+       int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+       int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+       int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+       int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+       void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+                       struct dpu_encoder_kickoff_params *params);
+       void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+       void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+       bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+       void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
+                               bool enable, u32 frame_count);
+       u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+       void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+       void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+       void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+       void (*restore)(struct dpu_encoder_phys *phys);
+       int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC:    Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR:    Readpointer done unterrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+       INTR_IDX_VSYNC,
+       INTR_IDX_PINGPONG,
+       INTR_IDX_UNDERRUN,
+       INTR_IDX_CTL_START,
+       INTR_IDX_RDPTR,
+       INTR_IDX_MAX,
+};
+
+/**
+ * struct dpu_encoder_irq - tracking structure for interrupts
+ * @name:              string name of interrupt
+ * @intr_type:         Encoder interrupt type
+ * @intr_idx:          Encoder interrupt enumeration
+ * @hw_idx:            HW Block ID
+ * @irq_idx:           IRQ interface lookup index from DPU IRQ framework
+ *                     will be -EINVAL if IRQ is not registered
+ * @cb:                interrupt callback
+ */
+struct dpu_encoder_irq {
+       const char *name;
+       enum dpu_intr_type intr_type;
+       enum dpu_intr_idx intr_idx;
+       int hw_idx;
+       int irq_idx;
+       struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ *     tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ *     phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent:            Pointer to the containing virtual encoder
+ * @connector:         If a mode is set, cached pointer to the active connector
+ * @ops:               Operations exposed to the virtual encoder
+ * @parent_ops:                Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop:         Hardware interface to the top registers
+ * @hw_ctl:            Hardware interface to the ctl registers
+ * @hw_cdm:            Hardware interface to the cdm registers
+ * @cdm_cfg:           Chroma-down hardware configuration
+ * @hw_pp:             Hardware interface to the ping pong registers
+ * @dpu_kms:           Pointer to the dpu_kms top level
+ * @cached_mode:       DRM mode cached at mode_set time, acted on in enable
+ * @split_role:                Role to play in a split-panel configuration
+ * @intf_mode:         Interface mode
+ * @intf_idx:          Interface index on dpu hardware
+ * @topology_name:     topology selected for the display
+ * @enc_spinlock:      Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state:      Enable state tracking
+ * @vblank_refcount:   Reference count of vblank request
+ * @vsync_cnt:         Vsync count for the physical encoder
+ * @underrun_cnt:      Underrun count for the physical encoder
+ * @pending_kickoff_cnt:       Atomic counter tracking the number of kickoffs
+ *                             vs. the number of done/vblank irqs. Should hover
+ *                             between 0-2. Incremented when a new kickoff is
+ *                             scheduled; decremented in the irq handler.
+ * @pending_ctlstart_cnt:      Atomic counter tracking the number of ctl start
+ *                              pending.
+ * @pending_kickoff_wq:                Wait queue for blocking until kickoff completes
+ * @irq:                       IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+       struct drm_encoder *parent;
+       struct drm_connector *connector;
+       struct dpu_encoder_phys_ops ops;
+       const struct dpu_encoder_virt_ops *parent_ops;
+       struct dpu_hw_mdp *hw_mdptop;
+       struct dpu_hw_ctl *hw_ctl;
+       struct dpu_hw_cdm *hw_cdm;
+       struct dpu_hw_cdm_cfg cdm_cfg;
+       struct dpu_hw_pingpong *hw_pp;
+       struct dpu_kms *dpu_kms;
+       struct drm_display_mode cached_mode;
+       enum dpu_enc_split_role split_role;
+       enum dpu_intf_mode intf_mode;
+       enum dpu_intf intf_idx;
+       enum dpu_rm_topology_name topology_name;
+       spinlock_t *enc_spinlock;
+       enum dpu_enc_enable_state enable_state;
+       atomic_t vblank_refcount;
+       atomic_t vsync_cnt;
+       atomic_t underrun_cnt;
+       atomic_t pending_ctlstart_cnt;
+       atomic_t pending_kickoff_cnt;
+       wait_queue_head_t pending_kickoff_wq;
+       struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+       atomic_inc_return(&phys->pending_ctlstart_cnt);
+       return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ *     mode specific operations
+ * @base:      Baseclass physical encoder structure
+ * @hw_intf:   Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+       struct dpu_encoder_phys base;
+       struct dpu_hw_intf *hw_intf;
+       struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ *     mode specific operations
+ * @base:      Baseclass physical encoder structure
+ * @stream_sel:        Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ *                     after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+       struct dpu_encoder_phys base;
+       int stream_sel;
+       bool serialize_wait4pp;
+       int pp_timeout_report_cnt;
+       atomic_t pending_vblank_cnt;
+       wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms:           Pointer to the dpu_kms top level
+ * @parent:            Pointer to the containing virtual encoder
+ * @parent_ops:                Callbacks exposed by the parent to the phys_enc
+ * @split_role:                Role to play in a split-panel configuration
+ * @intf_idx:          Interface index this phys_enc will control
+ * @enc_spinlock:      Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+       struct dpu_kms *dpu_kms;
+       struct drm_encoder *parent;
+       const struct dpu_encoder_virt_ops *parent_ops;
+       enum dpu_enc_split_role split_role;
+       enum dpu_intf intf_idx;
+       spinlock_t *enc_spinlock;
+};
+
+/**
+ * struct dpu_encoder_wait_info - container for passing arguments to irq wait
+ *     functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+       wait_queue_head_t *wq;
+       atomic_t *atomic_cnt;
+       s64 timeout_ms;
+};
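+
+/*
+ * Callers fill this in before invoking dpu_encoder_helper_wait_for_irq(),
+ * as the command-mode wait-for-idle path later in this patch does:
+ *
+ *	struct dpu_encoder_wait_info wait_info;
+ *
+ *	wait_info.wq = &phys_enc->pending_kickoff_wq;
+ *	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ *	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+ *	ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ *			&wait_info);
+ */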
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+               struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+               struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ *     This helper function may be optionally specified by physical
+ *     encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ *     This helper function may be optionally specified by physical
+ *     encoders if they require ctl hw reset. If state is currently
+ *     DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
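+
+/*
+ * A kickoff path might recover a wedged ctl with this helper. Sketch only;
+ * where exactly the check is placed is up to the caller:
+ *
+ *	if (phys_enc->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ *		phys_enc->ops.hw_reset(phys_enc);
+ */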
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+               struct dpu_encoder_phys *phys_enc)
+{
+       if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+               return BLEND_3D_NONE;
+
+       if (phys_enc->split_role == ENC_ROLE_SOLO &&
+           phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+               return BLEND_3D_H_ROW_INT;
+
+       return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ *     This helper function may be used by physical encoders to configure
+ *     the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+               struct dpu_encoder_phys *phys_enc,
+               enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ *     timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq
+ *     note: returns -ETIMEDOUT if the irq is not received within
+ *     wait_info->timeout_ms
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx,
+               struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+               enum dpu_intr_idx intr_idx);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644 (file)
index 0000000..3084675
--- /dev/null
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+               (e) && (e)->base.parent ? \
+               (e)->base.parent->base.id : -1, \
+               (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+               (e) && (e)->base.parent ? \
+               (e)->base.parent->base.id : -1, \
+               (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+       container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS  10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START    4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+               struct dpu_encoder_phys_cmd *cmd_enc)
+{
+       return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+               struct dpu_encoder_phys *phys_enc)
+{
+       return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+               struct dpu_encoder_phys *phys_enc,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adj_mode)
+{
+       if (phys_enc)
+               DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+       return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+                       to_dpu_encoder_phys_cmd(phys_enc);
+       struct dpu_hw_ctl *ctl;
+       struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+       if (!phys_enc)
+               return;
+
+       ctl = phys_enc->hw_ctl;
+       if (!ctl || !ctl->ops.setup_intf_cfg)
+               return;
+
+       intf_cfg.intf = phys_enc->intf_idx;
+       intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+       intf_cfg.stream_sel = cmd_enc->stream_sel;
+       intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+       ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+       struct dpu_encoder_phys *phys_enc = arg;
+       unsigned long lock_flags;
+       int new_cnt;
+       u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+       if (!phys_enc || !phys_enc->hw_pp)
+               return;
+
+       DPU_ATRACE_BEGIN("pp_done_irq");
+       /* notify all synchronous clients first, then asynchronous clients */
+       if (phys_enc->parent_ops->handle_frame_done)
+               phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+                               phys_enc, event);
+
+       spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+       new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+       spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+       trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+                                         phys_enc->hw_pp->idx - PINGPONG_0,
+                                         new_cnt, event);
+
+       /* Signal any waiting atomic commit thread */
+       wake_up_all(&phys_enc->pending_kickoff_wq);
+       DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+       struct dpu_encoder_phys *phys_enc = arg;
+       struct dpu_encoder_phys_cmd *cmd_enc;
+
+       if (!phys_enc || !phys_enc->hw_pp)
+               return;
+
+       DPU_ATRACE_BEGIN("rd_ptr_irq");
+       cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+       if (phys_enc->parent_ops->handle_vblank_virt)
+               phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+                       phys_enc);
+
+       atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+       wake_up_all(&cmd_enc->pending_vblank_wq);
+       DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+       struct dpu_encoder_phys *phys_enc = arg;
+       struct dpu_encoder_phys_cmd *cmd_enc;
+
+       if (!phys_enc || !phys_enc->hw_ctl)
+               return;
+
+       DPU_ATRACE_BEGIN("ctl_start_irq");
+       cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+       atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+       /* Signal any waiting ctl start interrupt */
+       wake_up_all(&phys_enc->pending_kickoff_wq);
+       DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+       struct dpu_encoder_phys *phys_enc = arg;
+
+       if (!phys_enc)
+               return;
+
+       if (phys_enc->parent_ops->handle_underrun_virt)
+               phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+                       phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_irq *irq;
+
+       irq = &phys_enc->irq[INTR_IDX_CTL_START];
+       irq->hw_idx = phys_enc->hw_ctl->idx;
+       irq->irq_idx = -EINVAL;
+
+       irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+       irq->hw_idx = phys_enc->hw_pp->idx;
+       irq->irq_idx = -EINVAL;
+
+       irq = &phys_enc->irq[INTR_IDX_RDPTR];
+       irq->hw_idx = phys_enc->hw_pp->idx;
+       irq->irq_idx = -EINVAL;
+
+       irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+       irq->hw_idx = phys_enc->intf_idx;
+       irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+               struct dpu_encoder_phys *phys_enc,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adj_mode)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+       struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
+       struct dpu_rm_hw_iter iter;
+       int i, instance;
+
+       if (!phys_enc || !mode || !adj_mode) {
+               DPU_ERROR("invalid args\n");
+               return;
+       }
+       phys_enc->cached_mode = *adj_mode;
+       DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+       drm_mode_debug_printmodeline(adj_mode);
+
+       instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+       /* Retrieve previously allocated HW Resources. Shouldn't fail */
+       dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+       for (i = 0; i <= instance; i++) {
+               if (dpu_rm_get_hw(rm, &iter))
+                       phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+       }
+
+       if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+               DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+                               PTR_ERR(phys_enc->hw_ctl));
+               phys_enc->hw_ctl = NULL;
+               return;
+       }
+
+       _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+                       to_dpu_encoder_phys_cmd(phys_enc);
+       u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+       bool do_log = false;
+
+       if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+               return -EINVAL;
+
+       cmd_enc->pp_timeout_report_cnt++;
+       if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+               frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+               do_log = true;
+       } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+               do_log = true;
+       }
+
+       trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+                    phys_enc->hw_pp->idx - PINGPONG_0,
+                    cmd_enc->pp_timeout_report_cnt,
+                    atomic_read(&phys_enc->pending_kickoff_cnt),
+                    frame_event);
+
+       /* to avoid flooding, only log first time, and "dead" time */
+       if (do_log) {
+               DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+                         DRMID(phys_enc->parent),
+                         phys_enc->hw_pp->idx - PINGPONG_0,
+                         phys_enc->hw_ctl->idx - CTL_0,
+                         cmd_enc->pp_timeout_report_cnt,
+                         atomic_read(&phys_enc->pending_kickoff_cnt));
+
+               dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+               dpu_dbg_dump(false, __func__, true, true);
+       }
+
+       atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+       /* request a ctl reset before the next kickoff */
+       phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+       if (phys_enc->parent_ops->handle_frame_done)
+               phys_enc->parent_ops->handle_frame_done(
+                               phys_enc->parent, phys_enc, frame_event);
+
+       return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+                       to_dpu_encoder_phys_cmd(phys_enc);
+       struct dpu_encoder_wait_info wait_info;
+       int ret;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return -EINVAL;
+       }
+
+       wait_info.wq = &phys_enc->pending_kickoff_wq;
+       wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+       wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+       ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+                       &wait_info);
+       if (ret == -ETIMEDOUT)
+               _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+       else if (!ret)
+               cmd_enc->pp_timeout_report_cnt = 0;
+
+       return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+               struct dpu_encoder_phys *phys_enc,
+               bool enable)
+{
+       int ret = 0;
+       int refcount;
+
+       if (!phys_enc || !phys_enc->hw_pp) {
+               DPU_ERROR("invalid encoder\n");
+               return -EINVAL;
+       }
+
+       refcount = atomic_read(&phys_enc->vblank_refcount);
+
+       /* Slave encoders don't report vblank */
+       if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+               goto end;
+
+       /* protect against negative */
+       if (!enable && refcount == 0) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+                     phys_enc->hw_pp->idx - PINGPONG_0,
+                     enable ? "true" : "false", refcount);
+
+       if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+               ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+       else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+               ret = dpu_encoder_helper_unregister_irq(phys_enc,
+                               INTR_IDX_RDPTR);
+
+end:
+       if (ret) {
+               DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+                         DRMID(phys_enc->parent),
+                         phys_enc->hw_pp->idx - PINGPONG_0, ret,
+                         enable ? "true" : "false", refcount);
+       }
+
+       return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+               bool enable)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc;
+
+       if (!phys_enc)
+               return;
+
+       cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+       trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+                       phys_enc->hw_pp->idx - PINGPONG_0,
+                       enable, atomic_read(&phys_enc->vblank_refcount));
+
+       if (enable) {
+               dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+               dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+               dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+               if (dpu_encoder_phys_cmd_is_master(phys_enc))
+                       dpu_encoder_helper_register_irq(phys_enc,
+                                       INTR_IDX_CTL_START);
+       } else {
+               if (dpu_encoder_phys_cmd_is_master(phys_enc))
+                       dpu_encoder_helper_unregister_irq(phys_enc,
+                                       INTR_IDX_CTL_START);
+
+               dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+               dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+               dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+       }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+       struct dpu_hw_tear_check tc_cfg = { 0 };
+       struct drm_display_mode *mode;
+       bool tc_enable = true;
+       u32 vsync_hz;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!phys_enc || !phys_enc->hw_pp) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       mode = &phys_enc->cached_mode;
+
+       DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+       if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+               !phys_enc->hw_pp->ops.enable_tearcheck) {
+               DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+               return;
+       }
+
+       dpu_kms = phys_enc->dpu_kms;
+       if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+               DPU_ERROR("invalid device\n");
+               return;
+       }
+       priv = dpu_kms->dev->dev_private;
+
+       /*
+        * TE default: dsi byte clock calculated based on 70 fps;
+        * around 14 ms to complete a kickoff cycle if te disabled;
+        * vclk_line based on 60 fps; write is faster than read;
+        * init == start == rdptr;
+        *
+        * vsync_count is the ratio of MDP VSYNC clock frequency to LCD panel
+        * frequency divided by the no. of rows (lines) in the LCD panel.
+        */
+       vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+       if (!vsync_hz) {
+               DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+                                vsync_hz);
+               return;
+       }
+
+       tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+
+       /* enable external TE after kickoff to avoid premature autorefresh */
+       tc_cfg.hw_vsync_mode = 0;
+
+       /*
+        * By setting sync_cfg_height to near max register value, we essentially
+        * disable dpu hw generated TE signal, since hw TE will arrive first.
+        * Only caveat is if due to error, we hit wrap-around.
+        */
+       tc_cfg.sync_cfg_height = 0xFFF0;
+       tc_cfg.vsync_init_val = mode->vdisplay;
+       tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+       tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+       tc_cfg.start_pos = mode->vdisplay;
+       tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+       DPU_DEBUG_CMDENC(cmd_enc,
+               "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+               phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+               mode->vtotal, mode->vrefresh);
+       DPU_DEBUG_CMDENC(cmd_enc,
+               "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+               phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+               tc_cfg.rd_ptr_irq);
+       DPU_DEBUG_CMDENC(cmd_enc,
+               "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+               phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+               tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+       DPU_DEBUG_CMDENC(cmd_enc,
+               "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+               phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+               tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+       phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+       phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
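+
+/*
+ * Worked example for the vsync_count computed above, with hypothetical
+ * numbers: a 19.2 MHz vsync clock and a 1080x1920 panel at 60 Hz with
+ * vtotal = 1958 gives vsync_count = 19200000 / (1958 * 60) ~= 163 vsync
+ * clocks per panel line.
+ */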
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+
+       if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+                       || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+               DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
+               return;
+       }
+
+       DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+                       phys_enc->hw_pp->idx - PINGPONG_0);
+       drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+       _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+       dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+               struct dpu_encoder_phys *phys_enc)
+{
+       /*
+        * we do separate flush for each CTL and let
+        * CTL_START synchronize them
+        */
+       return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_hw_ctl *ctl;
+       u32 flush_mask = 0;
+
+       if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+               DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
+               return;
+       }
+
+       dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+       _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+       if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+               goto skip_flush;
+
+       ctl = phys_enc->hw_ctl;
+       ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+       ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+       return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+
+       if (!phys_enc || !phys_enc->hw_pp) {
+               DPU_ERROR("invalid phys encoder\n");
+               return;
+       }
+
+       DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+       if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+               DPU_ERROR("already enabled\n");
+               return;
+       }
+
+       dpu_encoder_phys_cmd_enable_helper(phys_enc);
+       phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+               struct dpu_encoder_phys *phys_enc, bool enable)
+{
+       if (!phys_enc || !phys_enc->hw_pp ||
+                       !phys_enc->hw_pp->ops.connect_external_te)
+               return;
+
+       trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+       phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+               struct dpu_encoder_phys *phys_enc)
+{
+       _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_hw_pingpong *hw_pp;
+
+       if (!phys_enc || !phys_enc->hw_pp)
+               return -EINVAL;
+
+       if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+               return -EINVAL;
+
+       hw_pp = phys_enc->hw_pp;
+       if (!hw_pp->ops.get_line_count)
+               return -EINVAL;
+
+       return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+
+       if (!phys_enc || !phys_enc->hw_pp) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+                     phys_enc->hw_pp->idx - PINGPONG_0,
+                     phys_enc->enable_state);
+
+       if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+               DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+               return;
+       }
+
+       if (phys_enc->hw_pp->ops.enable_tearcheck)
+               phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+       phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+               struct dpu_encoder_phys *phys_enc,
+               struct dpu_encoder_hw_resources *hw_res,
+               struct drm_connector_state *conn_state)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+               to_dpu_encoder_phys_cmd(phys_enc);
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+               DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+               return;
+       }
+
+       DPU_DEBUG_CMDENC(cmd_enc, "\n");
+       hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+               struct dpu_encoder_phys *phys_enc,
+               struct dpu_encoder_kickoff_params *params)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+                       to_dpu_encoder_phys_cmd(phys_enc);
+       int ret;
+
+       if (!phys_enc || !phys_enc->hw_pp) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+       DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+                     phys_enc->hw_pp->idx - PINGPONG_0,
+                     atomic_read(&phys_enc->pending_kickoff_cnt));
+
+       /*
+        * Mark the kickoff request as outstanding. If there is more than one
+        * outstanding request, we have to wait for the previous one to
+        * complete.
+        */
+       ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+       if (ret) {
+               /* force pending_kickoff_cnt 0 to discard failed kickoff */
+               atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+               DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+                         DRMID(phys_enc->parent), ret,
+                         phys_enc->hw_pp->idx - PINGPONG_0);
+       }
+
+       DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+                       phys_enc->hw_pp->idx - PINGPONG_0,
+                       atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_cmd *cmd_enc =
+                       to_dpu_encoder_phys_cmd(phys_enc);
+       struct dpu_encoder_wait_info wait_info;
+       int ret;
+
+       if (!phys_enc || !phys_enc->hw_ctl) {
+               DPU_ERROR("invalid argument(s)\n");
+               return -EINVAL;
+       }
+
+       wait_info.wq = &phys_enc->pending_kickoff_wq;
+       wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+       wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+       ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+                       &wait_info);
+       if (ret == -ETIMEDOUT) {
+               DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+               ret = -EINVAL;
+       } else if (!ret)
+               ret = 0;
+
+       return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+               struct dpu_encoder_phys *phys_enc)
+{
+       int rc;
+       struct dpu_encoder_phys_cmd *cmd_enc;
+
+       if (!phys_enc)
+               return -EINVAL;
+
+       cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+       rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+       if (rc) {
+               DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+                         DRMID(phys_enc->parent), rc,
+                         phys_enc->intf_idx - INTF_0);
+       }
+
+       return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+               struct dpu_encoder_phys *phys_enc)
+{
+       int rc = 0;
+       struct dpu_encoder_phys_cmd *cmd_enc;
+
+       if (!phys_enc)
+               return -EINVAL;
+
+       cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+       /* only required for master controller */
+       if (dpu_encoder_phys_cmd_is_master(phys_enc))
+               rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+       /* required for both controllers */
+       if (!rc && cmd_enc->serialize_wait4pp)
+               dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+       return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+               struct dpu_encoder_phys *phys_enc)
+{
+       int rc = 0;
+       struct dpu_encoder_phys_cmd *cmd_enc;
+       struct dpu_encoder_wait_info wait_info;
+
+       if (!phys_enc)
+               return -EINVAL;
+
+       cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+       /* only required for master controller */
+       if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+               return rc;
+
+       wait_info.wq = &cmd_enc->pending_vblank_wq;
+       wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+       wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+       atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+       rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+                       &wait_info);
+
+       return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+               struct dpu_encoder_phys *phys_enc)
+{
+       if (!phys_enc)
+               return;
+
+       /*
+        * re-enable external TE, either for the first time after enabling
+        * or if disabled for Autorefresh
+        */
+       _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+               struct dpu_encoder_phys *phys_enc)
+{
+       if (!phys_enc)
+               return;
+
+       dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+               struct dpu_encoder_phys_ops *ops)
+{
+       ops->is_master = dpu_encoder_phys_cmd_is_master;
+       ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+       ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+       ops->enable = dpu_encoder_phys_cmd_enable;
+       ops->disable = dpu_encoder_phys_cmd_disable;
+       ops->destroy = dpu_encoder_phys_cmd_destroy;
+       ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+       ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+       ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+       ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+       ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+       ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+       ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+       ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+       ops->hw_reset = dpu_encoder_helper_hw_reset;
+       ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+       ops->restore = dpu_encoder_phys_cmd_enable_helper;
+       ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+       ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+       ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+               struct dpu_enc_phys_init_params *p)
+{
+       struct dpu_encoder_phys *phys_enc = NULL;
+       struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+       struct dpu_hw_mdp *hw_mdp;
+       struct dpu_encoder_irq *irq;
+       int i, ret = 0;
+
+       DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+       cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+       if (!cmd_enc) {
+               ret = -ENOMEM;
+               DPU_ERROR("failed to allocate\n");
+               goto fail;
+       }
+       phys_enc = &cmd_enc->base;
+
+       hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+       if (IS_ERR_OR_NULL(hw_mdp)) {
+               ret = PTR_ERR(hw_mdp);
+               DPU_ERROR("failed to get mdptop\n");
+               goto fail_mdp_init;
+       }
+       phys_enc->hw_mdptop = hw_mdp;
+       phys_enc->intf_idx = p->intf_idx;
+
+       dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+       phys_enc->parent = p->parent;
+       phys_enc->parent_ops = p->parent_ops;
+       phys_enc->dpu_kms = p->dpu_kms;
+       phys_enc->split_role = p->split_role;
+       phys_enc->intf_mode = INTF_MODE_CMD;
+       phys_enc->enc_spinlock = p->enc_spinlock;
+       cmd_enc->stream_sel = 0;
+       phys_enc->enable_state = DPU_ENC_DISABLED;
+       for (i = 0; i < INTR_IDX_MAX; i++) {
+               irq = &phys_enc->irq[i];
+               INIT_LIST_HEAD(&irq->cb.list);
+               irq->irq_idx = -EINVAL;
+               irq->hw_idx = -EINVAL;
+               irq->cb.arg = phys_enc;
+       }
+
+       irq = &phys_enc->irq[INTR_IDX_CTL_START];
+       irq->name = "ctl_start";
+       irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+       irq->intr_idx = INTR_IDX_CTL_START;
+       irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+       irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+       irq->name = "pp_done";
+       irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+       irq->intr_idx = INTR_IDX_PINGPONG;
+       irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+       irq = &phys_enc->irq[INTR_IDX_RDPTR];
+       irq->name = "pp_rd_ptr";
+       irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+       irq->intr_idx = INTR_IDX_RDPTR;
+       irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+       irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+       irq->name = "underrun";
+       irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+       irq->intr_idx = INTR_IDX_UNDERRUN;
+       irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+       atomic_set(&phys_enc->vblank_refcount, 0);
+       atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+       atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+       atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+       init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+       init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+       DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+       return phys_enc;
+
+fail_mdp_init:
+       kfree(cmd_enc);
+fail:
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644 (file)
index 0000000..14fc7c2
--- /dev/null
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+               (e) && (e)->base.parent ? \
+               (e)->base.parent->base.id : -1, \
+               (e) && (e)->hw_intf ? \
+               (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+               (e) && (e)->base.parent ? \
+               (e)->base.parent->base.id : -1, \
+               (e) && (e)->hw_intf ? \
+               (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+       container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+               struct dpu_encoder_phys *phys_enc)
+{
+       bool ret = false;
+
+       if (phys_enc->split_role != ENC_ROLE_SLAVE)
+               ret = true;
+
+       return ret;
+}
+
+static void drm_mode_to_intf_timing_params(
+               const struct dpu_encoder_phys_vid *vid_enc,
+               const struct drm_display_mode *mode,
+               struct intf_timing_params *timing)
+{
+       memset(timing, 0, sizeof(*timing));
+
+       if ((mode->htotal < mode->hsync_end)
+                       || (mode->hsync_start < mode->hdisplay)
+                       || (mode->vtotal < mode->vsync_end)
+                       || (mode->vsync_start < mode->vdisplay)
+                       || (mode->hsync_end < mode->hsync_start)
+                       || (mode->vsync_end < mode->vsync_start)) {
+               DPU_ERROR(
+                   "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+                               mode->hsync_start, mode->hsync_end,
+                               mode->htotal, mode->hdisplay);
+               DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+                               mode->vsync_start, mode->vsync_end,
+                               mode->vtotal, mode->vdisplay);
+               return;
+       }
+
+       /*
+        * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+        *  Active Region      Front Porch   Sync   Back Porch
+        * <-----------------><------------><-----><----------->
+        * <- [hv]display --->
+        * <--------- [hv]sync_start ------>
+        * <----------------- [hv]sync_end ------->
+        * <---------------------------- [hv]total ------------->
+        */
+       timing->width = mode->hdisplay; /* active width */
+       timing->height = mode->vdisplay;        /* active height */
+       timing->xres = timing->width;
+       timing->yres = timing->height;
+       timing->h_back_porch = mode->htotal - mode->hsync_end;
+       timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+       timing->v_back_porch = mode->vtotal - mode->vsync_end;
+       timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+       timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+       timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+       timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+       timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+       timing->border_clr = 0;
+       timing->underflow_clr = 0xff;
+       timing->hsync_skew = mode->hskew;
+
+       /* DSI controller cannot handle active-low sync signals. */
+       if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+               timing->hsync_polarity = 0;
+               timing->vsync_polarity = 0;
+       }
+
+       /*
+        * For eDP only:
+        * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+        * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+        */
+       /*
+        * if (vid_enc->hw->cap->type == INTF_EDP) {
+        * display_v_start += mode->htotal - mode->hsync_start;
+        * display_v_end -= mode->hsync_start - mode->hdisplay;
+        * }
+        */
+}
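
To make the porch arithmetic above concrete, here is a self-contained sketch using hypothetical CEA-style 1080p60 timings; none of these numbers come from this patch, and struct sketch_mode is illustrative only, not the kernel's drm_display_mode.

#include <stdio.h>

/* Hypothetical 1080p60 timings (illustrative, not from this patch). */
struct sketch_mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

int main(void)
{
	struct sketch_mode m = { 1920, 2008, 2052, 2200,
				 1080, 1084, 1089, 1125 };

	printf("h_front_porch     = %d\n", m.hsync_start - m.hdisplay);  /* 88  */
	printf("hsync_pulse_width = %d\n", m.hsync_end - m.hsync_start); /* 44  */
	printf("h_back_porch      = %d\n", m.htotal - m.hsync_end);      /* 148 */
	printf("v_front_porch     = %d\n", m.vsync_start - m.vdisplay);  /* 4   */
	printf("vsync_pulse_width = %d\n", m.vsync_end - m.vsync_start); /* 5   */
	printf("v_back_porch      = %d\n", m.vtotal - m.vsync_end);      /* 36  */
	return 0;
}
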
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+       u32 active = timing->xres;
+       u32 inactive =
+           timing->h_back_porch + timing->h_front_porch +
+           timing->hsync_pulse_width;
+       return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+       u32 active = timing->yres;
+       u32 inactive =
+           timing->v_back_porch + timing->v_front_porch +
+           timing->vsync_pulse_width;
+       return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ *     Number of fetch lines in vertical front porch
+ * @vid_enc: Pointer to the video-mode physical encoder
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need the number of
+ * lines dictated by the chip's worst-case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+               struct dpu_encoder_phys_vid *vid_enc,
+               const struct intf_timing_params *timing)
+{
+       u32 worst_case_needed_lines =
+           vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+       u32 start_of_frame_lines =
+           timing->v_back_porch + timing->vsync_pulse_width;
+       u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+       u32 actual_vfp_lines = 0;
+
+       /* Fetch must be outside active lines, otherwise undefined. */
+       if (start_of_frame_lines >= worst_case_needed_lines) {
+               DPU_DEBUG_VIDENC(vid_enc,
+                               "prog fetch is not needed, large vbp+vsw\n");
+               actual_vfp_lines = 0;
+       } else if (timing->v_front_porch < needed_vfp_lines) {
+               /* Warn fetch needed, but not enough porch in panel config */
+               pr_warn_once
+                       ("low vbp+vfp may lead to perf issues in some cases\n");
+               DPU_DEBUG_VIDENC(vid_enc,
+                               "less vfp than fetch req, using entire vfp\n");
+               actual_vfp_lines = timing->v_front_porch;
+       } else {
+               DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+               actual_vfp_lines = needed_vfp_lines;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc,
+               "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+               timing->v_front_porch, timing->v_back_porch,
+               timing->vsync_pulse_width);
+       DPU_DEBUG_VIDENC(vid_enc,
+               "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+               worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+       return actual_vfp_lines;
+}
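
The three branches above amount to clamping worst_case - (vbp + vsw) into the range [0, vfp]. A minimal userspace sketch of that clamp, with hypothetical names and numbers (prefetch_lines is not a function in this patch):

#include <stdio.h>

/* needed = worst_case - (vbp + vsw), clamped to the range [0, vfp]. */
static unsigned int prefetch_lines(unsigned int worst_case, unsigned int vbp,
				   unsigned int vsw, unsigned int vfp)
{
	unsigned int start_of_frame = vbp + vsw;

	if (start_of_frame >= worst_case)
		return 0;			/* vbp + vsw absorb the latency */
	if (vfp < worst_case - start_of_frame)
		return vfp;			/* porch too small: use all of it */
	return worst_case - start_of_frame;	/* prefetch fits in the porch */
}

int main(void)
{
	printf("%u %u %u\n",
	       prefetch_lines(25, 36, 5, 4),	/* 0: vbp + vsw >= worst case */
	       prefetch_lines(25, 8, 2, 4),	/* 4: vfp smaller than needed */
	       prefetch_lines(25, 8, 2, 40));	/* 15: needed lines fit in vfp */
	return 0;
}
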
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ *     the start of fetch into the vertical front porch for cases where the
+ *     vsync pulse width and vertical back porch times are insufficient
+ *
+ *     Gets the number of lines to prefetch, then calculates the VSYNC
+ *     counter value. The HW layer requires the VSYNC counter of the first
+ *     pixel of the target VFP line.
+ *
+ * @phys_enc: Pointer to the physical encoder
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+                                     const struct intf_timing_params *timing)
+{
+       struct dpu_encoder_phys_vid *vid_enc =
+               to_dpu_encoder_phys_vid(phys_enc);
+       struct intf_prog_fetch f = { 0 };
+       u32 vfp_fetch_lines = 0;
+       u32 horiz_total = 0;
+       u32 vert_total = 0;
+       u32 vfp_fetch_start_vsync_counter = 0;
+       unsigned long lock_flags;
+
+       if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+               return;
+
+       vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+       if (vfp_fetch_lines) {
+               vert_total = get_vertical_total(timing);
+               horiz_total = get_horizontal_total(timing);
+               vfp_fetch_start_vsync_counter =
+                   (vert_total - vfp_fetch_lines) * horiz_total + 1;
+               f.enable = 1;
+               f.fetch_start = vfp_fetch_start_vsync_counter;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc,
+               "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+               vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+       spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+       vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+       spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
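
As a worked instance of the fetch_start formula above, with hypothetical totals (not from this patch): whole lines are converted to a pixel count by multiplying by the line length, and the +1 selects the first pixel of the target VFP line.

#include <stdio.h>

int main(void)
{
	unsigned int vert_total = 1125, horiz_total = 2200, vfp_fetch_lines = 4;
	unsigned int fetch_start =
		(vert_total - vfp_fetch_lines) * horiz_total + 1;

	printf("%u\n", fetch_start);	/* (1125 - 4) * 2200 + 1 == 2466201 */
	return 0;
}
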
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+               struct dpu_encoder_phys *phys_enc,
+               const struct drm_display_mode *mode,
+               struct drm_display_mode *adj_mode)
+{
+       if (phys_enc)
+               DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+       /*
+        * Modifying the mode here has consequences when the adjusted mode
+        * comes back to us in mode_set, so leave it untouched.
+        */
+       return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+       struct drm_display_mode mode;
+       struct intf_timing_params timing_params = { 0 };
+       const struct dpu_format *fmt = NULL;
+       u32 fmt_fourcc = DRM_FORMAT_RGB888;
+       unsigned long lock_flags;
+       struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+       if (!phys_enc || !phys_enc->hw_ctl ||
+                       !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+               DPU_ERROR("invalid encoder %d\n", phys_enc != 0);
+               return;
+       }
+
+       mode = phys_enc->cached_mode;
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+               DPU_ERROR("timing engine setup is not supported\n");
+               return;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+       drm_mode_debug_printmodeline(&mode);
+
+       if (phys_enc->split_role != ENC_ROLE_SOLO) {
+               mode.hdisplay >>= 1;
+               mode.htotal >>= 1;
+               mode.hsync_start >>= 1;
+               mode.hsync_end >>= 1;
+
+               DPU_DEBUG_VIDENC(vid_enc,
+                       "split_role %d, halve horizontal %d %d %d %d\n",
+                       phys_enc->split_role,
+                       mode.hdisplay, mode.htotal,
+                       mode.hsync_start, mode.hsync_end);
+       }
+
+       drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+       fmt = dpu_get_dpu_format(fmt_fourcc);
+       DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+       intf_cfg.intf = vid_enc->hw_intf->idx;
+       intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+       intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+       intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+       spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+       vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+                       &timing_params, fmt);
+       phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+       spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+       programmable_fetch_config(phys_enc, &timing_params);
+
+       vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+       struct dpu_encoder_phys *phys_enc = arg;
+       struct dpu_hw_ctl *hw_ctl;
+       unsigned long lock_flags;
+       u32 flush_register = 0;
+       int new_cnt = -1, old_cnt = -1;
+
+       if (!phys_enc)
+               return;
+
+       hw_ctl = phys_enc->hw_ctl;
+       if (!hw_ctl)
+               return;
+
+       DPU_ATRACE_BEGIN("vblank_irq");
+
+       if (phys_enc->parent_ops->handle_vblank_virt)
+               phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+                               phys_enc);
+
+       old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+       /*
+        * Only decrement the pending flush count if we've actually flushed
+        * hardware. Due to SW IRQ latency, vblank may have already happened,
+        * so we need to double-check with HW that it accepted the flush bits.
+        */
+       spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+       if (hw_ctl && hw_ctl->ops.get_flush_register)
+               flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+       if (flush_register == 0)
+               new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+                               -1, 0);
+       spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+       /* Signal any waiting atomic commit thread */
+       wake_up_all(&phys_enc->pending_kickoff_wq);
+       DPU_ATRACE_END("vblank_irq");
+}
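
The decrement above leans on atomic_add_unless() semantics: the counter only moves if it is not already zero, so a vblank that arrives without a pending kickoff cannot drive pending_kickoff_cnt negative. A single-threaded userspace analogue of that guard (illustrative only, not kernel code):

#include <stdio.h>

/* Decrement *cnt unless it is already 0; returns non-zero if it changed. */
static int dec_unless_zero(int *cnt)
{
	if (*cnt == 0)
		return 0;	/* nothing pending: leave the counter alone */
	--*cnt;
	return 1;
}

int main(void)
{
	int pending = 1;

	printf("%d\n", dec_unless_zero(&pending));	/* 1: count went 1 -> 0 */
	printf("%d\n", dec_unless_zero(&pending));	/* 0: already at 0 */
	return 0;
}
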
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+       struct dpu_encoder_phys *phys_enc = arg;
+
+       if (!phys_enc)
+               return;
+
+       if (phys_enc->parent_ops->handle_underrun_virt)
+               phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+                       phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+       if (!phys_enc)
+               return false;
+
+       return phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+               struct dpu_encoder_phys *phys_enc)
+{
+       return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_irq *irq;
+
+       /*
+        * Initialize irq->hw_idx only while the irq is not registered.
+        * This prevents invalidating irq->irq_idx, as modeset may be
+        * called many times during DFPS (dynamic frame rate switching).
+        */
+
+       irq = &phys_enc->irq[INTR_IDX_VSYNC];
+       if (irq->irq_idx < 0)
+               irq->hw_idx = phys_enc->intf_idx;
+
+       irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+       if (irq->irq_idx < 0)
+               irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+               struct dpu_encoder_phys *phys_enc,
+               struct drm_display_mode *mode,
+               struct drm_display_mode *adj_mode)
+{
+       struct dpu_rm *rm;
+       struct dpu_rm_hw_iter iter;
+       int i, instance;
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc || !phys_enc->dpu_kms) {
+               DPU_ERROR("invalid encoder/kms\n");
+               return;
+       }
+
+       rm = &phys_enc->dpu_kms->rm;
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+       if (adj_mode) {
+               phys_enc->cached_mode = *adj_mode;
+               drm_mode_debug_printmodeline(adj_mode);
+               DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+       }
+
+       instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+       /* Retrieve previously allocated HW Resources. Shouldn't fail */
+       dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+       for (i = 0; i <= instance; i++) {
+               if (dpu_rm_get_hw(rm, &iter))
+                       phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+       }
+       if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+               DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+                               PTR_ERR(phys_enc->hw_ctl));
+               phys_enc->hw_ctl = NULL;
+               return;
+       }
+
+       _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+               struct dpu_encoder_phys *phys_enc,
+               bool enable)
+{
+       int ret = 0;
+       struct dpu_encoder_phys_vid *vid_enc;
+       int refcount;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return -EINVAL;
+       }
+
+       refcount = atomic_read(&phys_enc->vblank_refcount);
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+       /* Slave encoders don't report vblank */
+       if (!dpu_encoder_phys_vid_is_master(phys_enc))
+               goto end;
+
+       /* protect against a negative refcount */
+       if (!enable && refcount == 0) {
+               ret = -EINVAL;
+               goto end;
+       }
+
+       DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+                     atomic_read(&phys_enc->vblank_refcount));
+
+       if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+               ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+       else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+               ret = dpu_encoder_helper_unregister_irq(phys_enc,
+                               INTR_IDX_VSYNC);
+
+end:
+       if (ret) {
+               DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+                         DRMID(phys_enc->parent),
+                         vid_enc->hw_intf->idx - INTF_0, ret, enable,
+                         refcount);
+       }
+       return ret;
+}
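
The enable/disable paths above follow the usual edge-triggered refcount pattern: the VSYNC IRQ is registered only on the 0 -> 1 transition and unregistered only on the 1 -> 0 transition, so nested requests balance out. A minimal sketch of that discipline, with hypothetical helpers (arm_irq/disarm_irq are stand-ins, not functions from this patch):

#include <stdio.h>

static int refcount;				/* plays the role of vblank_refcount */

static void arm_irq(void)    { puts("irq registered");   }
static void disarm_irq(void) { puts("irq unregistered"); }

static void vblank_get(void)
{
	if (++refcount == 1)			/* 0 -> 1: first user arms the IRQ */
		arm_irq();
}

static void vblank_put(void)
{
	if (refcount && --refcount == 0)	/* 1 -> 0: last user disarms it */
		disarm_irq();
}

int main(void)
{
	vblank_get();
	vblank_get();				/* registers only once */
	vblank_put();
	vblank_put();				/* unregisters only once */
	return 0;
}
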
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+       struct msm_drm_private *priv;
+       struct dpu_encoder_phys_vid *vid_enc;
+       struct dpu_hw_intf *intf;
+       struct dpu_hw_ctl *ctl;
+       u32 flush_mask = 0;
+
+       if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+                       !phys_enc->parent->dev->dev_private) {
+               DPU_ERROR("invalid encoder/device\n");
+               return;
+       }
+       priv = phys_enc->parent->dev->dev_private;
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       intf = vid_enc->hw_intf;
+       ctl = phys_enc->hw_ctl;
+       if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+               DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+                               vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+               return;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+       if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+               return;
+
+       dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+       dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+       /*
+        * For single flush cases (dual-ctl or pp-split), skip setting the
+        * flush bit for the slave intf, since both intfs use the same ctl
+        * and HW will only flush the master.
+        */
+       if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+               !dpu_encoder_phys_vid_is_master(phys_enc))
+               goto skip_flush;
+
+       ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+       ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+       DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+               ctl->idx - CTL_0, flush_mask);
+
+       /* ctl_flush & timing engine enable will be triggered by framework */
+       if (phys_enc->enable_state == DPU_ENC_DISABLED)
+               phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       DPU_DEBUG_VIDENC(vid_enc, "\n");
+       kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+               struct dpu_encoder_phys *phys_enc,
+               struct dpu_encoder_hw_resources *hw_res,
+               struct drm_connector_state *conn_state)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc || !hw_res) {
+               DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+                               phys_enc != 0, hw_res != 0, conn_state != 0);
+               return;
+       }
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       if (!vid_enc->hw_intf) {
+               DPU_ERROR("invalid arg(s), hw_intf\n");
+               return;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc, "\n");
+       hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+               struct dpu_encoder_phys *phys_enc, bool notify)
+{
+       struct dpu_encoder_wait_info wait_info;
+       int ret;
+
+       if (!phys_enc) {
+               pr_err("invalid encoder\n");
+               return -EINVAL;
+       }
+
+       wait_info.wq = &phys_enc->pending_kickoff_wq;
+       wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+       wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+       if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+               if (notify && phys_enc->parent_ops->handle_frame_done)
+                       phys_enc->parent_ops->handle_frame_done(
+                                       phys_enc->parent, phys_enc,
+                                       DPU_ENCODER_FRAME_EVENT_DONE);
+               return 0;
+       }
+
+       /* Wait for kickoff to complete */
+       ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+                       &wait_info);
+
+       if (ret == -ETIMEDOUT) {
+               dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+       } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done) {
+               phys_enc->parent_ops->handle_frame_done(
+                               phys_enc->parent, phys_enc,
+                               DPU_ENCODER_FRAME_EVENT_DONE);
+       }
+
+       return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+               struct dpu_encoder_phys *phys_enc)
+{
+       return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+               struct dpu_encoder_phys *phys_enc,
+               struct dpu_encoder_kickoff_params *params)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+       struct dpu_hw_ctl *ctl;
+       int rc;
+
+       if (!phys_enc || !params) {
+               DPU_ERROR("invalid encoder/parameters\n");
+               return;
+       }
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+       ctl = phys_enc->hw_ctl;
+       if (!ctl || !ctl->ops.wait_reset_status)
+               return;
+
+       /*
+        * HW supports hardware-initiated CTL reset, so before we kick off a
+        * new frame we need to check for, and wait on, completion of any
+        * HW-initiated CTL reset.
+        */
+       rc = ctl->ops.wait_reset_status(ctl);
+       if (rc) {
+               DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+                               ctl->idx, rc);
+               dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+               dpu_dbg_dump(false, __func__, true, true);
+       }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+       struct msm_drm_private *priv;
+       struct dpu_encoder_phys_vid *vid_enc;
+       unsigned long lock_flags;
+       int ret;
+
+       if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+                       !phys_enc->parent->dev->dev_private) {
+               DPU_ERROR("invalid encoder/device\n");
+               return;
+       }
+       priv = phys_enc->parent->dev->dev_private;
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+               DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+                               vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+               return;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+       if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+               return;
+
+       if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+               DPU_ERROR("already disabled\n");
+               return;
+       }
+
+       spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+       vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+       if (dpu_encoder_phys_vid_is_master(phys_enc))
+               dpu_encoder_phys_inc_pending(phys_enc);
+       spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+       /*
+        * Wait for a vsync so we know that ENABLE=0 has latched before
+        * the (connector) source of the vsyncs gets disabled. Otherwise
+        * we end up in a funny state if we re-enable before the disable
+        * latches, with the result that some of the settings changes for
+        * the new modeset (like the new scanout buffer) don't latch
+        * properly.
+        */
+       if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+               ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+               if (ret) {
+                       atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+                       DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+                                 DRMID(phys_enc->parent),
+                                 vid_enc->hw_intf->idx - INTF_0, ret);
+               }
+       }
+
+       phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+               struct dpu_encoder_phys *phys_enc)
+{
+       unsigned long lock_flags;
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc) {
+               DPU_ERROR("invalid encoder\n");
+               return;
+       }
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+       /*
+        * Video mode must flush the CTL before enabling the timing engine,
+        * so video encoders need to turn on their interfaces now.
+        */
+       if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+               trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+                                   vid_enc->hw_intf->idx - INTF_0);
+               spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+               vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+               spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+               phys_enc->enable_state = DPU_ENC_ENABLED;
+       }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+               bool enable)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+       int ret;
+
+       if (!phys_enc)
+               return;
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+       trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+                           vid_enc->hw_intf->idx - INTF_0,
+                           enable,
+                           atomic_read(&phys_enc->vblank_refcount));
+
+       if (enable) {
+               ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+               if (ret)
+                       return;
+
+               dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+       } else {
+               dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+               dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+       }
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+                                               bool enable, u32 frame_count)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc)
+               return;
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+       if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+               vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+                                                       enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc)
+               return 0;
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+       return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+               vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct dpu_encoder_phys_vid *vid_enc;
+
+       if (!phys_enc)
+               return -EINVAL;
+
+       if (!dpu_encoder_phys_vid_is_master(phys_enc))
+               return -EINVAL;
+
+       vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+       if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+               return -EINVAL;
+
+       return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+       ops->is_master = dpu_encoder_phys_vid_is_master;
+       ops->mode_set = dpu_encoder_phys_vid_mode_set;
+       ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+       ops->enable = dpu_encoder_phys_vid_enable;
+       ops->disable = dpu_encoder_phys_vid_disable;
+       ops->destroy = dpu_encoder_phys_vid_destroy;
+       ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+       ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+       ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+       ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+       ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+       ops->irq_control = dpu_encoder_phys_vid_irq_control;
+       ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+       ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+       ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+       ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+       ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+       ops->hw_reset = dpu_encoder_helper_hw_reset;
+       ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+               struct dpu_enc_phys_init_params *p)
+{
+       struct dpu_encoder_phys *phys_enc = NULL;
+       struct dpu_encoder_phys_vid *vid_enc = NULL;
+       struct dpu_rm_hw_iter iter;
+       struct dpu_hw_mdp *hw_mdp;
+       struct dpu_encoder_irq *irq;
+       int i, ret = 0;
+
+       if (!p) {
+               ret = -EINVAL;
+               goto fail;
+       }
+
+       vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+       if (!vid_enc) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       phys_enc = &vid_enc->base;
+
+       hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+       if (IS_ERR_OR_NULL(hw_mdp)) {
+               ret = PTR_ERR(hw_mdp);
+               DPU_ERROR("failed to get mdptop\n");
+               goto fail;
+       }
+       phys_enc->hw_mdptop = hw_mdp;
+       phys_enc->intf_idx = p->intf_idx;
+
+       /*
+        * The hw_intf resource is permanently assigned to this encoder;
+        * other resources are allocated at atomic commit time by use case.
+        */
+       dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+       while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+               struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+               if (hw_intf->idx == p->intf_idx) {
+                       vid_enc->hw_intf = hw_intf;
+                       break;
+               }
+       }
+
+       if (!vid_enc->hw_intf) {
+               ret = -EINVAL;
+               DPU_ERROR("failed to get hw_intf\n");
+               goto fail;
+       }
+
+       DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+       dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+       phys_enc->parent = p->parent;
+       phys_enc->parent_ops = p->parent_ops;
+       phys_enc->dpu_kms = p->dpu_kms;
+       phys_enc->split_role = p->split_role;
+       phys_enc->intf_mode = INTF_MODE_VIDEO;
+       phys_enc->enc_spinlock = p->enc_spinlock;
+       for (i = 0; i < INTR_IDX_MAX; i++) {
+               irq = &phys_enc->irq[i];
+               INIT_LIST_HEAD(&irq->cb.list);
+               irq->irq_idx = -EINVAL;
+               irq->hw_idx = -EINVAL;
+               irq->cb.arg = phys_enc;
+       }
+
+       irq = &phys_enc->irq[INTR_IDX_VSYNC];
+       irq->name = "vsync_irq";
+       irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+       irq->intr_idx = INTR_IDX_VSYNC;
+       irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+       irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+       irq->name = "underrun";
+       irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+       irq->intr_idx = INTR_IDX_UNDERRUN;
+       irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+       atomic_set(&phys_enc->vblank_refcount, 0);
+       atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+       init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+       phys_enc->enable_state = DPU_ENC_DISABLED;
+
+       DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+       return phys_enc;
+
+fail:
+       DPU_ERROR("failed to create encoder\n");
+       if (vid_enc)
+               dpu_encoder_phys_vid_destroy(phys_enc);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644 (file)
index 0000000..bfcd165
--- /dev/null
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H                16
+#define DPU_UBWC_META_BLOCK_SIZE       256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT  4096
+
+#define DPU_TILE_HEIGHT_DEFAULT        1
+#define DPU_TILE_HEIGHT_TILED  4
+#define DPU_TILE_HEIGHT_UBWC   4
+#define DPU_TILE_HEIGHT_NV12   8
+
+#define DPU_MAX_IMG_WIDTH              0x3FFF
+#define DPU_MAX_IMG_HEIGHT             0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * there is an additional metadata plane for such formats.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha,   \
+bp, flg, fm, np)                                                          \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_INTERLEAVED,                            \
+       .alpha_enable = alpha,                                            \
+       .element = { (e0), (e1), (e2), (e3) },                            \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = DPU_CHROMA_RGB,                                  \
+       .unpack_align_msb = 0,                                            \
+       .unpack_tight = 1,                                                \
+       .unpack_count = uc,                                               \
+       .bpp = bp,                                                        \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
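
For orientation, hand-expanding this macro for the first entry of dpu_format_map further down (ARGB8888) gives roughly the following initializer; this is a paraphrase of the expansion for the reader, not code from the patch:

/*
 *	{
 *		.base.pixel_format = DRM_FORMAT_ARGB8888,
 *		.fetch_planes	= DPU_PLANE_INTERLEAVED,
 *		.alpha_enable	= true,
 *		.element	= { C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA },
 *		.bits		= { COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT },
 *		.chroma_sample	= DPU_CHROMA_RGB,
 *		.unpack_align_msb = 0,
 *		.unpack_tight	= 1,
 *		.unpack_count	= 4,
 *		.bpp		= 4,
 *		.fetch_mode	= DPU_FETCH_LINEAR,
 *		.flag		= { 0 },
 *		.num_planes	= 1,
 *		.tile_height	= DPU_TILE_HEIGHT_DEFAULT
 *	}
 */
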
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc,    \
+alpha, bp, flg, fm, np, th)                                               \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_INTERLEAVED,                            \
+       .alpha_enable = alpha,                                            \
+       .element = { (e0), (e1), (e2), (e3) },                            \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = DPU_CHROMA_RGB,                                  \
+       .unpack_align_msb = 0,                                            \
+       .unpack_tight = 1,                                                \
+       .unpack_count = uc,                                               \
+       .bpp = bp,                                                        \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = th                                                 \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3,              \
+alpha, chroma, count, bp, flg, fm, np)                                    \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_INTERLEAVED,                            \
+       .alpha_enable = alpha,                                            \
+       .element = { (e0), (e1), (e2), (e3)},                             \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = chroma,                                          \
+       .unpack_align_msb = 0,                                            \
+       .unpack_tight = 1,                                                \
+       .unpack_count = count,                                            \
+       .bpp = bp,                                                        \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)      \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+       .alpha_enable = false,                                            \
+       .element = { (e0), (e1), 0, 0 },                                  \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = chroma,                                          \
+       .unpack_align_msb = 0,                                            \
+       .unpack_tight = 1,                                                \
+       .unpack_count = 2,                                                \
+       .bpp = 2,                                                         \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma,             \
+flg, fm, np, th)                                                          \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+       .alpha_enable = false,                                            \
+       .element = { (e0), (e1), 0, 0 },                                  \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = chroma,                                          \
+       .unpack_align_msb = 0,                                            \
+       .unpack_tight = 1,                                                \
+       .unpack_count = 2,                                                \
+       .bpp = 2,                                                         \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = th                                                 \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+       .alpha_enable = false,                                            \
+       .element = { (e0), (e1), 0, 0 },                                  \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = chroma,                                          \
+       .unpack_align_msb = 1,                                            \
+       .unpack_tight = 0,                                                \
+       .unpack_count = 2,                                                \
+       .bpp = 2,                                                         \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma,       \
+flg, fm, np, th)                                                          \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_PSEUDO_PLANAR,                          \
+       .alpha_enable = false,                                            \
+       .element = { (e0), (e1), 0, 0 },                                  \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = chroma,                                          \
+       .unpack_align_msb = 1,                                            \
+       .unpack_tight = 0,                                                \
+       .unpack_count = 2,                                                \
+       .bpp = 2,                                                         \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = th                                                 \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp,    \
+flg, fm, np)                                                      \
+{                                                                         \
+       .base.pixel_format = DRM_FORMAT_ ## fmt,                          \
+       .fetch_planes = DPU_PLANE_PLANAR,                                 \
+       .alpha_enable = alpha,                                            \
+       .element = { (e0), (e1), (e2), 0 },                               \
+       .bits = { g, b, r, a },                                           \
+       .chroma_sample = chroma,                                          \
+       .unpack_align_msb = 0,                                            \
+       .unpack_tight = 1,                                                \
+       .unpack_count = 1,                                                \
+       .bpp = bp,                                                        \
+       .fetch_mode = fm,                                                 \
+       .flag = {(flg)},                                                  \
+       .num_planes = np,                                                 \
+       .tile_height = DPU_TILE_HEIGHT_DEFAULT                            \
+}
+
+/**
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+       uint32_t format;
+       uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+       INTERLEAVED_RGB_FMT(ARGB8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               true, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ABGR8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XBGR8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBA8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               true, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRA8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               true, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRX8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               false, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XRGB8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               false, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBX8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               false, 4, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGB888,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+               false, 3, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGR888,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+               false, 3, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGB565,
+               0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGR565,
+               0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ARGB1555,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ABGR1555,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBA5551,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRA5551,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XRGB1555,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XBGR1555,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBX5551,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRX5551,
+               COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ARGB4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ABGR4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBA4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRA4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               true, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XRGB4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XBGR4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBX4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRX4444,
+               COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               false, 2, 0,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRA1010102,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               true, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBA1010102,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               true, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ABGR2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(ARGB2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XRGB2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               false, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(BGRX1010102,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               false, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(XBGR2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               false, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       INTERLEAVED_RGB_FMT(RGBX1010102,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               false, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_LINEAR, 1),
+
+       PSEUDO_YUV_FMT(NV12,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       PSEUDO_YUV_FMT(NV21,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C1_B_Cb,
+               DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       PSEUDO_YUV_FMT(NV16,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       PSEUDO_YUV_FMT(NV61,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C1_B_Cb,
+               DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       INTERLEAVED_YUV_FMT(VYUY,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+               false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       INTERLEAVED_YUV_FMT(UYVY,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+               false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       INTERLEAVED_YUV_FMT(YUYV,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+               false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       INTERLEAVED_YUV_FMT(YVYU,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+               false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 2),
+
+       PLANAR_YUV_FMT(YUV420,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C1_B_Cb, C0_G_Y,
+               false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 3),
+
+       PLANAR_YUV_FMT(YVU420,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr, C0_G_Y,
+               false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format table:
+ * This table holds the A5x tile formats supported.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+       INTERLEAVED_RGB_FMT_TILED(BGR565,
+               0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+               false, 2, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               true, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+               true, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               false, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               true, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+               false, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+               false, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               false, 4, 0,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_DX,
+               DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+       PSEUDO_YUV_FMT_TILED(NV12,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+       PSEUDO_YUV_FMT_TILED(NV21,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C1_B_Cb,
+               DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the supported UBWC formats.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data is passed by user-space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+       INTERLEAVED_RGB_FMT_TILED(BGR565,
+               0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+               false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+       INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+       INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+       INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+       INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+               COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+               true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+               DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+       PSEUDO_YUV_FMT_TILED(NV12,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+                               DPU_FORMAT_FLAG_COMPRESSED,
+               DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+       PSEUDO_YUV_FMT_LOOSE(NV12,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+               DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+       PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+                               DPU_FORMAT_FLAG_COMPRESSED),
+               DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+       PSEUDO_YUV_FMT_TILED(NV12,
+               0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+               C1_B_Cb, C2_R_Cr,
+               DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+                               DPU_FORMAT_FLAG_COMPRESSED),
+               DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ *   Note: not using the drm_format_*_subsampling helpers since we support
+ *   formats that they do not describe
+ */
+static void _dpu_get_v_h_subsample_rate(
+       enum dpu_chroma_samp_type chroma_sample,
+       uint32_t *v_sample,
+       uint32_t *h_sample)
+{
+       if (!v_sample || !h_sample)
+               return;
+
+       switch (chroma_sample) {
+       case DPU_CHROMA_H2V1:
+               *v_sample = 1;
+               *h_sample = 2;
+               break;
+       case DPU_CHROMA_H1V2:
+               *v_sample = 2;
+               *h_sample = 1;
+               break;
+       case DPU_CHROMA_420:
+               *v_sample = 2;
+               *h_sample = 2;
+               break;
+       default:
+               *v_sample = 1;
+               *h_sample = 1;
+               break;
+       }
+}
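+
+/*
+ * Worked example (illustrative sketch, not driver code): for a 128x64
+ * DPU_CHROMA_420 image,
+ *
+ *     uint32_t v_sample, h_sample;
+ *
+ *     _dpu_get_v_h_subsample_rate(DPU_CHROMA_420, &v_sample, &h_sample);
+ *
+ * yields v_sample == 2 and h_sample == 2: each chroma sample covers a
+ * 2x2 block of luma samples, so the chroma plane is 64x32.
+ */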
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+       static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+               {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+               {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+               {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+               {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+               {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+       };
+       int color_fmt = -1;
+       int i;
+
+       if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+               if (DPU_FORMAT_IS_DX(fmt)) {
+                       if (fmt->unpack_tight)
+                               color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+                       else
+                               color_fmt = COLOR_FMT_P010_UBWC;
+               } else
+                       color_fmt = COLOR_FMT_NV12_UBWC;
+               return color_fmt;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+               if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+                       color_fmt = dpu_media_ubwc_map[i].color;
+                       break;
+               }
+       return color_fmt;
+}
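+
+/*
+ * Note on the NV12 special case above: 10-bit (DX) NV12 maps to the
+ * tightly packed TP10 UBWC layout (COLOR_FMT_NV12_BPP10_UBWC) when
+ * unpack_tight is set, and to the loosely packed P010 UBWC layout
+ * otherwise; plain 8-bit NV12 maps to COLOR_FMT_NV12_UBWC.
+ */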
+
+static int _dpu_format_get_plane_sizes_ubwc(
+               const struct dpu_format *fmt,
+               const uint32_t width,
+               const uint32_t height,
+               struct dpu_hw_fmt_layout *layout)
+{
+       int i;
+       int color;
+       bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+       memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+       layout->format = fmt;
+       layout->width = width;
+       layout->height = height;
+       layout->num_planes = fmt->num_planes;
+
+       color = _dpu_format_get_media_color_ubwc(fmt);
+       if (color < 0) {
+               DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+                       (char *)&fmt->base.pixel_format);
+               return -EINVAL;
+       }
+
+       if (DPU_FORMAT_IS_YUV(layout->format)) {
+               uint32_t y_sclines, uv_sclines;
+               uint32_t y_meta_scanlines = 0;
+               uint32_t uv_meta_scanlines = 0;
+
+               layout->num_planes = 2;
+               layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+               y_sclines = VENUS_Y_SCANLINES(color, height);
+               layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+                       y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+               layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+               uv_sclines = VENUS_UV_SCANLINES(color, height);
+               layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+                       uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+               if (!meta)
+                       goto done;
+
+               layout->num_planes += 2;
+               layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+               y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+               layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+                       y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+               layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+               uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+               layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+                       uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+       } else {
+               uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+               layout->num_planes = 1;
+
+               layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+               rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+               layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+                       rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+               if (!meta)
+                       goto done;
+               layout->num_planes += 2;
+               layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+               rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+               layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+                       rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+       }
+
+done:
+       for (i = 0; i < DPU_MAX_PLANES; i++)
+               layout->total_size += layout->plane_size[i];
+
+       return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+               const struct dpu_format *fmt,
+               const uint32_t width,
+               const uint32_t height,
+               struct dpu_hw_fmt_layout *layout,
+               const uint32_t *pitches)
+{
+       int i;
+
+       memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+       layout->format = fmt;
+       layout->width = width;
+       layout->height = height;
+       layout->num_planes = fmt->num_planes;
+
+       /* Due to memset above, only need to set planes of interest */
+       if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+               layout->num_planes = 1;
+               layout->plane_size[0] = width * height * layout->format->bpp;
+               layout->plane_pitch[0] = width * layout->format->bpp;
+       } else {
+               uint32_t v_subsample, h_subsample;
+               uint32_t chroma_samp;
+               uint32_t bpp = 1;
+
+               chroma_samp = fmt->chroma_sample;
+               _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+                               &h_subsample);
+
+               if (width % h_subsample || height % v_subsample) {
+                       DRM_ERROR("mismatch in subsample vs dimensions\n");
+                       return -EINVAL;
+               }
+
+               if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+                       (DPU_FORMAT_IS_DX(fmt)))
+                       bpp = 2;
+               layout->plane_pitch[0] = width * bpp;
+               layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+               layout->plane_size[0] = layout->plane_pitch[0] * height;
+               layout->plane_size[1] = layout->plane_pitch[1] *
+                               (height / v_subsample);
+
+               if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+                       layout->num_planes = 2;
+                       layout->plane_size[1] *= 2;
+                       layout->plane_pitch[1] *= 2;
+               } else {
+                       /* planar */
+                       layout->num_planes = 3;
+                       layout->plane_size[2] = layout->plane_size[1];
+                       layout->plane_pitch[2] = layout->plane_pitch[1];
+               }
+       }
+
+       /*
+        * linear formats: allow user-allocated pitches if they are greater
+        * than the minimum requirement.
+        * ubwc formats: pitch values are computed uniformly across all the
+        * components based on the ubwc specification.
+        */
+       for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+               if (pitches && layout->plane_pitch[i] < pitches[i])
+                       layout->plane_pitch[i] = pitches[i];
+       }
+
+       for (i = 0; i < DPU_MAX_PLANES; i++)
+               layout->total_size += layout->plane_size[i];
+
+       return 0;
+}
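+
+/*
+ * Worked example of the linear math above (a sketch, assuming NV12 at
+ * 128x64 with no user-supplied pitches): h/v subsampling are both 2 and
+ * bpp is 1, so initially
+ *
+ *     plane_pitch[0] = 128,  plane_size[0] = 128 * 64 = 8192
+ *     plane_pitch[1] =  64,  plane_size[1] =  64 * 32 = 2048
+ *
+ * NV12 is pseudo planar, so pitch and size of the interleaved CbCr
+ * plane double to 128 and 4096; total_size = 8192 + 4096 = 12288,
+ * i.e. width * height * 3/2 bytes.
+ */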
+
+static int dpu_format_get_plane_sizes(
+               const struct dpu_format *fmt,
+               const uint32_t w,
+               const uint32_t h,
+               struct dpu_hw_fmt_layout *layout,
+               const uint32_t *pitches)
+{
+       if (!layout || !fmt) {
+               DRM_ERROR("invalid pointer\n");
+               return -EINVAL;
+       }
+
+       if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+               DRM_ERROR("image dimensions outside max range\n");
+               return -ERANGE;
+       }
+
+       if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+               return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+       return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+               struct msm_gem_address_space *aspace,
+               struct drm_framebuffer *fb,
+               struct dpu_hw_fmt_layout *layout)
+{
+       uint32_t base_addr = 0;
+       bool meta;
+
+       if (!fb || !layout) {
+               DRM_ERROR("invalid pointers\n");
+               return -EINVAL;
+       }
+
+       if (aspace)
+               base_addr = msm_framebuffer_iova(fb, aspace, 0);
+       if (!base_addr) {
+               DRM_ERROR("failed to retrieve base addr\n");
+               return -EFAULT;
+       }
+
+       meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+       /* Per-format logic for verifying active planes */
+       if (DPU_FORMAT_IS_YUV(layout->format)) {
+               /************************************************/
+               /*      UBWC            **                      */
+               /*      buffer          **      DPU PLANE       */
+               /*      format          **                      */
+               /************************************************/
+               /* -------------------  ** -------------------- */
+               /* |      Y meta     |  ** |    Y bitstream   | */
+               /* |       data      |  ** |       plane      | */
+               /* -------------------  ** -------------------- */
+               /* |    Y bitstream  |  ** |  CbCr bitstream  | */
+               /* |       data      |  ** |       plane      | */
+               /* -------------------  ** -------------------- */
+               /* |   CbCr metadata |  ** |       Y meta     | */
+               /* |       data      |  ** |       plane      | */
+               /* -------------------  ** -------------------- */
+               /* |  CbCr bitstream |  ** |     CbCr meta    | */
+               /* |       data      |  ** |       plane      | */
+               /* -------------------  ** -------------------- */
+               /************************************************/
+
+               /* configure Y bitstream plane */
+               layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+               /* configure CbCr bitstream plane */
+               layout->plane_addr[1] = base_addr + layout->plane_size[0]
+                       + layout->plane_size[2] + layout->plane_size[3];
+
+               if (!meta)
+                       goto done;
+
+               /* configure Y metadata plane */
+               layout->plane_addr[2] = base_addr;
+
+               /* configure CbCr metadata plane */
+               layout->plane_addr[3] = base_addr + layout->plane_size[0]
+                       + layout->plane_size[2];
+
+       } else {
+               /************************************************/
+               /*      UBWC            **                      */
+               /*      buffer          **      DPU PLANE       */
+               /*      format          **                      */
+               /************************************************/
+               /* -------------------  ** -------------------- */
+               /* |      RGB meta   |  ** |   RGB bitstream  | */
+               /* |       data      |  ** |       plane      | */
+               /* -------------------  ** -------------------- */
+               /* |  RGB bitstream  |  ** |       NONE       | */
+               /* |       data      |  ** |                  | */
+               /* -------------------  ** -------------------- */
+               /*                      ** |     RGB meta     | */
+               /*                      ** |       plane      | */
+               /*                      ** -------------------- */
+               /************************************************/
+
+               layout->plane_addr[0] = base_addr + layout->plane_size[2];
+               layout->plane_addr[1] = 0;
+
+               if (!meta)
+                       goto done;
+
+               layout->plane_addr[2] = base_addr;
+               layout->plane_addr[3] = 0;
+       }
+done:
+       return 0;
+}
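+
+/*
+ * Worked example of the YUV mapping above (a sketch with made-up plane
+ * sizes): with base_addr = 0x1000, plane_size[0] = 0x8000 (Y bitstream),
+ * plane_size[1] = 0x4000 (CbCr bitstream), plane_size[2] = 0x400 (Y meta)
+ * and plane_size[3] = 0x200 (CbCr meta), the buffer is laid out as
+ * [Y meta][Y bitstream][CbCr meta][CbCr bitstream] and the DPU planes
+ * resolve to
+ *
+ *     plane_addr[0] = 0x1000 + 0x400                  = 0x1400
+ *     plane_addr[1] = 0x1000 + 0x8000 + 0x400 + 0x200 = 0x9600
+ *     plane_addr[2] = 0x1000
+ *     plane_addr[3] = 0x1000 + 0x8000 + 0x400         = 0x9400
+ */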
+
+static int _dpu_format_populate_addrs_linear(
+               struct msm_gem_address_space *aspace,
+               struct drm_framebuffer *fb,
+               struct dpu_hw_fmt_layout *layout)
+{
+       unsigned int i;
+
+       /* Can now check the pitches given vs pitches expected */
+       for (i = 0; i < layout->num_planes; ++i) {
+               if (layout->plane_pitch[i] > fb->pitches[i]) {
+                       DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+                               i, layout->plane_pitch[i], fb->pitches[i]);
+                       return -EINVAL;
+               }
+       }
+
+       /* Populate addresses for simple formats here */
+       for (i = 0; i < layout->num_planes; ++i) {
+               if (aspace)
+                       layout->plane_addr[i] =
+                               msm_framebuffer_iova(fb, aspace, i);
+               if (!layout->plane_addr[i]) {
+                       DRM_ERROR("failed to retrieve base addr\n");
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+int dpu_format_populate_layout(
+               struct msm_gem_address_space *aspace,
+               struct drm_framebuffer *fb,
+               struct dpu_hw_fmt_layout *layout)
+{
+       uint32_t plane_addr[DPU_MAX_PLANES];
+       int i, ret;
+
+       if (!fb || !layout) {
+               DRM_ERROR("invalid arguments\n");
+               return -EINVAL;
+       }
+
+       if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+                       (fb->height > DPU_MAX_IMG_HEIGHT)) {
+               DRM_ERROR("image dimensions outside max range\n");
+               return -ERANGE;
+       }
+
+       layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+       /* Populate the plane sizes etc via get_format */
+       ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+                       layout, fb->pitches);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < DPU_MAX_PLANES; ++i)
+               plane_addr[i] = layout->plane_addr[i];
+
+       /* Populate the addresses given the fb */
+       if (DPU_FORMAT_IS_UBWC(layout->format) ||
+                       DPU_FORMAT_IS_TILE(layout->format))
+               ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+       else
+               ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+       /* check if anything changed */
+       if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+               ret = -EAGAIN;
+
+       return ret;
+}
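+
+/*
+ * Usage sketch (illustrative): callers can treat -EAGAIN as "layout valid
+ * but addresses unchanged" and skip reprogramming the pipe.
+ *
+ *     ret = dpu_format_populate_layout(aspace, fb, &layout);
+ *     if (ret == -EAGAIN)
+ *             return 0;       // same addresses as before, nothing to do
+ *     if (ret)
+ *             return ret;     // real failure
+ *     // program the source surface from layout.plane_addr[]
+ */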
+
+int dpu_format_check_modified_format(
+               const struct msm_kms *kms,
+               const struct msm_format *msm_fmt,
+               const struct drm_mode_fb_cmd2 *cmd,
+               struct drm_gem_object **bos)
+{
+       int ret, i, num_base_fmt_planes;
+       const struct dpu_format *fmt;
+       struct dpu_hw_fmt_layout layout;
+       uint32_t bos_total_size = 0;
+
+       if (!msm_fmt || !cmd || !bos) {
+               DRM_ERROR("invalid arguments\n");
+               return -EINVAL;
+       }
+
+       fmt = to_dpu_format(msm_fmt);
+       num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+       ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+                       &layout, cmd->pitches);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < num_base_fmt_planes; i++) {
+               if (!bos[i]) {
+                       DRM_ERROR("invalid handle for plane %d\n", i);
+                       return -EINVAL;
+               }
+               if ((i == 0) || (bos[i] != bos[0]))
+                       bos_total_size += bos[i]->size;
+       }
+
+       if (bos_total_size < layout.total_size) {
+               DRM_ERROR("buffers total size too small %u expected %u\n",
+                               bos_total_size, layout.total_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+               const uint32_t format,
+               const uint64_t modifier)
+{
+       uint32_t i = 0;
+       const struct dpu_format *fmt = NULL;
+       const struct dpu_format *map = NULL;
+       ssize_t map_size = 0;
+
+       /*
+        * Currently only support exactly zero or one modifier.
+        * All planes use the same modifier.
+        */
+       DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
+       switch (modifier) {
+       case 0:
+               map = dpu_format_map;
+               map_size = ARRAY_SIZE(dpu_format_map);
+               break;
+       case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+               map = dpu_format_map_ubwc;
+               map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+               DPU_DEBUG("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+                               (char *)&format);
+               break;
+       default:
+               DPU_ERROR("unsupported format modifier %llX\n", modifier);
+               return NULL;
+       }
+
+       for (i = 0; i < map_size; i++) {
+               if (format == map[i].base.pixel_format) {
+                       fmt = &map[i];
+                       break;
+               }
+       }
+
+       if (fmt == NULL)
+               DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+                       (char *)&format, modifier);
+       else
+               DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+                               (char *)&format, modifier,
+                               DPU_FORMAT_IS_UBWC(fmt),
+                               DPU_FORMAT_IS_YUV(fmt));
+
+       return fmt;
+}
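+
+/*
+ * Example lookup (a sketch): resolve the linear and UBWC variants of NV12.
+ *
+ *     const struct dpu_format *lin = dpu_get_dpu_format(DRM_FORMAT_NV12);
+ *     const struct dpu_format *ubwc = dpu_get_dpu_format_ext(
+ *                     DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED);
+ *
+ * lin is found in dpu_format_map and ubwc in dpu_format_map_ubwc; either
+ * may be NULL if the combination is unsupported.
+ */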
+
+const struct msm_format *dpu_get_msm_format(
+               struct msm_kms *kms,
+               const uint32_t format,
+               const uint64_t modifiers)
+{
+       const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+                       modifiers);
+       if (fmt)
+               return &fmt->base;
+       return NULL;
+}
+
+uint32_t dpu_populate_formats(
+               const struct dpu_format_extended *format_list,
+               uint32_t *pixel_formats,
+               uint64_t *pixel_modifiers,
+               uint32_t pixel_formats_max)
+{
+       uint32_t i, fourcc_format;
+
+       if (!format_list || !pixel_formats)
+               return 0;
+
+       for (i = 0, fourcc_format = 0;
+                       format_list->fourcc_format && i < pixel_formats_max;
+                       ++format_list) {
+               /* TODO: verify that each listed format is in dpu_format_map */
+
+               /* optionally return modified formats */
+               if (pixel_modifiers) {
+                       /* assume same modifier for all fb planes */
+                       pixel_formats[i] = format_list->fourcc_format;
+                       pixel_modifiers[i++] = format_list->modifier;
+               } else {
+                       /* assume base formats grouped together */
+                       if (fourcc_format != format_list->fourcc_format) {
+                               fourcc_format = format_list->fourcc_format;
+                               pixel_formats[i++] = fourcc_format;
+                       }
+               }
+       }
+
+       return i;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644 (file)
index 0000000..a54451d
--- /dev/null
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format:          DRM FourCC Code
+ * @modifier:        format modifier from client; all planes use the same one
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+               const uint32_t format,
+               const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base;
+ *                     callback function registered with the msm_kms layer
+ * @kms:             kms driver
+ * @format:          DRM FourCC Code
+ * @modifiers:       data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+               struct msm_kms *kms,
+               const uint32_t format,
+               const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with supported fourcc codes
+ * @format_list:       pointer to list of possible formats
+ * @pixel_formats:     array to populate with fourcc codes
+ * @pixel_modifiers:   array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+               const struct dpu_format_extended *format_list,
+               uint32_t *pixel_formats,
+               uint64_t *pixel_modifiers,
+               uint32_t pixel_formats_max);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ *                   dpu non-standard, i.e. modified format
+ * @kms:             kms driver
+ * @msm_fmt:         pointer to the msm_format base of a dpu_format
+ * @cmd:             fb_cmd2 structure from the user request
+ * @bos:             gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+               const struct msm_kms *kms,
+               const struct msm_format *msm_fmt,
+               const struct drm_mode_fb_cmd2 *cmd,
+               struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ *                     mmu, fb, and format found in the fb
+ * @aspace:            address space pointer
+ * @fb:                framebuffer pointer
+ * @fmtl:              format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ *         are the same as before or 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+               struct msm_gem_address_space *aspace,
+               struct drm_framebuffer *fb,
+               struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644 (file)
index 0000000..58d29e4
--- /dev/null
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+               struct dpu_hw_blk_ops *ops)
+{
+       if (!hw_blk) {
+               pr_err("invalid parameters\n");
+               return -EINVAL;
+       }
+
+       INIT_LIST_HEAD(&hw_blk->list);
+       hw_blk->type = type;
+       hw_blk->id = id;
+       atomic_set(&hw_blk->refcount, 0);
+
+       if (ops)
+               hw_blk->ops = *ops;
+
+       mutex_lock(&dpu_hw_blk_lock);
+       list_add(&hw_blk->list, &dpu_hw_blk_list);
+       mutex_unlock(&dpu_hw_blk_lock);
+
+       return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk:  pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+       if (!hw_blk) {
+               pr_err("invalid parameters\n");
+               return;
+       }
+
+       if (atomic_read(&hw_blk->refcount))
+               pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+                               hw_blk->id);
+
+       mutex_lock(&dpu_hw_blk_lock);
+       list_del(&hw_blk->list);
+       mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+       struct dpu_hw_blk *curr;
+       int rc, refcount;
+
+       if (!hw_blk) {
+               mutex_lock(&dpu_hw_blk_lock);
+               list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+                       if ((curr->type != type) ||
+                                       (id >= 0 && curr->id != id) ||
+                                       (id < 0 &&
+                                               atomic_read(&curr->refcount)))
+                               continue;
+
+                       hw_blk = curr;
+                       break;
+               }
+               mutex_unlock(&dpu_hw_blk_lock);
+       }
+
+       if (!hw_blk) {
+               pr_debug("no hw_blk:%d\n", type);
+               return NULL;
+       }
+
+       refcount = atomic_inc_return(&hw_blk->refcount);
+
+       if (refcount == 1 && hw_blk->ops.start) {
+               rc = hw_blk->ops.start(hw_blk);
+               if (rc) {
+                       pr_err("failed to start  hw_blk:%d rc:%d\n", type, rc);
+                       goto error_start;
+               }
+       }
+
+       pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+                       hw_blk->id, refcount);
+       return hw_blk;
+
+error_start:
+       dpu_hw_blk_put(hw_blk);
+       return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+       if (!hw_blk) {
+               pr_err("invalid parameters\n");
+               return;
+       }
+
+       pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+                       atomic_read(&hw_blk->refcount));
+
+       if (!atomic_read(&hw_blk->refcount)) {
+               pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+               return;
+       }
+
+       if (atomic_dec_return(&hw_blk->refcount))
+               return;
+
+       if (hw_blk->ops.stop)
+               hw_blk->ops.stop(hw_blk);
+}
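+
+/*
+ * Lifecycle sketch (illustrative; the block type constant and ops are
+ * placeholders): a block is registered once and then reference counted,
+ * with ops.start() run on the first get and ops.stop() on the last put.
+ *
+ *     dpu_hw_blk_init(&blk, BLK_TYPE, 0, &blk_ops);
+ *     hw = dpu_hw_blk_get(NULL, BLK_TYPE, -1);        // 0 -> 1, start()
+ *     dpu_hw_blk_put(hw);                             // 1 -> 0, stop()
+ *     dpu_hw_blk_destroy(&blk);
+ */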
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644 (file)
index 0000000..0f4ca8a
--- /dev/null
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+       int (*start)(struct dpu_hw_blk *);
+       void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+       struct list_head list;
+       u32 type;
+       int id;
+       atomic_t refcount;
+       struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+               struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644 (file)
index 0000000..44ee063
--- /dev/null
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+       (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+       BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+       BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+       (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+       BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+       BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+       (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+       (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE         (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH         2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH  2560
+
+#define MAX_HORZ_DECIMATION    4
+#define MAX_VERT_DECIMATION    4
+
+#define MAX_UPSCALE_RATIO      20
+#define MAX_DOWNSCALE_RATIO    4
+#define SSPP_UNITY_SCALE       1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+       .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .max_mixer_blendstages = 0xb,
+       .qseed_type = DPU_SSPP_SCALER_QSEED3,
+       .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+       .ubwc_version = DPU_HW_UBWC_VER_20,
+       .has_src_split = true,
+       .has_dim_layer = true,
+       .has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+       {
+       .name = "top_0", .id = MDP_TOP,
+       .base = 0x0, .len = 0x45C,
+       .features = 0,
+       .highest_bank_bit = 0x2,
+       .has_dest_scaler = true,
+       .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+                       .reg_off = 0x2AC, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+                       .reg_off = 0x2B4, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+                       .reg_off = 0x2BC, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+                       .reg_off = 0x2C4, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+                       .reg_off = 0x2AC, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+                       .reg_off = 0x2B4, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+                       .reg_off = 0x2BC, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+                       .reg_off = 0x2C4, .bit_off = 8},
+       },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+       {
+       .name = "ctl_0", .id = CTL_0,
+       .base = 0x1000, .len = 0xE4,
+       .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+       },
+       {
+       .name = "ctl_1", .id = CTL_1,
+       .base = 0x1200, .len = 0xE4,
+       .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+       },
+       {
+       .name = "ctl_2", .id = CTL_2,
+       .base = 0x1400, .len = 0xE4,
+       .features = 0
+       },
+       {
+       .name = "ctl_3", .id = CTL_3,
+       .base = 0x1600, .len = 0xE4,
+       .features = 0
+       },
+       {
+       .name = "ctl_4", .id = CTL_4,
+       .base = 0x1800, .len = 0xE4,
+       .features = 0
+       },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+       .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+       .maxhdeciexp = MAX_HORZ_DECIMATION,
+       .maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+       { \
+       .common = &sdm845_sspp_common, \
+       .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+       .maxupscale = MAX_UPSCALE_RATIO, \
+       .smart_dma_priority = sdma_pri, \
+       .src_blk = {.name = STRCAT("sspp_src_", num), \
+               .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+       .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+               .id = DPU_SSPP_SCALER_QSEED3, \
+               .base = 0xa00, .len = 0xa0,}, \
+       .csc_blk = {.name = STRCAT("sspp_csc", num), \
+               .id = DPU_SSPP_CSC_10BIT, \
+               .base = 0x1a00, .len = 0x100,}, \
+       .format_list = plane_formats_yuv, \
+       .virt_format_list = plane_formats, \
+       }
+
+#define _DMA_SBLK(num, sdma_pri) \
+       { \
+       .common = &sdm845_sspp_common, \
+       .maxdwnscale = SSPP_UNITY_SCALE, \
+       .maxupscale = SSPP_UNITY_SCALE, \
+       .smart_dma_priority = sdma_pri, \
+       .src_blk = {.name = STRCAT("sspp_src_", num), \
+               .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+       .format_list = plane_formats, \
+       .virt_format_list = plane_formats, \
+       }
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+       { \
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0x1c8, \
+       .features = VIG_SDM845_MASK, \
+       .sblk = &_sblk, \
+       .xin_id = _xinid, \
+       .type = SSPP_TYPE_VIG, \
+       .clk_ctrl = _clkctrl \
+       }
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+       { \
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0x1c8, \
+       .features = DMA_SDM845_MASK, \
+       .sblk = &_sblk, \
+       .xin_id = _xinid, \
+       .type = SSPP_TYPE_DMA, \
+       .clk_ctrl = _clkctrl \
+       }
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+       SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+               sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+       SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+               sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+       SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+               sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+       SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+               sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+       SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+               sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+       SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+               sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+       SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+               sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+       SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+               sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+       .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .maxblendstages = 11, /* excluding base layer */
+       .blendstage_base = { /* offsets relative to mixer base */
+               0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+               0xb0, 0xc8, 0xe0, 0xf8, 0x110
+       },
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+       { \
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0x320, \
+       .features = MIXER_SDM845_MASK, \
+       .sblk = &sdm845_lm_sblk, \
+       .ds = _ds, \
+       .pingpong = _pp, \
+       .lm_pair_mask = (1 << _lmpair) \
+       }
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+       LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+       LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+       LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+       LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+       LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+       LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+       .name = "ds_top_0", .id = DS_TOP,
+       .base = 0x60000, .len = 0xc,
+       .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+       .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+       {\
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0x800, \
+       .features = DPU_SSPP_SCALER_QSEED3, \
+       .top = &sdm845_ds_top \
+       }
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+       DS_BLK("ds_0", DS_0, 0x800),
+       DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+       .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+               .version = 0x1},
+       .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+               .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+       .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+               .len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+       {\
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0xd4, \
+       .features = PINGPONG_SDM845_SPLIT_MASK, \
+       .sblk = &sdm845_pp_sblk_te \
+       }
+#define PP_BLK(_name, _id, _base) \
+       {\
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0xd4, \
+       .features = PINGPONG_SDM845_MASK, \
+       .sblk = &sdm845_pp_sblk \
+       }
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+       PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+       PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+       {\
+       .name = _name, .id = _id, \
+       .base = _base, .len = 0x280, \
+       .type = _type, \
+       .controller_id = _ctrl_id, \
+       .prog_fetch_lines_worst_case = 24 \
+       }
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+       INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+       INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+       INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+       INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+       {
+       .name = "cdm_0", .id = CDM_0,
+       .base = 0x79200, .len = 0x224,
+       .features = 0,
+       .intf_connect = BIT(INTF_3),
+       },
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+       {
+       .name = "vbif_0", .id = VBIF_0,
+       .base = 0, .len = 0x1040,
+       .features = BIT(DPU_VBIF_QOS_REMAP),
+       .xin_halt_timeout = 0x4000,
+       .qos_rt_tbl = {
+               .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+               .priority_lvl = sdm845_rt_pri_lvl,
+               },
+       .qos_nrt_tbl = {
+               .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+               .priority_lvl = sdm845_nrt_pri_lvl,
+               },
+       .memtype_count = 14,
+       .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+       },
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+       .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+       {.fl = 4, .lut = 0x357},
+       {.fl = 5, .lut = 0x3357},
+       {.fl = 6, .lut = 0x23357},
+       {.fl = 7, .lut = 0x223357},
+       {.fl = 8, .lut = 0x2223357},
+       {.fl = 9, .lut = 0x22223357},
+       {.fl = 10, .lut = 0x222223357},
+       {.fl = 11, .lut = 0x2222223357},
+       {.fl = 12, .lut = 0x22222223357},
+       {.fl = 13, .lut = 0x222222223357},
+       {.fl = 14, .lut = 0x1222222223357},
+       {.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+       {.fl = 10, .lut = 0x344556677},
+       {.fl = 11, .lut = 0x3344556677},
+       {.fl = 12, .lut = 0x23344556677},
+       {.fl = 13, .lut = 0x223344556677},
+       {.fl = 14, .lut = 0x1223344556677},
+       {.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+       {.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+       .max_bw_low = 6800000,
+       .max_bw_high = 6800000,
+       .min_core_ib = 2400000,
+       .min_llcc_ib = 800000,
+       .min_dram_ib = 800000,
+       .core_ib_ff = "6.0",
+       .core_clk_ff = "1.0",
+       .comp_ratio_rt =
+       "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+       .comp_ratio_nrt =
+       "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+       .undersized_prefill_lines = 2,
+       .xtra_prefill_lines = 2,
+       .dest_scale_prefill_lines = 3,
+       .macrotile_prefill_lines = 4,
+       .yuv_nv12_prefill_lines = 8,
+       .linear_prefill_lines = 1,
+       .downscaling_prefill_lines = 1,
+       .amortizable_threshold = 25,
+       .min_prefill_lines = 24,
+       .danger_lut_tbl = {0xf, 0xffff, 0x0},
+       .qos_lut_tbl = {
+               {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+               .entries = sdm845_qos_linear
+               },
+               {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+               .entries = sdm845_qos_macrotile
+               },
+               {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+               .entries = sdm845_qos_nrt
+               },
+       },
+       .cdp_cfg = {
+               {.rd_enable = 1, .wr_enable = 1},
+               {.rd_enable = 1, .wr_enable = 0}
+       },
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+       *dpu_cfg = (struct dpu_mdss_cfg){
+               .caps = &sdm845_dpu_caps,
+               .mdp_count = ARRAY_SIZE(sdm845_mdp),
+               .mdp = sdm845_mdp,
+               .ctl_count = ARRAY_SIZE(sdm845_ctl),
+               .ctl = sdm845_ctl,
+               .sspp_count = ARRAY_SIZE(sdm845_sspp),
+               .sspp = sdm845_sspp,
+               .mixer_count = ARRAY_SIZE(sdm845_lm),
+               .mixer = sdm845_lm,
+               .ds_count = ARRAY_SIZE(sdm845_ds),
+               .ds = sdm845_ds,
+               .pingpong_count = ARRAY_SIZE(sdm845_pp),
+               .pingpong = sdm845_pp,
+               .cdm_count = ARRAY_SIZE(sdm845_cdm),
+               .cdm = sdm845_cdm,
+               .intf_count = ARRAY_SIZE(sdm845_intf),
+               .intf = sdm845_intf,
+               .vbif_count = ARRAY_SIZE(sdm845_vbif),
+               .vbif = sdm845_vbif,
+               .reg_dma_count = 1,
+               .dma_cfg = sdm845_regdma,
+               .perf = sdm845_perf_data,
+       };
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+       { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+       { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+       kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+       int i;
+       struct dpu_mdss_cfg *dpu_cfg;
+
+       dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+       if (!dpu_cfg)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+               if (cfg_handler[i].hw_rev == hw_rev) {
+                       cfg_handler[i].cfg_init(dpu_cfg);
+                       dpu_cfg->hwversion = hw_rev;
+                       return dpu_cfg;
+               }
+       }
+
+       DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+       dpu_hw_catalog_deinit(dpu_cfg);
+       return ERR_PTR(-ENODEV);
+}
+
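+/*
+ * Usage sketch (illustrative): resolve the catalog for the detected core
+ * revision and release it on teardown.
+ *
+ *     struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(DPU_HW_VER_400);
+ *
+ *     if (IS_ERR(cfg))
+ *             return PTR_ERR(cfg);    // -ENOMEM or -ENODEV
+ *     // ... use cfg->caps, cfg->sspp, cfg->perf, ...
+ *     dpu_hw_catalog_deinit(cfg);
+ */
+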
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644 (file)
index 0000000..f0cb0d4
--- /dev/null
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Maximum hardware block count: e.g. at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases the current design allows at most
+ * 12 hardware blocks of any one type.
+ */
+#define MAX_BLOCKS    12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28)    |\
+               ((MINOR & 0xFFF) << 16)  |\
+               (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev)              ((rev) >> 28)
+#define DPU_HW_MINOR(rev)              (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev)               ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev)                ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2)   \
+       (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
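+/*
+ * For example, DPU_HW_VER_401 encodes as (4 << 28) | (0 << 16) | 1 =
+ * 0x40000001, so DPU_HW_MAJOR() is 4, DPU_HW_MINOR() is 0 and
+ * DPU_HW_STEP() is 1; IS_SDM845_TARGET(DPU_HW_VER_401) is true because
+ * the major/minor half (rev >> 16 == 0x4000) matches DPU_HW_VER_400.
+ */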
+
+#define DPU_HW_BLK_NAME_LEN    16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS       2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+       DPU_HW_UBWC_VER_10 = 0x100,
+       DPU_HW_UBWC_VER_20 = 0x200,
+       DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC,           MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0,      This chipset supports the initial revision of
+ *                         Universal Bandwidth compression
+ * @DPU_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX            Maximum value
+ */
+enum {
+       DPU_MDP_PANIC_PER_PIPE = 0x1,
+       DPU_MDP_10BIT_SUPPORT,
+       DPU_MDP_BWC,
+       DPU_MDP_UBWC_1_0,
+       DPU_MDP_UBWC_1_5,
+       DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC             Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3,  QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC,            Support of color space conversion
+ * @DPU_SSPP_CSC_10BIT,      Support of 10-bit color space conversion
+ * @DPU_SSPP_CURSOR,         SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL,       SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT,      SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL      Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP             Supports client driven prefetch
+ * @DPU_SSPP_MAX             maximum value
+ */
+enum {
+       DPU_SSPP_SRC = 0x1,
+       DPU_SSPP_SCALER_QSEED2,
+       DPU_SSPP_SCALER_QSEED3,
+       DPU_SSPP_SCALER_RGB,
+       DPU_SSPP_CSC,
+       DPU_SSPP_CSC_10BIT,
+       DPU_SSPP_CURSOR,
+       DPU_SSPP_QOS,
+       DPU_SSPP_QOS_8LVL,
+       DPU_SSPP_EXCL_RECT,
+       DPU_SSPP_SMART_DMA_V1,
+       DPU_SSPP_SMART_DMA_V2,
+       DPU_SSPP_TS_PREFILL,
+       DPU_SSPP_TS_PREFILL_REC1,
+       DPU_SSPP_CDP,
+       DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER           Layer mixer layer blend configuration
+ * @DPU_MIXER_SOURCESPLIT     Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC              Gamma correction block
+ * @DPU_DIM_LAYER             Layer mixer supports dim layer
+ * @DPU_MIXER_MAX             maximum value
+ */
+enum {
+       DPU_MIXER_LAYER = 0x1,
+       DPU_MIXER_SOURCESPLIT,
+       DPU_MIXER_GC,
+       DPU_DIM_LAYER,
+       DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE         Tear check block
+ * @DPU_PINGPONG_TE2        Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT      PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE      PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER     Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+       DPU_PINGPONG_TE = 0x1,
+       DPU_PINGPONG_TE2,
+       DPU_PINGPONG_SPLIT,
+       DPU_PINGPONG_SLAVE,
+       DPU_PINGPONG_DITHER,
+       DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY       CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+       DPU_CTL_SPLIT_DISPLAY = 0x1,
+       DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP        VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX              maximum value
+ */
+enum {
+       DPU_VBIF_QOS_OTLIM = 0x1,
+       DPU_VBIF_QOS_REMAP,
+       DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this block
+ * @base:              register base offset to mdss
+ * @len:               length of hardware block
+ * @features:          bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+       char name[DPU_HW_BLK_NAME_LEN]; \
+       u32 id; \
+       u32 base; \
+       u32 len; \
+       unsigned long features
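+
+/*
+ * Note (illustrative): since DPU_HW_BLK_INFO expands to the common leading
+ * members of every block config below, feature queries look the same for
+ * all block types, e.g.:
+ *
+ *     if (test_bit(DPU_SSPP_CSC, &sspp_cfg->features))
+ *             ...the pipe has a CSC sub-block...
+ *
+ * where sspp_cfg is a hypothetical struct dpu_sspp_cfg pointer.
+ */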
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name:              string name for debug purposes
+ * @id:                enum identifying this sub-block
+ * @base:              offset of this sub-block relative to the block
+ *                     offset
+ * @len:               register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+       char name[DPU_HW_BLK_NAME_LEN]; \
+       u32 id; \
+       u32 base; \
+       u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info:   HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+       DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+       DPU_HW_SUBBLK_INFO;
+       u32 version;
+};
+
+struct dpu_csc_blk {
+       DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info:   HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+       DPU_HW_SUBBLK_INFO;
+       u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ *            framebuffer planes
+ */
+struct dpu_format_extended {
+       uint32_t fourcc_format;
+       uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+       DPU_QOS_LUT_USAGE_LINEAR,
+       DPU_QOS_LUT_USAGE_MACROTILE,
+       DPU_QOS_LUT_USAGE_NRT,
+       DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+       u32 fl;
+       u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+       u32 nentry;
+       struct dpu_qos_lut_entry *entries;
+};
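+
+/*
+ * Lookup sketch (editorial illustration; this helper is not part of the
+ * patch): entries are ordered by ascending fill level, and an entry with
+ * fl == 0 terminates the table as the default LUT, so a lookup could be:
+ *
+ *     static u64 example_qos_lut_get(const struct dpu_qos_lut_tbl *tbl,
+ *                                    u32 fill_level)
+ *     {
+ *             u32 i;
+ *
+ *             for (i = 0; i < tbl->nentry; i++)
+ *                     if (!tbl->entries[i].fl ||
+ *                         fill_level <= tbl->entries[i].fl)
+ *                             return tbl->entries[i].lut;
+ *             return 0;
+ *     }
+ */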
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width    max layer mixer line width supported.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ *                       supported z order
+ * @qseed_type         qseed2 or qseed3 support.
+ * @smart_dma_rev      Supported version of SmartDMA feature.
+ * @ubwc_version       UBWC feature version (0x0 for not supported)
+ * @has_src_split      source split feature status
+ * @has_dim_layer      dim layer feature status
+ * @has_idle_pc        indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+       u32 max_mixer_width;
+       u32 max_mixer_blendstages;
+       u32 qseed_type;
+       u32 smart_dma_rev;
+       u32 ubwc_version;
+       bool has_src_split;
+       bool has_dim_layer;
+       bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max pixel width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ *                             (max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ *                             (max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+       u32 maxlinewidth;
+       u32 pixel_ram_size;
+       u32 maxhdeciexp;
+       u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub-blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without decimation)
+ * @maxupscale:  max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk: source/fetch sub-block information
+ * @scaler_blk: scaler sub-block information
+ * @csc_blk: color space conversion sub-block information
+ * @hsic_blk: HSIC color adjustment sub-block information
+ * @memcolor_blk: memory color sub-block information
+ * @pcc_blk: polynomial color correction sub-block information
+ * @igc_blk: inverse gamma correction sub-block information
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+       const struct dpu_sspp_blks_common *common;
+       u32 creq_vblank;
+       u32 danger_vblank;
+       u32 maxdwnscale;
+       u32 maxupscale;
+       u32 smart_dma_priority;
+       u32 max_per_pipe_bw;
+       struct dpu_src_blk src_blk;
+       struct dpu_scaler_blk scaler_blk;
+       struct dpu_pp_blk csc_blk;
+       struct dpu_pp_blk hsic_blk;
+       struct dpu_pp_blk memcolor_blk;
+       struct dpu_pp_blk pcc_blk;
+       struct dpu_pp_blk igc_blk;
+
+       const struct dpu_format_extended *format_list;
+       const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks:      information of mixer block
+ * @maxwidth:               Max pixel width supported by this mixer
+ * @maxblendstages:         Max number of blend-stages supported
+ * @blendstage_base:        Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+       u32 maxwidth;
+       u32 maxblendstages;
+       u32 blendstage_base[MAX_BLOCKS];
+       struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+       struct dpu_pp_blk te;
+       struct dpu_pp_blk te2;
+       struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+       DPU_CLK_CTRL_NONE,
+       DPU_CLK_CTRL_VIG0,
+       DPU_CLK_CTRL_VIG1,
+       DPU_CLK_CTRL_VIG2,
+       DPU_CLK_CTRL_VIG3,
+       DPU_CLK_CTRL_VIG4,
+       DPU_CLK_CTRL_RGB0,
+       DPU_CLK_CTRL_RGB1,
+       DPU_CLK_CTRL_RGB2,
+       DPU_CLK_CTRL_RGB3,
+       DPU_CLK_CTRL_DMA0,
+       DPU_CLK_CTRL_DMA1,
+       DPU_CLK_CTRL_CURSOR0,
+       DPU_CLK_CTRL_CURSOR1,
+       DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+       DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off:           register offset
+ * @bit_off:           bit offset
+ */
+struct dpu_clk_ctrl_reg {
+       u32 reg_off;
+       u32 bit_off;
+};
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features:          bit mask identifying sub-blocks/features
+ * @highest_bank_bit:  UBWC parameter
+ * @ubwc_static:       ubwc static configuration
+ * @ubwc_swizzle:      ubwc default swizzle setting
+ * @has_dest_scaler:   indicates support of destination scaler
+ * @clk_ctrls:         clock control register definition
+ */
+struct dpu_mdp_cfg {
+       DPU_HW_BLK_INFO;
+       u32 highest_bank_bit;
+       u32 ubwc_static;
+       u32 ubwc_swizzle;
+       bool has_dest_scaler;
+       struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL instance info
+ * @id:                index identifying this block
+ * @base:              register base offset to mdss
+ * @features:          bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+       DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id:                index identifying this block
+ * @base:              register offset of this block
+ * @features:          bit mask identifying sub-blocks/features
+ * @sblk:              SSPP sub-blocks information
+ * @xin_id:            bus client identifier
+ * @clk_ctrl:          clock control identifier
+ * @type:              sspp type identifier
+ */
+struct dpu_sspp_cfg {
+       DPU_HW_BLK_INFO;
+       const struct dpu_sspp_sub_blks *sblk;
+       u32 xin_id;
+       enum dpu_clk_ctrl_type clk_ctrl;
+       u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id:                index identifying this block
+ * @base:              register offset of this block
+ * @features:          bit mask identifying sub-blocks/features
+ * @sblk:              LM Sub-blocks information
+ * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds:                ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+       DPU_HW_BLK_INFO;
+       const struct dpu_lm_sub_blks *sblk;
+       u32 pingpong;
+       u32 ds;
+       unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id               enum identifying this block
+ * @base             register offset of this block
+ * @features         bit mask identifying features
+ * @version          hw version of dest scaler
+ * @maxinputwidth    maximum input line width
+ * @maxoutputwidth   maximum output line width
+ * @maxupscale       maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+       DPU_HW_BLK_INFO;
+       u32 version;
+       u32 maxinputwidth;
+       u32 maxoutputwidth;
+       u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id          enum identifying this block
+ * @base        register offset wrt DS top offset
+ * @features    bit mask identifying features
+ * @version     hw version of the qseed block
+ * @top         DS top information
+ */
+struct dpu_ds_cfg {
+       DPU_HW_BLK_INFO;
+       u32 version;
+       const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @sblk               sub-blocks information
+ */
+struct dpu_pingpong_cfg  {
+       DPU_HW_BLK_INFO;
+       const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @intf_connect       Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg   {
+       DPU_HW_BLK_INFO;
+       unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @type:              interface type (DSI, DP, HDMI)
+ * @controller_id:     controller instance ID in case there are multiple
+ *                     interfaces of this type
+ * @prog_fetch_lines_worst_case  worst-case latency, in lines, needed for
+ *                     prefetch
+ */
+struct dpu_intf_cfg  {
+       DPU_HW_BLK_INFO;
+       u32 type;   /* interface type */
+       u32 controller_id;
+       u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps                pixels per second
+ * @ot_limit           OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+       u64 pps;
+       u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count              length of cfg
+ * @cfg                pointer to array of configuration settings with
+ *                     ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+       u32 count;
+       struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      number of priority levels
+ * @priority_lvl       pointer to array of priority levels in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+       u32 npriority_lvl;
+       u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout   maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl  dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
+ * @memtype_count      number of defined memtypes
+ * @memtype            array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+       DPU_HW_BLK_INFO;
+       u32 default_ot_rd_limit;
+       u32 default_ot_wr_limit;
+       u32 xin_halt_timeout;
+       struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+       struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+       struct dpu_vbif_qos_tbl qos_rt_tbl;
+       struct dpu_vbif_qos_tbl qos_nrt_tbl;
+       u32 memtype_count;
+       u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ * @version            version of lutdma hw block
+ * @trigger_sel_off    offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+       DPU_HW_BLK_INFO;
+       u32 version;
+       u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+       DPU_PERF_CDP_USAGE_RT,
+       DPU_PERF_CDP_USAGE_NRT,
+       DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+       bool rd_enable;
+       bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low         low threshold of maximum bandwidth (kbps)
+ * @max_bw_high        high threshold of maximum bandwidth (kbps)
+ * @min_core_ib        minimum mnoc ib vote in kbps
+ * @min_llcc_ib        minimum llcc ib vote in kbps
+ * @min_dram_ib        minimum dram ib vote in kbps
+ * @core_ib_ff         core instantaneous bandwidth fudge factor
+ * @core_clk_ff        core clock fudge factor
+ * @comp_ratio_rt      string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt     string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines   undersized prefill in lines
+ * @xtra_prefill_lines         extra prefill latency in lines
+ * @dest_scale_prefill_lines   destination scaler latency in lines
+ * @macrotile_prefill_lines    macrotile latency in lines
+ * @yuv_nv12_prefill_lines     yuv_nv12 latency in lines
+ * @linear_prefill_lines       linear latency in lines
+ * @downscaling_prefill_lines  downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines  minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg            cdp use case configurations
+ */
+struct dpu_perf_cfg {
+       u32 max_bw_low;
+       u32 max_bw_high;
+       u32 min_core_ib;
+       u32 min_llcc_ib;
+       u32 min_dram_ib;
+       const char *core_ib_ff;
+       const char *core_clk_ff;
+       const char *comp_ratio_rt;
+       const char *comp_ratio_nrt;
+       u32 undersized_prefill_lines;
+       u32 xtra_prefill_lines;
+       u32 dest_scale_prefill_lines;
+       u32 macrotile_prefill_lines;
+       u32 yuv_nv12_prefill_lines;
+       u32 linear_prefill_lines;
+       u32 downscaling_prefill_lines;
+       u32 amortizable_threshold;
+       u32 min_prefill_lines;
+       u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+       u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+       struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+       struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats        Supported formats for dma pipe
+ * @cursor_formats     Supported formats for cursor pipe
+ * @vig_formats        Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+       u32 hwversion;
+
+       const struct dpu_caps *caps;
+
+       u32 mdp_count;
+       struct dpu_mdp_cfg *mdp;
+
+       u32 ctl_count;
+       struct dpu_ctl_cfg *ctl;
+
+       u32 sspp_count;
+       struct dpu_sspp_cfg *sspp;
+
+       u32 mixer_count;
+       struct dpu_lm_cfg *mixer;
+
+       u32 ds_count;
+       struct dpu_ds_cfg *ds;
+
+       u32 pingpong_count;
+       struct dpu_pingpong_cfg *pingpong;
+
+       u32 cdm_count;
+       struct dpu_cdm_cfg *cdm;
+
+       u32 intf_count;
+       struct dpu_intf_cfg *intf;
+
+       u32 vbif_count;
+       struct dpu_vbif_cfg *vbif;
+
+       u32 reg_dma_count;
+       struct dpu_reg_dma_cfg dma_cfg;
+
+       u32 ad_count;
+
+       /* Add additional block data structures here */
+
+       struct dpu_perf_cfg perf;
+       struct dpu_format_extended *dma_formats;
+       struct dpu_format_extended *cursor_formats;
+       struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+       u32 hw_rev;
+       void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
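+
+/*
+ * Illustrative use of the handler (assumed shape, not taken from this
+ * patch): the catalog implementation can keep a table keyed by hw_rev and
+ * dispatch to a per-target initializer, e.g.:
+ *
+ *     static const struct dpu_mdss_hw_cfg_handler handlers[] = {
+ *             { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init },
+ *     };
+ *
+ * where sdm845_cfg_init is a hypothetical function that fills in a
+ * struct dpu_mdss_cfg for that target.
+ */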
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves the
+ * hardcoded target-specific catalog information in a config structure
+ * @hw_rev:       hardware revision, provided by the caller
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg:      pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
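+
+/*
+ * Typical init/deinit pairing (editorial sketch):
+ *
+ *     struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(DPU_HW_VER_400);
+ *
+ *     if (IS_ERR_OR_NULL(cfg))
+ *             return -EINVAL;
+ *     ...use cfg->caps, cfg->sspp, cfg->mixer, etc...
+ *     dpu_hw_catalog_deinit(cfg);
+ */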
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg:          pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+       return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+                        test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
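+
+/*
+ * Example caller (illustrative): before staging a second rectangle on the
+ * same pipe, a plane setup path might check:
+ *
+ *     if (dpu_hw_sspp_multirect_enabled(pipe_cfg))
+ *             ...program DPU_SSPP_RECT_1 on this pipe...
+ */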
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644 (file)
index 0000000..3c9f028
--- /dev/null
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+       {DRM_FORMAT_ARGB8888, 0},
+       {DRM_FORMAT_ABGR8888, 0},
+       {DRM_FORMAT_RGBA8888, 0},
+       {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_BGRA8888, 0},
+       {DRM_FORMAT_XRGB8888, 0},
+       {DRM_FORMAT_RGBX8888, 0},
+       {DRM_FORMAT_BGRX8888, 0},
+       {DRM_FORMAT_XBGR8888, 0},
+       {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_RGB888, 0},
+       {DRM_FORMAT_BGR888, 0},
+       {DRM_FORMAT_RGB565, 0},
+       {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_BGR565, 0},
+       {DRM_FORMAT_ARGB1555, 0},
+       {DRM_FORMAT_ABGR1555, 0},
+       {DRM_FORMAT_RGBA5551, 0},
+       {DRM_FORMAT_BGRA5551, 0},
+       {DRM_FORMAT_XRGB1555, 0},
+       {DRM_FORMAT_XBGR1555, 0},
+       {DRM_FORMAT_RGBX5551, 0},
+       {DRM_FORMAT_BGRX5551, 0},
+       {DRM_FORMAT_ARGB4444, 0},
+       {DRM_FORMAT_ABGR4444, 0},
+       {DRM_FORMAT_RGBA4444, 0},
+       {DRM_FORMAT_BGRA4444, 0},
+       {DRM_FORMAT_XRGB4444, 0},
+       {DRM_FORMAT_XBGR4444, 0},
+       {DRM_FORMAT_RGBX4444, 0},
+       {DRM_FORMAT_BGRX4444, 0},
+       {0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+       {DRM_FORMAT_ARGB8888, 0},
+       {DRM_FORMAT_ABGR8888, 0},
+       {DRM_FORMAT_RGBA8888, 0},
+       {DRM_FORMAT_BGRX8888, 0},
+       {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_BGRA8888, 0},
+       {DRM_FORMAT_XRGB8888, 0},
+       {DRM_FORMAT_XBGR8888, 0},
+       {DRM_FORMAT_RGBX8888, 0},
+       {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_RGB888, 0},
+       {DRM_FORMAT_BGR888, 0},
+       {DRM_FORMAT_RGB565, 0},
+       {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_BGR565, 0},
+       {DRM_FORMAT_ARGB1555, 0},
+       {DRM_FORMAT_ABGR1555, 0},
+       {DRM_FORMAT_RGBA5551, 0},
+       {DRM_FORMAT_BGRA5551, 0},
+       {DRM_FORMAT_XRGB1555, 0},
+       {DRM_FORMAT_XBGR1555, 0},
+       {DRM_FORMAT_RGBX5551, 0},
+       {DRM_FORMAT_BGRX5551, 0},
+       {DRM_FORMAT_ARGB4444, 0},
+       {DRM_FORMAT_ABGR4444, 0},
+       {DRM_FORMAT_RGBA4444, 0},
+       {DRM_FORMAT_BGRA4444, 0},
+       {DRM_FORMAT_XRGB4444, 0},
+       {DRM_FORMAT_XBGR4444, 0},
+       {DRM_FORMAT_RGBX4444, 0},
+       {DRM_FORMAT_BGRX4444, 0},
+
+       {DRM_FORMAT_NV12, 0},
+       {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_NV21, 0},
+       {DRM_FORMAT_NV16, 0},
+       {DRM_FORMAT_NV61, 0},
+       {DRM_FORMAT_VYUY, 0},
+       {DRM_FORMAT_UYVY, 0},
+       {DRM_FORMAT_YUYV, 0},
+       {DRM_FORMAT_YVYU, 0},
+       {DRM_FORMAT_YUV420, 0},
+       {DRM_FORMAT_YVU420, 0},
+       {0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+       {DRM_FORMAT_ARGB8888, 0},
+       {DRM_FORMAT_ABGR8888, 0},
+       {DRM_FORMAT_RGBA8888, 0},
+       {DRM_FORMAT_BGRA8888, 0},
+       {DRM_FORMAT_XRGB8888, 0},
+       {DRM_FORMAT_ARGB1555, 0},
+       {DRM_FORMAT_ABGR1555, 0},
+       {DRM_FORMAT_RGBA5551, 0},
+       {DRM_FORMAT_BGRA5551, 0},
+       {DRM_FORMAT_ARGB4444, 0},
+       {DRM_FORMAT_ABGR4444, 0},
+       {DRM_FORMAT_RGBA4444, 0},
+       {DRM_FORMAT_BGRA4444, 0},
+       {0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+       {DRM_FORMAT_RGB565, 0},
+       {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_RGB888, 0},
+       {DRM_FORMAT_ARGB8888, 0},
+       {DRM_FORMAT_RGBA8888, 0},
+       {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_XRGB8888, 0},
+       {DRM_FORMAT_RGBX8888, 0},
+       {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_ARGB1555, 0},
+       {DRM_FORMAT_RGBA5551, 0},
+       {DRM_FORMAT_XRGB1555, 0},
+       {DRM_FORMAT_RGBX5551, 0},
+       {DRM_FORMAT_ARGB4444, 0},
+       {DRM_FORMAT_RGBA4444, 0},
+       {DRM_FORMAT_RGBX4444, 0},
+       {DRM_FORMAT_XRGB4444, 0},
+
+       {DRM_FORMAT_BGR565, 0},
+       {DRM_FORMAT_BGR888, 0},
+       {DRM_FORMAT_ABGR8888, 0},
+       {DRM_FORMAT_BGRA8888, 0},
+       {DRM_FORMAT_BGRX8888, 0},
+       {DRM_FORMAT_XBGR8888, 0},
+       {DRM_FORMAT_ABGR1555, 0},
+       {DRM_FORMAT_BGRA5551, 0},
+       {DRM_FORMAT_XBGR1555, 0},
+       {DRM_FORMAT_BGRX5551, 0},
+       {DRM_FORMAT_ABGR4444, 0},
+       {DRM_FORMAT_BGRA4444, 0},
+       {DRM_FORMAT_BGRX4444, 0},
+       {DRM_FORMAT_XBGR4444, 0},
+
+       {DRM_FORMAT_YUV420, 0},
+       {DRM_FORMAT_NV12, 0},
+       {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_NV16, 0},
+       {DRM_FORMAT_YUYV, 0},
+
+       {0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+       {DRM_FORMAT_BGRA1010102, 0},
+       {DRM_FORMAT_BGRX1010102, 0},
+       {DRM_FORMAT_RGBA1010102, 0},
+       {DRM_FORMAT_RGBX1010102, 0},
+       {DRM_FORMAT_ABGR2101010, 0},
+       {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_XBGR2101010, 0},
+       {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+       {DRM_FORMAT_ARGB2101010, 0},
+       {DRM_FORMAT_XRGB2101010, 0},
+};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644 (file)
index 0000000..554874b
--- /dev/null
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE                  0x000
+#define CDM_CSC_10_BASE                    0x004
+
+#define CDM_CDWN2_OP_MODE                  0x100
+#define CDM_CDWN2_CLAMP_OUT                0x104
+#define CDM_CDWN2_PARAMS_3D_0              0x108
+#define CDM_CDWN2_PARAMS_3D_1              0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define CDM_CDWN2_COEFF_COSITE_V           0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define CDM_CDWN2_OUT_SIZE                 0x130
+
+#define CDM_HDMI_PACK_OP_MODE              0x200
+#define CDM_CSC_10_MATRIX_COEFF_0          0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+       {
+               0x0083, 0x0102, 0x0032,
+               0x1fb5, 0x1f6c, 0x00e1,
+               0x00e1, 0x1f45, 0x1fdc
+       },
+       { 0x00, 0x00, 0x00 },
+       { 0x0040, 0x0200, 0x0200 },
+       { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+       { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
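+
+/*
+ * Editorial note on the initializer above, assuming the dpu_csc_cfg layout
+ * of a 3x3 coefficient matrix followed by pre/post bias and pre/post clamp
+ * vectors: the first block is the fixed-point RGB-to-YUV matrix, the next
+ * two rows are the pre- and post-bias vectors, and the last two rows are
+ * the pre- and post-clamp ranges for the limited-range output.
+ */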
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+               struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       for (i = 0; i < m->cdm_count; i++) {
+               if (cdm == m->cdm[i].id) {
+                       b->base_off = addr;
+                       b->blk_off = m->cdm[i].base;
+                       b->length = m->cdm[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_CDM;
+                       return &m->cdm[i];
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+               struct dpu_csc_cfg *data)
+{
+       dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+       return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+               struct dpu_hw_cdm_cfg *cfg)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 opmode = 0;
+       u32 out_size = 0;
+
+       if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+               opmode &= ~BIT(7);
+       else
+               opmode |= BIT(7);
+
+       /* ENABLE DWNS_H bit */
+       opmode |= BIT(1);
+
+       switch (cfg->h_cdwn_type) {
+       case CDM_CDWN_DISABLE:
+               /* CLEAR METHOD_H field */
+               opmode &= ~(0x18);
+               /* CLEAR DWNS_H bit */
+               opmode &= ~BIT(1);
+               break;
+       case CDM_CDWN_PIXEL_DROP:
+               /* Clear METHOD_H field (pixel drop is 0) */
+               opmode &= ~(0x18);
+               break;
+       case CDM_CDWN_AVG:
+               /* Clear METHOD_H field (Average is 0x1) */
+               opmode &= ~(0x18);
+               opmode |= (0x1 << 0x3);
+               break;
+       case CDM_CDWN_COSITE:
+               /* Clear METHOD_H field (Cosite is 0x2) */
+               opmode &= ~(0x18);
+               opmode |= (0x2 << 0x3);
+               /* Co-site horizontal coefficients */
+               DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+                               cosite_h_coeff[0]);
+               DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+                               cosite_h_coeff[1]);
+               DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+                               cosite_h_coeff[2]);
+               break;
+       case CDM_CDWN_OFFSITE:
+               /* Clear METHOD_H field (Offsite is 0x3) */
+               opmode &= ~(0x18);
+               opmode |= (0x3 << 0x3);
+
+               /* Off-site horizontal coefficients */
+               DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+                               offsite_h_coeff[0]);
+               DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+                               offsite_h_coeff[1]);
+               DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+                               offsite_h_coeff[2]);
+               break;
+       default:
+               pr_err("%s invalid horz down sampling type\n", __func__);
+               return -EINVAL;
+       }
+
+       /* ENABLE DWNS_V bit */
+       opmode |= BIT(2);
+
+       switch (cfg->v_cdwn_type) {
+       case CDM_CDWN_DISABLE:
+               /* CLEAR METHOD_V field */
+               opmode &= ~(0x60);
+               /* CLEAR DWNS_V bit */
+               opmode &= ~BIT(2);
+               break;
+       case CDM_CDWN_PIXEL_DROP:
+               /* Clear METHOD_V field (pixel drop is 0) */
+               opmode &= ~(0x60);
+               break;
+       case CDM_CDWN_AVG:
+               /* Clear METHOD_V field (Average is 0x1) */
+               opmode &= ~(0x60);
+               opmode |= (0x1 << 0x5);
+               break;
+       case CDM_CDWN_COSITE:
+               /* Clear METHOD_V field (Cosite is 0x2) */
+               opmode &= ~(0x60);
+               opmode |= (0x2 << 0x5);
+               /* Co-site vertical coefficients */
+               DPU_REG_WRITE(c,
+                               CDM_CDWN2_COEFF_COSITE_V,
+                               cosite_v_coeff[0]);
+               break;
+       case CDM_CDWN_OFFSITE:
+               /* Clear METHOD_V field (Offsite is 0x3) */
+               opmode &= ~(0x60);
+               opmode |= (0x3 << 0x5);
+
+               /* Off-site vertical coefficients */
+               DPU_REG_WRITE(c,
+                               CDM_CDWN2_COEFF_OFFSITE_V,
+                               offsite_v_coeff[0]);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+               opmode |= BIT(0); /* EN CDWN module */
+       else
+               opmode &= ~BIT(0);
+
+       out_size = (cfg->output_width & 0xFFFF) |
+               ((cfg->output_height & 0xFFFF) << 16);
+       DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+       DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+       DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+                       ((0x3FF << 16) | 0x0));
+
+       return 0;
+}
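+
+/*
+ * Worked example (editorial): for CDM_CDWN_COSITE horizontal and
+ * CDM_CDWN_OFFSITE vertical downsampling with 10-bit output, the function
+ * above builds opmode = BIT(0) | BIT(1) | BIT(2) | (0x2 << 3) | (0x3 << 5)
+ * = 0x77: module enable, both downscalers enabled, METHOD_H = cosite and
+ * METHOD_V = offsite.
+ */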
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+               struct dpu_hw_cdm_cfg *cdm)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       const struct dpu_format *fmt = cdm->output_fmt;
+       struct cdm_output_cfg cdm_cfg = { 0 };
+       u32 opmode = 0;
+       u32 csc = 0;
+
+       if (!DPU_FORMAT_IS_YUV(fmt))
+               return -EINVAL;
+
+       if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+               if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+                       return -EINVAL; /* unsupported format */
+               opmode = BIT(0);
+               opmode |= (fmt->chroma_sample << 1);
+               cdm_cfg.intf_en = true;
+       }
+
+       csc |= BIT(2);
+       csc &= ~BIT(1);
+       csc |= BIT(0);
+
+       if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+               ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+       DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+       DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+       return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+       struct cdm_output_cfg cdm_cfg = { 0 };
+
+       if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+               ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+       unsigned long features)
+{
+       ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+       ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+       ops->enable = dpu_hw_cdm_enable;
+       ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m,
+               struct dpu_hw_mdp *hw_mdp)
+{
+       struct dpu_hw_cdm *c;
+       struct dpu_cdm_cfg *cfg;
+       int rc;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _cdm_offset(idx, m, addr, &c->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(c);
+               return ERR_PTR(-EINVAL);
+       }
+
+       c->idx = idx;
+       c->caps = cfg;
+       _setup_cdm_ops(&c->ops, c->caps->features);
+       c->hw_mdp = hw_mdp;
+
+       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       /*
+        * Perform any default initialization for the chroma down module:
+        * set up the default CSC coefficients.
+        */
+       dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+       return c;
+
+blk_init_error:
+       kzfree(c);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+       if (cdm)
+               dpu_hw_blk_destroy(&cdm->base);
+       kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644 (file)
index 0000000..5cceb1e
--- /dev/null
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+       u32 output_width;
+       u32 output_height;
+       u32 output_bit_depth;
+       u32 h_cdwn_type;
+       u32 v_cdwn_type;
+       const struct dpu_format *output_fmt;
+       u32 output_type;
+       int flags;
+};
+
+enum dpu_hw_cdwn_type {
+       CDM_CDWN_DISABLE,
+       CDM_CDWN_PIXEL_DROP,
+       CDM_CDWN_AVG,
+       CDM_CDWN_COSITE,
+       CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+       CDM_CDWN_OUTPUT_HDMI,
+       CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+       CDM_CDWN_OUTPUT_8BIT,
+       CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down HW driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ *  @setup_csc_data:       Programs the CSC matrix
+ *  @setup_cdwn:           Sets up the chroma down sub-module
+ *  @enable:               Enables the output to interface and programs the
+ *                         output packer
+ *  @disable:              Puts the CDM in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+       /**
+        * Programs the CSC matrix for conversion from RGB space to YUV space.
+        * Calling this function is optional, as the matrix is automatically
+        * set during initialization; call it to program a matrix other than
+        * the default one.
+        * @cdm:          Pointer to the chroma down context structure
+        * @data:         Pointer to CSC configuration data
+        * return:        0 if success; error code otherwise
+        */
+       int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+                       struct dpu_csc_cfg *data);
+
+       /**
+        * Programs the chroma downsample part.
+        * @cdm         Pointer to chroma down context
+        * @cfg         Pointer to chroma down config
+        */
+       int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+                       struct dpu_hw_cdm_cfg *cfg);
+
+       /**
+        * Enable the CDM module
+        * @cdm         Pointer to chroma down context
+        * @cfg         Pointer to chroma down config
+        */
+       int (*enable)(struct dpu_hw_cdm *cdm,
+                       struct dpu_hw_cdm_cfg *cfg);
+
+       /**
+        * Disable the CDM module
+        * @cdm         Pointer to chroma down context
+        */
+       void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+
+       /* chroma down */
+       const struct dpu_cdm_cfg *caps;
+       enum  dpu_cdm  idx;
+
+       /* mdp top hw driver */
+       struct dpu_hw_mdp *hw_mdp;
+
+       /* ops */
+       struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * Should be called once before accessing each cdm.
+ * @idx:  cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m:    pointer to mdss catalog data
+ * @hw_mdp:  pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m,
+               struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm:   pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /* _DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644 (file)
index 0000000..06be7cf
--- /dev/null
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define   CTL_LAYER(lm)                 \
+       (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT(lm)             \
+       (0x40 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT2(lm)             \
+       (0x70 + (((lm) - LM_0) * 0x004))
+#define   CTL_LAYER_EXT3(lm)             \
+       (0xA0 + (((lm) - LM_0) * 0x004))
+#define   CTL_TOP                       0x014
+#define   CTL_FLUSH                     0x018
+#define   CTL_START                     0x01C
+#define   CTL_PREPARE                   0x0d0
+#define   CTL_SW_RESET                  0x030
+#define   CTL_LAYER_EXTN_OFFSET         0x40
+
+#define CTL_MIXER_BORDER_OUT            BIT(24)
+#define CTL_FLUSH_MASK_CTL              BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US        2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+               struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       for (i = 0; i < m->ctl_count; i++) {
+               if (ctl == m->ctl[i].id) {
+                       b->base_off = addr;
+                       b->blk_off = m->ctl[i].base;
+                       b->length = m->ctl[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_CTL;
+                       return &m->ctl[i];
+               }
+       }
+       return ERR_PTR(-ENOMEM);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+               enum dpu_lm lm)
+{
+       int i;
+       int stages = -EINVAL;
+
+       for (i = 0; i < count; i++) {
+               if (lm == mixer[i].id) {
+                       stages = mixer[i].sblk->maxblendstages;
+                       break;
+               }
+       }
+
+       return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+       DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+       DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+       ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+               u32 flushbits)
+{
+       ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+       if (!ctx)
+               return 0x0;
+
+       return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+       DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+       return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+       enum dpu_sspp sspp)
+{
+       uint32_t flushbits = 0;
+
+       switch (sspp) {
+       case SSPP_VIG0:
+               flushbits =  BIT(0);
+               break;
+       case SSPP_VIG1:
+               flushbits = BIT(1);
+               break;
+       case SSPP_VIG2:
+               flushbits = BIT(2);
+               break;
+       case SSPP_VIG3:
+               flushbits = BIT(18);
+               break;
+       case SSPP_RGB0:
+               flushbits = BIT(3);
+               break;
+       case SSPP_RGB1:
+               flushbits = BIT(4);
+               break;
+       case SSPP_RGB2:
+               flushbits = BIT(5);
+               break;
+       case SSPP_RGB3:
+               flushbits = BIT(19);
+               break;
+       case SSPP_DMA0:
+               flushbits = BIT(11);
+               break;
+       case SSPP_DMA1:
+               flushbits = BIT(12);
+               break;
+       case SSPP_DMA2:
+               flushbits = BIT(24);
+               break;
+       case SSPP_DMA3:
+               flushbits = BIT(25);
+               break;
+       case SSPP_CURSOR0:
+               flushbits = BIT(22);
+               break;
+       case SSPP_CURSOR1:
+               flushbits = BIT(23);
+               break;
+       default:
+               break;
+       }
+
+       return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+       enum dpu_lm lm)
+{
+       uint32_t flushbits = 0;
+
+       switch (lm) {
+       case LM_0:
+               flushbits = BIT(6);
+               break;
+       case LM_1:
+               flushbits = BIT(7);
+               break;
+       case LM_2:
+               flushbits = BIT(8);
+               break;
+       case LM_3:
+               flushbits = BIT(9);
+               break;
+       case LM_4:
+               flushbits = BIT(10);
+               break;
+       case LM_5:
+               flushbits = BIT(20);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       flushbits |= CTL_FLUSH_MASK_CTL;
+
+       return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+               u32 *flushbits, enum dpu_intf intf)
+{
+       switch (intf) {
+       case INTF_0:
+               *flushbits |= BIT(31);
+               break;
+       case INTF_1:
+               *flushbits |= BIT(30);
+               break;
+       case INTF_2:
+               *flushbits |= BIT(29);
+               break;
+       case INTF_3:
+               *flushbits |= BIT(28);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+               u32 *flushbits, enum dpu_cdm cdm)
+{
+       switch (cdm) {
+       case CDM_0:
+               *flushbits |= BIT(26);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       ktime_t timeout;
+       u32 status;
+
+       timeout = ktime_add_us(ktime_get(), timeout_us);
+
+       /*
+        * It takes around 30us for the MDP to finish resetting its CTL path;
+        * poll in 20-50us steps so that the reset completes by the first poll.
+        */
+       do {
+               status = DPU_REG_READ(c, CTL_SW_RESET);
+               status &= 0x1;
+               if (status)
+                       usleep_range(20, 50);
+       } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+       return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+       pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+       DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+       if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 status;
+
+       status = DPU_REG_READ(c, CTL_SW_RESET);
+       status &= 0x01;
+       if (!status)
+               return 0;
+
+       pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+       if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+               pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       int i;
+
+       for (i = 0; i < ctx->mixer_count; i++) {
+               DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+               DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+               DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+               DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+       }
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+       enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+       u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+       int i, j;
+       int stages; /* signed, so the stages < 0 error check below can fire */
+       int pipes_per_stage;
+
+       stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+       if (stages < 0)
+               return;
+
+       if (test_bit(DPU_MIXER_SOURCESPLIT,
+               &ctx->mixer_hw_caps->features))
+               pipes_per_stage = PIPES_PER_STAGE;
+       else
+               pipes_per_stage = 1;
+
+       mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+       if (!stage_cfg)
+               goto exit;
+
+       for (i = 0; i <= stages; i++) {
+               /* overflow to ext register if 'i + 1 > 7' */
+               mix = (i + 1) & 0x7;
+               ext = i >= 7;
+
+               for (j = 0 ; j < pipes_per_stage; j++) {
+                       enum dpu_sspp_multirect_index rect_index =
+                               stage_cfg->multirect_index[i][j];
+
+                       switch (stage_cfg->stage[i][j]) {
+                       case SSPP_VIG0:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+                               } else {
+                                       mixercfg |= mix << 0;
+                                       mixercfg_ext |= ext << 0;
+                               }
+                               break;
+                       case SSPP_VIG1:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+                               } else {
+                                       mixercfg |= mix << 3;
+                                       mixercfg_ext |= ext << 2;
+                               }
+                               break;
+                       case SSPP_VIG2:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+                               } else {
+                                       mixercfg |= mix << 6;
+                                       mixercfg_ext |= ext << 4;
+                               }
+                               break;
+                       case SSPP_VIG3:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+                               } else {
+                                       mixercfg |= mix << 26;
+                                       mixercfg_ext |= ext << 6;
+                               }
+                               break;
+                       case SSPP_RGB0:
+                               mixercfg |= mix << 9;
+                               mixercfg_ext |= ext << 8;
+                               break;
+                       case SSPP_RGB1:
+                               mixercfg |= mix << 12;
+                               mixercfg_ext |= ext << 10;
+                               break;
+                       case SSPP_RGB2:
+                               mixercfg |= mix << 15;
+                               mixercfg_ext |= ext << 12;
+                               break;
+                       case SSPP_RGB3:
+                               mixercfg |= mix << 29;
+                               mixercfg_ext |= ext << 14;
+                               break;
+                       case SSPP_DMA0:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+                               } else {
+                                       mixercfg |= mix << 18;
+                                       mixercfg_ext |= ext << 16;
+                               }
+                               break;
+                       case SSPP_DMA1:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+                               } else {
+                                       mixercfg |= mix << 21;
+                                       mixercfg_ext |= ext << 18;
+                               }
+                               break;
+                       case SSPP_DMA2:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+                               } else {
+                                       mix |= (i + 1) & 0xF;
+                                       mixercfg_ext2 |= mix << 0;
+                               }
+                               break;
+                       case SSPP_DMA3:
+                               if (rect_index == DPU_SSPP_RECT_1) {
+                                       mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+                               } else {
+                                       mix |= (i + 1) & 0xF;
+                                       mixercfg_ext2 |= mix << 4;
+                               }
+                               break;
+                       case SSPP_CURSOR0:
+                               mixercfg_ext |= ((i + 1) & 0xF) << 20;
+                               break;
+                       case SSPP_CURSOR1:
+                               mixercfg_ext |= ((i + 1) & 0xF) << 26;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+exit:
+       DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+       DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+       DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+       DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+               struct dpu_hw_intf_cfg *cfg)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 intf_cfg = 0;
+
+       intf_cfg |= (cfg->intf & 0xF) << 4;
+
+       if (cfg->mode_3d) {
+               intf_cfg |= BIT(19);
+               intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+       }
+
+       switch (cfg->intf_mode_sel) {
+       case DPU_CTL_MODE_SEL_VID:
+               intf_cfg &= ~BIT(17);
+               intf_cfg &= ~(0x3 << 15);
+               break;
+       case DPU_CTL_MODE_SEL_CMD:
+               intf_cfg |= BIT(17);
+               intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+               break;
+       default:
+               pr_err("unknown interface mode selection %d\n", cfg->intf_mode_sel);
+               return;
+       }
+
+       DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+               unsigned long cap)
+{
+       ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+       ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+       ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+       ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+       ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+       ops->trigger_start = dpu_hw_ctl_trigger_start;
+       ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+       ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+       ops->reset = dpu_hw_ctl_reset_control;
+       ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+       ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+       ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+       ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+       ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+       ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+       ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_ctl *c;
+       struct dpu_ctl_cfg *cfg;
+       int rc;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _ctl_offset(idx, m, addr, &c->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(c);
+               pr_err("failed to create dpu_hw_ctl %d\n", idx);
+               return ERR_PTR(-EINVAL);
+       }
+
+       c->caps = cfg;
+       _setup_ctl_ops(&c->ops, c->caps->features);
+       c->idx = idx;
+       c->mixer_count = m->mixer_count;
+       c->mixer_hw_caps = m->mixer;
+
+       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       return c;
+
+blk_init_error:
+       kfree(c);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+       if (ctx)
+               dpu_hw_blk_destroy(&ctx->base);
+       kfree(ctx);
+}
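
A minimal usage sketch of the init/destroy pair above, assuming an
ioremapped MDP register base "mmio" and a parsed dpu_mdss_cfg catalog
"cat" (both names illustrative):

	struct dpu_hw_ctl *ctl;

	ctl = dpu_hw_ctl_init(CTL_0, mmio, cat);
	if (IS_ERR(ctl))
		return PTR_ERR(ctl);	/* -EINVAL or -ENOMEM from init */

	/* ... program the CTL path through ctl->ops ... */

	dpu_hw_ctl_destroy(ctl);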
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644 (file)
index 0000000..c66a71f
--- /dev/null
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID:    Video mode interface
+ * DPU_CTL_MODE_SEL_CMD:    Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+       DPU_CTL_MODE_SEL_VID = 0,
+       DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+       enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+       enum dpu_sspp_multirect_index multirect_index
+                                       [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
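
A sketch of how a caller might fill this structure before handing it to
setup_blendstage(); the stage/pipe placement and the LM_0 mixer id are
illustrative, and leaving multirect_index zeroed selects the non-RECT_1
path in the blendstage switch:

	struct dpu_hw_stage_cfg stage_cfg = { 0 };

	/* blend VIG0 at stage 0, pipe 0; multirect left at its default */
	stage_cfg.stage[0][0] = SSPP_VIG0;

	ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);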
+
+/**
+ * struct dpu_hw_intf_cfg : Describes how the DPU writes data to the output interface
+ * @intf :                 Interface id
+ * @mode_3d:               3d mux configuration
+ * @intf_mode_sel:         Interface mode, cmd / vid
+ * @stream_sel:            Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+       enum dpu_intf intf;
+       enum dpu_3d_blend_mode mode_3d;
+       enum dpu_ctl_mode_sel intf_mode_sel;
+       int stream_sel;
+};
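
As a sketch of the input consumed by dpu_hw_ctl_intf_cfg() above, a DSI
command-mode configuration might look as follows (values illustrative;
a zero mode_3d leaves the 3D mux disabled):

	struct dpu_hw_intf_cfg intf_cfg = {
		.intf = INTF_1,
		.mode_3d = 0,			/* 3D mux disabled */
		.intf_mode_sel = DPU_CTL_MODE_SEL_CMD,
		.stream_sel = 0,
	};

	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);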
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the CTL hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+       /**
+        * Kick off a hw operation for SW controlled interfaces;
+        * DSI cmd mode and the WB interface are SW controlled
+        * @ctx       : ctl path ctx pointer
+        */
+       void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * Mark kickoff preparation as in progress: a hw operation
+        * for SW controlled interfaces (DSI cmd mode and the WB
+        * interface are SW controlled)
+        * @ctx       : ctl path ctx pointer
+        */
+       void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * Clear the value of the cached pending_flush_mask
+        * No effect on hardware
+        * @ctx       : ctl path ctx pointer
+        */
+       void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * Query the value of the cached pending_flush_mask
+        * No effect on hardware
+        * @ctx       : ctl path ctx pointer
+        */
+       u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * OR in the given flushbits to the cached pending_flush_mask
+        * No effect on hardware
+        * @ctx       : ctl path ctx pointer
+        * @flushbits : module flushmask
+        */
+       void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+               u32 flushbits);
+
+       /**
+        * Write the value of the pending_flush_mask to hardware
+        * @ctx       : ctl path ctx pointer
+        */
+       void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * Read the value of the flush register
+        * @ctx       : ctl path ctx pointer
+        * Return: value of the ctl flush register.
+        */
+       u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * Setup ctl_path interface config
+        * @ctx    : ctl path ctx pointer
+        * @cfg    : interface config structure pointer
+        */
+       void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+               struct dpu_hw_intf_cfg *cfg);
+
+       int (*reset)(struct dpu_hw_ctl *c);
+
+       /**
+        * wait_reset_status - checks ctl reset status
+        * @ctx       : ctl path ctx pointer
+        *
+        * This function checks the ctl reset status bit.
+        * If the reset bit is set, it keeps polling the status until the hw
+        * reset is complete.
+        * Returns: 0 on success or -error if reset incomplete within interval
+        */
+       int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+       uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+               enum dpu_sspp blk);
+
+       uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+               enum dpu_lm blk);
+
+       int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+               u32 *flushbits,
+               enum dpu_intf blk);
+
+       int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+               u32 *flushbits,
+               enum dpu_cdm blk);
+
+       /**
+        * Set all blend stages to disabled
+        * @ctx       : ctl path ctx pointer
+        */
+       void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+       /**
+        * Configure layer mixer to pipe configuration
+        * @ctx       : ctl path ctx pointer
+        * @lm        : layer mixer enumeration
+        * @cfg       : blend stage configuration
+        */
+       void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+               enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
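
The flush-related ops above implement an accumulate-then-commit
pattern: flush bits are OR-ed into a cached mask and written to
hardware once. A minimal sketch, with LM_0 as an illustrative mixer:

	u32 flushbits;

	ctl->ops.clear_pending_flush(ctl);

	flushbits = ctl->ops.get_bitmask_mixer(ctl, LM_0);
	ctl->ops.update_pending_flush(ctl, flushbits);
	/* ... OR in more blocks via the other get_bitmask_* ops ... */

	ctl->ops.trigger_flush(ctl);	/* one register write commits all */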
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+
+       /* ctl path */
+       int idx;
+       const struct dpu_ctl_cfg *caps;
+       int mixer_count;
+       const struct dpu_lm_cfg *mixer_hw_caps;
+       u32 pending_flush_mask;
+
+       /* ops */
+       struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to the dpu_hw_ctl container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx:  ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys the ctl driver context;
+ * should be called to free the context.
+ * @ctx:  ctl path context to free
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /* _DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644 (file)
index 0000000..c0b7f00
--- /dev/null
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF              0x0
+#define MDP_INTF_0_OFF                 0x6A000
+#define MDP_INTF_1_OFF                 0x6A800
+#define MDP_INTF_2_OFF                 0x6B000
+#define MDP_INTF_3_OFF                 0x6B800
+#define MDP_INTF_4_OFF                 0x6C000
+#define MDP_AD4_0_OFF                  0x7C000
+#define MDP_AD4_1_OFF                  0x7D000
+#define MDP_AD4_INTR_EN_OFF            0x41c
+#define MDP_AD4_INTR_CLEAR_OFF         0x424
+#define MDP_AD4_INTR_STATUS_OFF                0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off:   offset to CLEAR reg
+ * @en_off:    offset to ENABLE reg
+ * @status_off:        offset to STATUS reg
+ */
+struct dpu_intr_reg {
+       u32 clr_off;
+       u32 en_off;
+       u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq with i/f
+ * @intr_type:         type of interrupt listed in dpu_intr_type
+ * @instance_idx:      instance index of the associated HW block in DPU
+ * @irq_mask:          corresponding bit in the interrupt status reg
+ * @reg_idx:           which reg set to use
+ */
+struct dpu_irq_type {
+       u32 intr_type;
+       u32 instance_idx;
+       u32 irq_mask;
+       u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+       {
+               MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+               MDP_SSPP_TOP0_OFF+INTR_EN,
+               MDP_SSPP_TOP0_OFF+INTR_STATUS
+       },
+       {
+               MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+               MDP_SSPP_TOP0_OFF+INTR2_EN,
+               MDP_SSPP_TOP0_OFF+INTR2_STATUS
+       },
+       {
+               MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+               MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+               MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+       },
+       {
+               MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+               MDP_INTF_0_OFF+INTF_INTR_EN,
+               MDP_INTF_0_OFF+INTF_INTR_STATUS
+       },
+       {
+               MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+               MDP_INTF_1_OFF+INTF_INTR_EN,
+               MDP_INTF_1_OFF+INTF_INTR_STATUS
+       },
+       {
+               MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+               MDP_INTF_2_OFF+INTF_INTR_EN,
+               MDP_INTF_2_OFF+INTF_INTR_STATUS
+       },
+       {
+               MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+               MDP_INTF_3_OFF+INTF_INTR_EN,
+               MDP_INTF_3_OFF+INTF_INTR_STATUS
+       },
+       {
+               MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+               MDP_INTF_4_OFF+INTF_INTR_EN,
+               MDP_INTF_4_OFF+INTF_INTR_STATUS
+       },
+       {
+               MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+               MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+               MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+       },
+       {
+               MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+               MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+               MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+       }
+};
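
Each register set above owns a contiguous block of 32 irq indexes in
dpu_irq_map below, so an irq_idx decomposes as reg_idx = irq_idx / 32
and bit = irq_idx % 32; for example, irq_idx 25 falls in set 0 (the
top-level INTR registers) and matches DPU_INTR_INTF_0_VSYNC, i.e.
BIT(25).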
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ *                     a matching interrupt type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+       /* BEGIN MAP_RANGE: 0-31, INTR */
+       /* irq_idx: 0-3 */
+       { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+       { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+       { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+       { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+       /* irq_idx: 4-7 */
+       { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+       { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+       { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+       { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+       /* irq_idx: 8-11 */
+       { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+               DPU_INTR_PING_PONG_0_DONE, 0},
+       { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+               DPU_INTR_PING_PONG_1_DONE, 0},
+       { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+               DPU_INTR_PING_PONG_2_DONE, 0},
+       { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+               DPU_INTR_PING_PONG_3_DONE, 0},
+       /* irq_idx: 12-15 */
+       { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+               DPU_INTR_PING_PONG_0_RD_PTR, 0},
+       { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+               DPU_INTR_PING_PONG_1_RD_PTR, 0},
+       { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+               DPU_INTR_PING_PONG_2_RD_PTR, 0},
+       { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+               DPU_INTR_PING_PONG_3_RD_PTR, 0},
+       /* irq_idx: 16-19 */
+       { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+               DPU_INTR_PING_PONG_0_WR_PTR, 0},
+       { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+               DPU_INTR_PING_PONG_1_WR_PTR, 0},
+       { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+               DPU_INTR_PING_PONG_2_WR_PTR, 0},
+       { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+               DPU_INTR_PING_PONG_3_WR_PTR, 0},
+       /* irq_idx: 20-23 */
+       { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+               DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+       { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+               DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+       { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+               DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+       { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+               DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+       /* irq_idx: 24-27 */
+       { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+       { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+       { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+       { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+       /* irq_idx: 28-31 */
+       { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+       { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+       { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+       { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+       /* BEGIN MAP_RANGE: 32-64, INTR2 */
+       /* irq_idx: 32-35 */
+       { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+               DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       /* irq_idx: 36-39 */
+       { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+               DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       /* irq_idx: 40 */
+       { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+               DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+       /* irq_idx: 41-45 */
+       { DPU_IRQ_TYPE_CTL_START, CTL_0,
+               DPU_INTR_CTL_0_START, 1},
+       { DPU_IRQ_TYPE_CTL_START, CTL_1,
+               DPU_INTR_CTL_1_START, 1},
+       { DPU_IRQ_TYPE_CTL_START, CTL_2,
+               DPU_INTR_CTL_2_START, 1},
+       { DPU_IRQ_TYPE_CTL_START, CTL_3,
+               DPU_INTR_CTL_3_START, 1},
+       { DPU_IRQ_TYPE_CTL_START, CTL_4,
+               DPU_INTR_CTL_4_START, 1},
+       /* irq_idx: 46-47 */
+       { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+       { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+       /* irq_idx: 48-51 */
+       { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+               DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+               DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+               DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+               DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+       /* irq_idx: 52-55 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+               DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       /* irq_idx: 56-59 */
+       { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+               DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+               DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+               DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+       { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+               DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+       /* irq_idx: 60-63 */
+       { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+               DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+       /* BEGIN MAP_RANGE: 64-95 HIST */
+       /* irq_idx: 64-67 */
+       { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+               DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       /* irq_idx: 68-71 */
+       { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+               DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       /* irq_idx: 72-75 */
+       { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+               DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+               DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+       /* irq_idx: 76-79 */
+       { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+               DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       /* irq_idx: 80-83 */
+       { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+               DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       /* irq_idx: 84-87 */
+       { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+               DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+       { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+               DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+       /* irq_idx: 88-91 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       /* irq_idx: 92-95 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+       /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+       /* irq_idx: 96-99 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+               DPU_INTR_VIDEO_INTO_STATIC, 3},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+       /* irq_idx: 100-103 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+       /* irq_idx: 104-107 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 108-111 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 112-115 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 116-119 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 120-123 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 124-127 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+       /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+       /* irq_idx: 128-131 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+               DPU_INTR_VIDEO_INTO_STATIC, 4},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+       /* irq_idx: 132-135 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+       /* irq_idx: 136-139 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       /* irq_idx: 140-143 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       /* irq_idx: 144-147 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       /* irq_idx: 148-151 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       /* irq_idx: 152-155 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       /* irq_idx: 156-159 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+       /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+       /* irq_idx: 160-163 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+               DPU_INTR_VIDEO_INTO_STATIC, 5},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+       /* irq_idx: 164-167 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+       /* irq_idx: 168-171 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       /* irq_idx: 172-175 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       /* irq_idx: 176-179 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       /* irq_idx: 180-183 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       /* irq_idx: 184-187 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       /* irq_idx: 188-191 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+       /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+       /* irq_idx: 192-195 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+               DPU_INTR_VIDEO_INTO_STATIC, 6},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+       /* irq_idx: 196-199 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+       /* irq_idx: 200-203 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       /* irq_idx: 204-207 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       /* irq_idx: 208-211 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       /* irq_idx: 212-215 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       /* irq_idx: 216-219 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       /* irq_idx: 220-223 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+       /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+       /* irq_idx: 224-227 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+               DPU_INTR_VIDEO_INTO_STATIC, 7},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+       /* irq_idx: 228-231 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+       /* irq_idx: 232-235 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       /* irq_idx: 236-239 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       /* irq_idx: 240-243 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       /* irq_idx: 244-247 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       /* irq_idx: 248-251 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       /* irq_idx: 252-255 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+       /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+       /* irq_idx: 256-259 */
+       { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 260-263 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 264-267 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 268-271 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 272-275 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 276-279 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 280-283 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* irq_idx: 284-287 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+       /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+       /* irq_idx: 288-291 */
+       { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 292-295 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 296-299 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 300-303 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 304-307 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 308-311 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 312-315 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 316-319 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+               u32 instance_idx)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+               if (intr_type == dpu_irq_map[i].intr_type &&
+                       instance_idx == dpu_irq_map[i].instance_idx)
+                       return i;
+       }
+
+       pr_debug("IRQ lookup failed: intr_type=%d, instance_idx=%d\n",
+                       intr_type, instance_idx);
+       return -EINVAL;
+}
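
A sketch of the lookup-then-enable flow, assuming an already
initialized interrupt object "intr" (see dpu_hw_intr_init() below):

	int irq_idx;

	irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
	if (irq_idx < 0)
		return irq_idx;		/* -EINVAL: no such type/instance */

	intr->ops.enable_irq(intr, irq_idx);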
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+               uint32_t mask)
+{
+       if (!intr)
+               return;
+
+       DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+       /* ensure register writes go through */
+       wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+               void (*cbfunc)(void *, int),
+               void *arg)
+{
+       int reg_idx;
+       int irq_idx;
+       int start_idx;
+       int end_idx;
+       u32 irq_status;
+       unsigned long irq_flags;
+
+       if (!intr)
+               return;
+
+       /*
+        * The dispatcher will save the IRQ status before calling here.
+        * Now we need to go through each saved IRQ status and find
+        * the matching irq lookup index.
+        */
+       spin_lock_irqsave(&intr->irq_lock, irq_flags);
+       for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+               irq_status = intr->save_irq_status[reg_idx];
+
+               /*
+                * Each Interrupt register has a range of 32 indexes, and
+                * that is static for dpu_irq_map.
+                */
+               start_idx = reg_idx * 32;
+               end_idx = start_idx + 32;
+
+               if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+                               end_idx > ARRAY_SIZE(dpu_irq_map))
+                       continue;
+
+               /*
+                * Search for a matching intr status in the irq map;
+                * start_idx and end_idx define the search range in
+                * the dpu_irq_map.
+                */
+               for (irq_idx = start_idx;
+                               (irq_idx < end_idx) && irq_status;
+                               irq_idx++)
+                       if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+                               (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+                               /*
+                                * Once the irq mask matches, perform a
+                                * callback to the given cbfunc. cbfunc will
+                                * take care of clearing the interrupt
+                                * status. If cbfunc is not provided, the
+                                * interrupt is cleared here instead.
+                                */
+                               if (cbfunc)
+                                       cbfunc(arg, irq_idx);
+                               else
+                                       intr->ops.clear_intr_status_nolock(
+                                                       intr, irq_idx);
+
+                               /*
+                                * When the callback finishes, clear the
+                                * matching mask from irq_status. Once
+                                * irq_status is fully cleared, the search
+                                * can stop.
+                                */
+                               irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+                       }
+       }
+       spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
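
In a top-level ISR the expected ordering is to latch the statuses first
and then dispatch them, roughly as sketched below; the callback body
and the irqreturn plumbing are illustrative:

	static void example_irq_cb(void *arg, int irq_idx)
	{
		/* per-interrupt handling for the matched irq_idx */
	}

	static irqreturn_t example_isr(int irq, void *data)
	{
		struct dpu_hw_intr *intr = data;

		intr->ops.get_interrupt_statuses(intr);	/* read, clear, save */
		intr->ops.dispatch_irqs(intr, example_irq_cb, data);

		return IRQ_HANDLED;
	}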
+
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+       int reg_idx;
+       unsigned long irq_flags;
+       const struct dpu_intr_reg *reg;
+       const struct dpu_irq_type *irq;
+       const char *dbgstr = NULL;
+       uint32_t cache_irq_mask;
+
+       if (!intr)
+               return -EINVAL;
+
+       if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+               pr_err("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       irq = &dpu_irq_map[irq_idx];
+       reg_idx = irq->reg_idx;
+       reg = &dpu_intr_set[reg_idx];
+
+       spin_lock_irqsave(&intr->irq_lock, irq_flags);
+       cache_irq_mask = intr->cache_irq_mask[reg_idx];
+       if (cache_irq_mask & irq->irq_mask) {
+               dbgstr = "DPU IRQ already set:";
+       } else {
+               dbgstr = "DPU IRQ enabled:";
+
+               cache_irq_mask |= irq->irq_mask;
+               /* Clear any pending interrupts */
+               DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+               /* Enabling interrupts with the new mask */
+               DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+               /* ensure register write goes through */
+               wmb();
+
+               intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+       }
+       spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+       pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+                       irq->irq_mask, cache_irq_mask);
+
+       return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+       int reg_idx;
+       const struct dpu_intr_reg *reg;
+       const struct dpu_irq_type *irq;
+       const char *dbgstr = NULL;
+       uint32_t cache_irq_mask;
+
+       if (!intr)
+               return -EINVAL;
+
+       if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+               pr_err("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       irq = &dpu_irq_map[irq_idx];
+       reg_idx = irq->reg_idx;
+       reg = &dpu_intr_set[reg_idx];
+
+       cache_irq_mask = intr->cache_irq_mask[reg_idx];
+       if ((cache_irq_mask & irq->irq_mask) == 0) {
+               dbgstr = "DPU IRQ is already cleared:";
+       } else {
+               dbgstr = "DPU IRQ mask disable:";
+
+               cache_irq_mask &= ~irq->irq_mask;
+               /* Disable interrupts based on the new mask */
+               DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+               /* Cleaning any pending interrupt */
+               /* Clear any pending interrupts */
+
+               /* ensure register write goes through */
+               wmb();
+
+               intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+       }
+
+       pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+                       irq->irq_mask, cache_irq_mask);
+
+       return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+       unsigned long irq_flags;
+
+       if (!intr)
+               return -EINVAL;
+
+       if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+               pr_err("invalid IRQ index: [%d]\n", irq_idx);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&intr->irq_lock, irq_flags);
+       dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+       spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+       return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+       int i;
+
+       if (!intr)
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+               DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+       /* ensure register writes go through */
+       wmb();
+
+       return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+       int i;
+
+       if (!intr)
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+               DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+       /* ensure register writes go through */
+       wmb();
+
+       return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+               uint32_t *mask)
+{
+       if (!intr || !mask)
+               return -EINVAL;
+
+       *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+               | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+       return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+       int i;
+       u32 enable_mask;
+       unsigned long irq_flags;
+
+       if (!intr)
+               return;
+
+       spin_lock_irqsave(&intr->irq_lock, irq_flags);
+       for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+               /* Read interrupt status */
+               intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+                               dpu_intr_set[i].status_off);
+
+               /* Read enable mask */
+               enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+               /* and clear the interrupt */
+               if (intr->save_irq_status[i])
+                       DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+                                       intr->save_irq_status[i]);
+
+               /* Finally update IRQ status based on enable mask */
+               intr->save_irq_status[i] &= enable_mask;
+       }
+
+       /* ensure register writes go through */
+       wmb();
+
+       spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+               int irq_idx)
+{
+       int reg_idx;
+
+       if (!intr)
+               return;
+
+       reg_idx = dpu_irq_map[irq_idx].reg_idx;
+       DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+                       dpu_irq_map[irq_idx].irq_mask);
+
+       /* ensure register writes go through */
+       wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+               int irq_idx)
+{
+       unsigned long irq_flags;
+
+       if (!intr)
+               return;
+
+       spin_lock_irqsave(&intr->irq_lock, irq_flags);
+       dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+       spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+               int irq_idx, bool clear)
+{
+       int reg_idx;
+       unsigned long irq_flags;
+       u32 intr_status;
+
+       if (!intr)
+               return 0;
+
+       if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+               pr_err("invalid IRQ index: [%d]\n", irq_idx);
+               return 0;
+       }
+
+       spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+       reg_idx = dpu_irq_map[irq_idx].reg_idx;
+       intr_status = DPU_REG_READ(&intr->hw,
+                       dpu_intr_set[reg_idx].status_off) &
+                                       dpu_irq_map[irq_idx].irq_mask;
+       if (intr_status && clear)
+               DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+                               intr_status);
+
+       /* ensure register writes go through */
+       wmb();
+
+       spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+       return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+       ops->set_mask = dpu_hw_intr_set_mask;
+       ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+       ops->enable_irq = dpu_hw_intr_enable_irq;
+       ops->disable_irq = dpu_hw_intr_disable_irq;
+       ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+       ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+       ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+       ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+       ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+       ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+       ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+       ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+               void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+       hw->base_off = addr;
+       hw->blk_off = m->mdp[0].base;
+       hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+               struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_intr *intr;
+
+       if (!addr || !m)
+               return ERR_PTR(-EINVAL);
+
+       intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+       if (!intr)
+               return ERR_PTR(-ENOMEM);
+
+       __intr_offset(m, addr, &intr->hw);
+       __setup_intr_ops(&intr->ops);
+
+       intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+       intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+                       GFP_KERNEL);
+       if (intr->cache_irq_mask == NULL) {
+               kfree(intr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+                       GFP_KERNEL);
+       if (intr->save_irq_status == NULL) {
+               kfree(intr->cache_irq_mask);
+               kfree(intr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       spin_lock_init(&intr->irq_lock);
+
+       return intr;
+}
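
A lifecycle sketch mirroring the CTL example earlier, with the same
illustrative "mmio"/"cat" names:

	struct dpu_hw_intr *intr;

	intr = dpu_hw_intr_init(mmio, cat);
	if (IS_ERR(intr))
		return PTR_ERR(intr);

	intr->ops.clear_all_irqs(intr);
	intr->ops.disable_all_irqs(intr);
	/* ... request_irq() and run the dispatch pattern above ... */

	dpu_hw_intr_destroy(intr);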
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+       if (intr) {
+               kfree(intr->cache_irq_mask);
+               kfree(intr->save_irq_status);
+               kfree(intr);
+       }
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644 (file)
index 0000000..61e4cba
--- /dev/null
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP         BIT(0)
+#define IRQ_SOURCE_DSI0                BIT(4)
+#define IRQ_SOURCE_DSI1                BIT(5)
+#define IRQ_SOURCE_HDMI                BIT(8)
+#define IRQ_SOURCE_EDP         BIT(12)
+#define IRQ_SOURCE_MHL         BIT(16)
+
+/**
+ * dpu_intr_type - HW Interrupt Type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP:          WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP:          WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP:       PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR:     PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR:     PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF:   PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK:   PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN:       INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC:           INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW:         Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE:                VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ:      VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE:       DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ:     DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER:             Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN:         Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT:                Video static frame INTR out of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN:         DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT:                DSI CMD0 static frame INTR out of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN:         DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT:                DSI CMD1 static frame INTR out of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN:         DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT:                DSI CMD2 static frame INTR out of static
+ * @DPU_IRQ_TYPE_PROG_LINE:            Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE:          AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START:            Control start
+ * @DPU_IRQ_TYPE_RESERVED:             Reserved for expansion
+ */
+enum dpu_intr_type {
+       DPU_IRQ_TYPE_WB_ROT_COMP,
+       DPU_IRQ_TYPE_WB_WFD_COMP,
+       DPU_IRQ_TYPE_PING_PONG_COMP,
+       DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+       DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+       DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+       DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+       DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+       DPU_IRQ_TYPE_INTF_UNDER_RUN,
+       DPU_IRQ_TYPE_INTF_VSYNC,
+       DPU_IRQ_TYPE_CWB_OVERFLOW,
+       DPU_IRQ_TYPE_HIST_VIG_DONE,
+       DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+       DPU_IRQ_TYPE_HIST_DSPP_DONE,
+       DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+       DPU_IRQ_TYPE_WD_TIMER,
+       DPU_IRQ_TYPE_SFI_VIDEO_IN,
+       DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+       DPU_IRQ_TYPE_SFI_CMD_0_IN,
+       DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+       DPU_IRQ_TYPE_SFI_CMD_1_IN,
+       DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+       DPU_IRQ_TYPE_SFI_CMD_2_IN,
+       DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+       DPU_IRQ_TYPE_PROG_LINE,
+       DPU_IRQ_TYPE_AD4_BL_DONE,
+       DPU_IRQ_TYPE_CTL_START,
+       DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+       /**
+        * set_mask - Programs the given interrupt register with the
+        *            given interrupt mask. Register value will get overwritten.
+        * @intr:       HW interrupt handle
+        * @reg:        MDSS HW register offset
+        * @irqmask:    IRQ mask value
+        */
+       void (*set_mask)(
+                       struct dpu_hw_intr *intr,
+                       uint32_t reg,
+                       uint32_t irqmask);
+
+       /**
+        * irq_idx_lookup - Look up the IRQ index for a given HW interrupt
+        *                  type. Used for all IRQ-related ops.
+        * @intr_type:          Interrupt type defined in dpu_intr_type
+        * @instance_idx:       HW interrupt block instance
+        * @return:             irq_idx, or -EINVAL on lookup failure
+        */
+       int (*irq_idx_lookup)(
+                       enum dpu_intr_type intr_type,
+                       u32 instance_idx);
+
+       /**
+        * enable_irq - Enable IRQ based on lookup IRQ index
+        * @intr:       HW interrupt handle
+        * @irq_idx:    IRQ index returned from irq_idx_lookup
+        * @return:     0 for success, otherwise failure
+        */
+       int (*enable_irq)(
+                       struct dpu_hw_intr *intr,
+                       int irq_idx);
+
+       /**
+        * disable_irq - Disable IRQ based on lookup IRQ index
+        * @intr:       HW interrupt handle
+        * @irq_idx:    IRQ index returned from irq_idx_lookup
+        * @return:     0 for success, otherwise failure
+        */
+       int (*disable_irq)(
+                       struct dpu_hw_intr *intr,
+                       int irq_idx);
+
+       /**
+        * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+        *                  any asserted IRQs). Useful during reset.
+        * @intr:       HW interrupt handle
+        * @return:     0 for success, otherwise failure
+        */
+       int (*clear_all_irqs)(
+                       struct dpu_hw_intr *intr);
+
+       /**
+        * disable_all_irqs - Disables all the interrupts. Useful during reset.
+        * @intr:       HW interrupt handle
+        * @return:     0 for success, otherwise failure
+        */
+       int (*disable_all_irqs)(
+                       struct dpu_hw_intr *intr);
+
+       /**
+        * dispatch_irqs - IRQ dispatcher will call the given callback
+        *                 function when a matching interrupt status bit is
+        *                 found in the irq mapping table.
+        * @intr:       HW interrupt handle
+        * @cbfunc:     Callback function pointer
+        * @arg:        Argument to pass back during callback
+        */
+       void (*dispatch_irqs)(
+                       struct dpu_hw_intr *intr,
+                       void (*cbfunc)(void *arg, int irq_idx),
+                       void *arg);
+
+       /**
+        * get_interrupt_statuses - Gets and stores the values from all
+        *                          interrupt status registers that are
+        *                          currently fired.
+        * @intr:       HW interrupt handle
+        */
+       void (*get_interrupt_statuses)(
+                       struct dpu_hw_intr *intr);
+
+       /**
+        * clear_interrupt_status - Clears HW interrupt status based on given
+        *                          lookup IRQ index.
+        * @intr:       HW interrupt handle
+        * @irq_idx:    IRQ index returned from irq_idx_lookup
+        */
+       void (*clear_interrupt_status)(
+                       struct dpu_hw_intr *intr,
+                       int irq_idx);
+
+       /**
+        * clear_intr_status_nolock() - clears the HW interrupts without lock
+        * @intr:       HW interrupt handle
+        * @irq_idx:    IRQ index returned from irq_idx_lookup
+        */
+       void (*clear_intr_status_nolock)(
+                       struct dpu_hw_intr *intr,
+                       int irq_idx);
+
+       /**
+        * get_interrupt_status - Gets HW interrupt status, and clear if set,
+        *                        based on given lookup IRQ index.
+        * @intr:       HW interrupt handle
+        * @irq_idx:    IRQ index returned from irq_idx_lookup
+        * @clear:      True to clear irq after read
+        */
+       u32 (*get_interrupt_status)(
+                       struct dpu_hw_intr *intr,
+                       int irq_idx,
+                       bool clear);
+
+       /**
+        * get_valid_interrupts - Gets a mask of all valid interrupt sources
+        *                        within DPU. These are actually status bits
+        *                        within interrupt registers that specify the
+        *                        source of the interrupt. For example, valid
+        *                        interrupt sources can be MDP, DSI, HDMI etc.
+        * @intr:       HW interrupt handle
+        * @mask:       Returned interrupt source mask
+        * @return:     0 for success, otherwise failure
+        */
+       int (*get_valid_interrupts)(
+                       struct dpu_hw_intr *intr,
+                       uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw:               virtual address mapping
+ * @ops:              function pointer mapping for IRQ handling
+ * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
+ * @save_irq_status:  array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx entries in the mapping table
+ * @irq_lock:         spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+       struct dpu_hw_blk_reg_map hw;
+       struct dpu_hw_intr_ops ops;
+       u32 *cache_irq_mask;
+       u32 *save_irq_status;
+       u32 irq_idx_tbl_size;
+       spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+               struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
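As a rough illustration of how the dispatcher hooks above compose, assuming a caller that latches statuses before walking the mapping table; the callback body is purely illustrative.

/* Illustrative callback; arg is whatever was handed to dispatch_irqs. */
static void example_irq_cb(void *arg, int irq_idx)
{
	pr_debug("dpu: irq_idx %d fired\n", irq_idx);
}

static void example_dispatch(struct dpu_hw_intr *intr, void *priv)
{
	/* Latch every fired status register, then fan out to callbacks. */
	intr->ops.get_interrupt_statuses(intr);
	intr->ops.dispatch_irqs(intr, example_irq_cb, priv);
}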
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644 (file)
index 0000000..d280df5
--- /dev/null
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN           0x000
+#define INTF_CONFIG                     0x004
+#define INTF_HSYNC_CTL                  0x008
+#define INTF_VSYNC_PERIOD_F0            0x00C
+#define INTF_VSYNC_PERIOD_F1            0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0       0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1       0x018
+#define INTF_DISPLAY_V_START_F0         0x01C
+#define INTF_DISPLAY_V_START_F1         0x020
+#define INTF_DISPLAY_V_END_F0           0x024
+#define INTF_DISPLAY_V_END_F1           0x028
+#define INTF_ACTIVE_V_START_F0          0x02C
+#define INTF_ACTIVE_V_START_F1          0x030
+#define INTF_ACTIVE_V_END_F0            0x034
+#define INTF_ACTIVE_V_END_F1            0x038
+#define INTF_DISPLAY_HCTL               0x03C
+#define INTF_ACTIVE_HCTL                0x040
+#define INTF_BORDER_COLOR               0x044
+#define INTF_UNDERFLOW_COLOR            0x048
+#define INTF_HSYNC_SKEW                 0x04C
+#define INTF_POLARITY_CTL               0x050
+#define INTF_TEST_CTL                   0x054
+#define INTF_TP_COLOR0                  0x058
+#define INTF_TP_COLOR1                  0x05C
+#define INTF_FRAME_LINE_COUNT_EN        0x0A8
+#define INTF_FRAME_COUNT                0x0AC
+#define INTF_LINE_COUNT                 0x0B0
+
+#define   INTF_DEFLICKER_CONFIG         0x0F0
+#define   INTF_DEFLICKER_STRNG_COEFF    0x0F4
+#define   INTF_DEFLICKER_WEAK_COEFF     0x0F8
+
+#define   INTF_DSI_CMD_MODE_TRIGGER_EN  0x084
+#define   INTF_PANEL_FORMAT             0x090
+#define   INTF_TPG_ENABLE               0x100
+#define   INTF_TPG_MAIN_CONTROL         0x104
+#define   INTF_TPG_VIDEO_CONFIG         0x108
+#define   INTF_TPG_COMPONENT_LIMITS     0x10C
+#define   INTF_TPG_RECTANGLE            0x110
+#define   INTF_TPG_INITIAL_VALUE        0x114
+#define   INTF_TPG_BLK_WHITE_PATTERN_FRAMES   0x118
+#define   INTF_TPG_RGB_MAPPING          0x11C
+#define   INTF_PROG_FETCH_START         0x170
+#define   INTF_PROG_ROT_START           0x174
+
+#define INTF_MISR_CTRL                 0x180
+#define INTF_MISR_SIGNATURE            0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+               struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       for (i = 0; i < m->intf_count; i++) {
+               if ((intf == m->intf[i].id) &&
+               (m->intf[i].type != INTF_NONE)) {
+                       b->base_off = addr;
+                       b->blk_off = m->intf[i].base;
+                       b->length = m->intf[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_INTF;
+                       return &m->intf[i];
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+               const struct intf_timing_params *p,
+               const struct dpu_format *fmt)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 hsync_period, vsync_period;
+       u32 display_v_start, display_v_end;
+       u32 hsync_start_x, hsync_end_x;
+       u32 active_h_start, active_h_end;
+       u32 active_v_start, active_v_end;
+       u32 active_hctl, display_hctl, hsync_ctl;
+       u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+       u32 panel_format;
+       u32 intf_cfg;
+
+       /* read interface_cfg */
+       intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+       hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+       p->h_front_porch;
+       vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+       p->v_front_porch;
+
+       display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+       hsync_period) + p->hsync_skew;
+       display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+       p->hsync_skew - 1;
+
+       if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+               display_v_start += p->hsync_pulse_width + p->h_back_porch;
+               display_v_end -= p->h_front_porch;
+       }
+
+       hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+       hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+       if (p->width != p->xres) {
+               active_h_start = hsync_start_x;
+               active_h_end = active_h_start + p->xres - 1;
+       } else {
+               active_h_start = 0;
+               active_h_end = 0;
+       }
+
+       if (p->height != p->yres) {
+               active_v_start = display_v_start;
+               active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+       } else {
+               active_v_start = 0;
+               active_v_end = 0;
+       }
+
+       if (active_h_end) {
+               active_hctl = (active_h_end << 16) | active_h_start;
+               intf_cfg |= BIT(29);    /* ACTIVE_H_ENABLE */
+       } else {
+               active_hctl = 0;
+       }
+
+       if (active_v_end)
+               intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+       hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+       display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+       den_polarity = 0;
+       if (ctx->cap->type == INTF_HDMI) {
+               hsync_polarity = p->yres >= 720 ? 0 : 1;
+               vsync_polarity = p->yres >= 720 ? 0 : 1;
+       } else {
+               hsync_polarity = 0;
+               vsync_polarity = 0;
+       }
+       polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+               (vsync_polarity << 1) | /* VSYNC Polarity */
+               (hsync_polarity << 0);  /* HSYNC Polarity */
+
+       if (!DPU_FORMAT_IS_YUV(fmt))
+               panel_format = (fmt->bits[C0_G_Y] |
+                               (fmt->bits[C1_B_Cb] << 2) |
+                               (fmt->bits[C2_R_Cr] << 4) |
+                               (0x21 << 8));
+       else
+               /* Interface treats all the pixel data in RGB888 format */
+               panel_format = (COLOR_8BIT |
+                               (COLOR_8BIT << 2) |
+                               (COLOR_8BIT << 4) |
+                               (0x21 << 8));
+
+       DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+       DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+       DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+                       p->vsync_pulse_width * hsync_period);
+       DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+       DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+       DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+       DPU_REG_WRITE(c, INTF_ACTIVE_HCTL,  active_hctl);
+       DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+       DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+       DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+       DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+       DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+       DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+       DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+       DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+       DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
+static void dpu_hw_intf_enable_timing_engine(
+               struct dpu_hw_intf *intf,
+               u8 enable)
+{
+       struct dpu_hw_blk_reg_map *c = &intf->hw;
+       /* Note: Display interface select is handled in top block hw layer */
+       DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+               struct dpu_hw_intf *intf,
+               const struct intf_prog_fetch *fetch)
+{
+       struct dpu_hw_blk_reg_map *c = &intf->hw;
+       int fetch_enable;
+
+       /*
+        * Fetch should always be outside the active lines. If the fetching
+        * is programmed within active region, hardware behavior is unknown.
+        */
+
+       fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+       if (fetch->enable) {
+               fetch_enable |= BIT(31);
+               DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+                               fetch->fetch_start);
+       } else {
+               fetch_enable &= ~BIT(31);
+       }
+
+       DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+               struct dpu_hw_intf *intf,
+               struct intf_status *s)
+{
+       struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+       s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+       if (s->is_en) {
+               s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+               s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+       } else {
+               s->line_count = 0;
+               s->frame_count = 0;
+       }
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+                                               bool enable, u32 frame_count)
+{
+       struct dpu_hw_blk_reg_map *c = &intf->hw;
+       u32 config = 0;
+
+       DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+       /* clear misr data */
+       wmb();
+
+       if (enable)
+               config = (frame_count & MISR_FRAME_COUNT_MASK) |
+                       MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+       DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+       struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+       return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+       struct dpu_hw_blk_reg_map *c;
+
+       if (!intf)
+               return 0;
+
+       c = &intf->hw;
+
+       return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+               unsigned long cap)
+{
+       ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+       ops->setup_prg_fetch  = dpu_hw_intf_setup_prg_fetch;
+       ops->get_status = dpu_hw_intf_get_status;
+       ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+       ops->setup_misr = dpu_hw_intf_setup_misr;
+       ops->collect_misr = dpu_hw_intf_collect_misr;
+       ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_intf *c;
+       struct dpu_intf_cfg *cfg;
+       int rc;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _intf_offset(idx, m, addr, &c->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(c);
+               pr_err("failed to create dpu_hw_intf %d\n", idx);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * Assign ops
+        */
+       c->idx = idx;
+       c->cap = cfg;
+       c->mdss = m;
+       _setup_intf_ops(&c->ops, c->cap->features);
+
+       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       return c;
+
+blk_init_error:
+       kzfree(c);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+       if (intf)
+               dpu_hw_blk_destroy(&intf->base);
+       kfree(intf);
+}
+
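A worked example of the timing arithmetic in dpu_hw_intf_setup_timing_engine, using standard 1080p60 CEA-861 values for illustration; these numbers are not taken from the patch itself.

/*
 * hsync_pulse_width = 44, h_back_porch = 148, width = 1920,
 * h_front_porch = 88:
 *	hsync_period = 44 + 148 + 1920 + 88 = 2200 pixels
 *
 * vsync_pulse_width = 5, v_back_porch = 36, height = 1080,
 * v_front_porch = 4:
 *	vsync_period = 5 + 36 + 1080 + 4 = 1125 lines
 *
 * display_v_start/display_v_end are then expressed in pixel-clock
 * ticks by multiplying line counts with hsync_period, e.g.
 *	display_v_start = (5 + 36) * 2200 + hsync_skew = 90200 for skew 0.
 */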
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644 (file)
index 0000000..a79d735
--- /dev/null
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+       u32 width;              /* active width */
+       u32 height;             /* active height */
+       u32 xres;               /* Display panel width */
+       u32 yres;               /* Display panel height */
+
+       u32 h_back_porch;
+       u32 h_front_porch;
+       u32 v_back_porch;
+       u32 v_front_porch;
+       u32 hsync_pulse_width;
+       u32 vsync_pulse_width;
+       u32 hsync_polarity;
+       u32 vsync_polarity;
+       u32 border_clr;
+       u32 underflow_clr;
+       u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+       u8 enable;
+       /* vsync counter for the front porch pixel line */
+       u32 fetch_start;
+};
+
+struct intf_status {
+       u8 is_en;               /* interface timing engine is enabled or not */
+       u32 frame_count;        /* frame count since timing engine enabled */
+       u32 line_count;         /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the INTF hardware driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ * @ setup_timing_gen : programs the timing engine
+ * @ setup_prg_fetch : enables/disables the programmable fetch logic
+ * @ enable_timing: enable/disable timing engine
+ * @ get_status: returns if timing engine is enabled or not
+ * @ setup_misr: enables/disables MISR in HW register
+ * @ collect_misr: reads and stores MISR data from HW register
+ * @ get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+       void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+                       const struct intf_timing_params *p,
+                       const struct dpu_format *fmt);
+
+       void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+                       const struct intf_prog_fetch *fetch);
+
+       void (*enable_timing)(struct dpu_hw_intf *intf,
+                       u8 enable);
+
+       void (*get_status)(struct dpu_hw_intf *intf,
+                       struct intf_status *status);
+
+       void (*setup_misr)(struct dpu_hw_intf *intf,
+                       bool enable, u32 frame_count);
+
+       u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+       u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+
+       /* intf */
+       enum dpu_intf idx;
+       const struct dpu_intf_cfg *cap;
+       const struct dpu_mdss_cfg *mdss;
+
+       /* ops */
+       struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx:  interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf:   Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
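A sketch of how a caller might fill intf_timing_params from a struct drm_display_mode using standard DRM porch arithmetic; the polarity mapping at the end is an assumption, not something this header dictates.

#include <drm/drm_modes.h>

static void example_mode_to_timing(const struct drm_display_mode *mode,
		struct intf_timing_params *t)
{
	t->width = mode->hdisplay;	/* active width */
	t->height = mode->vdisplay;	/* active height */
	t->xres = mode->hdisplay;
	t->yres = mode->vdisplay;

	t->h_front_porch = mode->hsync_start - mode->hdisplay;
	t->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
	t->h_back_porch = mode->htotal - mode->hsync_end;

	t->v_front_porch = mode->vsync_start - mode->vdisplay;
	t->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
	t->v_back_porch = mode->vtotal - mode->vsync_end;

	/* assumed encoding: 1 = active-low sync */
	t->hsync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
	t->vsync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
}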
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644 (file)
index 0000000..4ab72b0
--- /dev/null
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE                        0x00
+#define LM_OUT_SIZE                       0x04
+#define LM_BORDER_COLOR_0                 0x08
+#define LM_BORDER_COLOR_1                 0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP                     0x00
+#define LM_BLEND0_CONST_ALPHA            0x04
+#define LM_FG_COLOR_FILL_COLOR_0         0x08
+#define LM_FG_COLOR_FILL_COLOR_1         0x0C
+#define LM_FG_COLOR_FILL_SIZE            0x10
+#define LM_FG_COLOR_FILL_XY              0x14
+
+#define LM_BLEND0_FG_ALPHA               0x04
+#define LM_BLEND0_BG_ALPHA               0x08
+
+#define LM_MISR_CTRL                   0x310
+#define LM_MISR_SIGNATURE              0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+               struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       for (i = 0; i < m->mixer_count; i++) {
+               if (mixer == m->mixer[i].id) {
+                       b->base_off = addr;
+                       b->blk_off = m->mixer[i].base;
+                       b->length = m->mixer[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_LM;
+                       return &m->mixer[i];
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @c:     mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+       const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+       int rc;
+
+       if (stage == DPU_STAGE_BASE)
+               rc = -EINVAL;
+       else if (stage <= sblk->maxblendstages)
+               rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+       else
+               rc = -EINVAL;
+
+       return rc;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+               struct dpu_hw_mixer_cfg *mixer)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 outsize;
+       u32 op_mode;
+
+       op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+       outsize = mixer->out_height << 16 | mixer->out_width;
+       DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+       /* SPLIT_LEFT_RIGHT */
+       if (mixer->right_mixer)
+               op_mode |= BIT(31);
+       else
+               op_mode &= ~BIT(31);
+       DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+               struct dpu_mdss_color *color,
+               u8 border_en)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+       if (border_en) {
+               DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+                       (color->color_0 & 0xFFF) |
+                       ((color->color_1 & 0xFFF) << 0x10));
+               DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+                       (color->color_2 & 0xFFF) |
+                       ((color->color_3 & 0xFFF) << 0x10));
+       }
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+       u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       int stage_off;
+       u32 const_alpha;
+
+       if (stage == DPU_STAGE_BASE)
+               return;
+
+       stage_off = _stage_offset(ctx, stage);
+       if (WARN_ON(stage_off < 0))
+               return;
+
+       const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+       DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+       DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+       u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       int stage_off;
+
+       if (stage == DPU_STAGE_BASE)
+               return;
+
+       stage_off = _stage_offset(ctx, stage);
+       if (WARN_ON(stage_off < 0))
+               return;
+
+       DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+       DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+       DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+       uint32_t mixer_op_mode)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       int op_mode;
+
+       /* read the existing op_mode configuration */
+       op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+       op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+       DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+                       void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+                               bool enable, u32 frame_count)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+       u32 config = 0;
+
+       DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+       /* clear misr data */
+       wmb();
+
+       if (enable)
+               config = (frame_count & MISR_FRAME_COUNT_MASK) |
+                       MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+       DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+       return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+               struct dpu_hw_lm_ops *ops,
+               unsigned long features)
+{
+       ops->setup_mixer_out = dpu_hw_lm_setup_out;
+       if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+               ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+       else
+               ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+       ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+       ops->setup_border_color = dpu_hw_lm_setup_border_color;
+       ops->setup_gc = dpu_hw_lm_gc;
+       ops->setup_misr = dpu_hw_lm_setup_misr;
+       ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_mixer *c;
+       struct dpu_lm_cfg *cfg;
+       int rc;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _lm_offset(idx, m, addr, &c->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(c);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* Assign ops */
+       c->idx = idx;
+       c->cap = cfg;
+       _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       return c;
+
+blk_init_error:
+       kzfree(c);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+       if (lm)
+               dpu_hw_blk_destroy(&lm->base);
+       kfree(lm);
+}
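To show how the blend op above is typically exercised, a hedged sketch using the blend_op bit masks from dpu_hw_mdss.h; this flag combination is one plausible choice, not mandated by the driver.

static void example_blend_stage0(struct dpu_hw_mixer *lm)
{
	/* Constant FG alpha, BG weighted by inverted FG pixel alpha. */
	u32 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		       DPU_BLEND_BG_ALPHA_FG_PIXEL |
		       DPU_BLEND_BG_INV_ALPHA;

	lm->ops.setup_blend_config(lm, DPU_STAGE_0, 0xff, 0x00, blend_op);
}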
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644 (file)
index 0000000..e29e5da
--- /dev/null
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+       u32 out_width;
+       u32 out_height;
+       bool right_mixer;
+       int flags;
+};
+
+struct dpu_hw_color3_cfg {
+       u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ *
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+       /*
+        * Sets up mixer output width and height
+        * and border color if enabled
+        */
+       void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+               struct dpu_hw_mixer_cfg *cfg);
+
+       /*
+        * Alpha blending configuration
+        * for the specified stage
+        */
+       void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+               uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+       /*
+        * Alpha color component selection from either fg or bg
+        */
+       void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+       /**
+        * setup_border_color : enable/disable border color
+        */
+       void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+               struct dpu_mdss_color *color,
+               u8 border_en);
+       /**
+        * setup_gc : enable/disable gamma correction feature
+        */
+       void (*setup_gc)(struct dpu_hw_mixer *mixer,
+                       void *cfg);
+
+       /* setup_misr: enables/disables MISR in HW register */
+       void (*setup_misr)(struct dpu_hw_mixer *ctx,
+                       bool enable, u32 frame_count);
+
+       /* collect_misr: reads and stores MISR data from HW register */
+       u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+
+       /* lm */
+       enum dpu_lm  idx;
+       const struct dpu_lm_cfg   *cap;
+       const struct dpu_mdp_cfg  *mdp;
+       const struct dpu_ctl_cfg  *ctl;
+
+       /* ops */
+       struct dpu_hw_lm_ops ops;
+
+       /* store mixer info specific to display */
+       struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once per mixer, before the mixer is accessed.
+ * @idx:  mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m :   pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm:   Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
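A minimal sketch of configuring mixer output geometry through the ops table; the split-pair scenario in the comment is illustrative.

static void example_mixer_out(struct dpu_hw_mixer *lm, u32 w, u32 h)
{
	struct dpu_hw_mixer_cfg cfg = {
		.out_width = w,
		.out_height = h,
		.right_mixer = true,	/* right half of a split-LM pair */
	};

	lm->ops.setup_mixer_out(lm, &cfg);
}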
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644 (file)
index 0000000..35e6bf9
--- /dev/null
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME                   "dpu"
+
+#define DPU_NONE                        0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE      9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE             6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE              3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES                 4
+#endif
+
+#define PIPES_PER_STAGE                        2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES              3
+#endif
+
+enum dpu_format_flags {
+       DPU_FORMAT_FLAG_YUV_BIT,
+       DPU_FORMAT_FLAG_DX_BIT,
+       DPU_FORMAT_FLAG_COMPRESSED_BIT,
+       DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV            BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX             BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED     BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X)           \
+       (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X)            \
+       (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X)                ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+       (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+                       !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+       (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+                       test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST    (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST    (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL    (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL    (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA         (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA         (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA     (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN         (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST    (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST    (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL    (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL    (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA         (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA         (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA     (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN         (1 << 13)
+
+#define DPU_VSYNC0_SOURCE_GPIO         0
+#define DPU_VSYNC1_SOURCE_GPIO         1
+#define DPU_VSYNC2_SOURCE_GPIO         2
+#define DPU_VSYNC_SOURCE_INTF_0                3
+#define DPU_VSYNC_SOURCE_INTF_1                4
+#define DPU_VSYNC_SOURCE_INTF_2                5
+#define DPU_VSYNC_SOURCE_INTF_3                6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4    11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3    12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2    13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1    14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0    15
+
+enum dpu_hw_blk_type {
+       DPU_HW_BLK_TOP = 0,
+       DPU_HW_BLK_SSPP,
+       DPU_HW_BLK_LM,
+       DPU_HW_BLK_CTL,
+       DPU_HW_BLK_CDM,
+       DPU_HW_BLK_PINGPONG,
+       DPU_HW_BLK_INTF,
+       DPU_HW_BLK_WB,
+       DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+       MDP_TOP = 0x1,
+       MDP_MAX,
+};
+
+enum dpu_sspp {
+       SSPP_NONE,
+       SSPP_VIG0,
+       SSPP_VIG1,
+       SSPP_VIG2,
+       SSPP_VIG3,
+       SSPP_RGB0,
+       SSPP_RGB1,
+       SSPP_RGB2,
+       SSPP_RGB3,
+       SSPP_DMA0,
+       SSPP_DMA1,
+       SSPP_DMA2,
+       SSPP_DMA3,
+       SSPP_CURSOR0,
+       SSPP_CURSOR1,
+       SSPP_MAX
+};
+
+enum dpu_sspp_type {
+       SSPP_TYPE_VIG,
+       SSPP_TYPE_RGB,
+       SSPP_TYPE_DMA,
+       SSPP_TYPE_CURSOR,
+       SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+       LM_0 = 1,
+       LM_1,
+       LM_2,
+       LM_3,
+       LM_4,
+       LM_5,
+       LM_6,
+       LM_MAX
+};
+
+enum dpu_stage {
+       DPU_STAGE_BASE = 0,
+       DPU_STAGE_0,
+       DPU_STAGE_1,
+       DPU_STAGE_2,
+       DPU_STAGE_3,
+       DPU_STAGE_4,
+       DPU_STAGE_5,
+       DPU_STAGE_6,
+       DPU_STAGE_7,
+       DPU_STAGE_8,
+       DPU_STAGE_9,
+       DPU_STAGE_10,
+       DPU_STAGE_MAX
+};
+
+enum dpu_dspp {
+       DSPP_0 = 1,
+       DSPP_1,
+       DSPP_2,
+       DSPP_3,
+       DSPP_MAX
+};
+
+enum dpu_ds {
+       DS_TOP,
+       DS_0,
+       DS_1,
+       DS_MAX
+};
+
+enum dpu_ctl {
+       CTL_0 = 1,
+       CTL_1,
+       CTL_2,
+       CTL_3,
+       CTL_4,
+       CTL_MAX
+};
+
+enum dpu_cdm {
+       CDM_0 = 1,
+       CDM_1,
+       CDM_MAX
+};
+
+enum dpu_pingpong {
+       PINGPONG_0 = 1,
+       PINGPONG_1,
+       PINGPONG_2,
+       PINGPONG_3,
+       PINGPONG_4,
+       PINGPONG_S0,
+       PINGPONG_MAX
+};
+
+enum dpu_intf {
+       INTF_0 = 1,
+       INTF_1,
+       INTF_2,
+       INTF_3,
+       INTF_4,
+       INTF_5,
+       INTF_6,
+       INTF_MAX
+};
+
+enum dpu_intf_type {
+       INTF_NONE = 0x0,
+       INTF_DSI = 0x1,
+       INTF_HDMI = 0x3,
+       INTF_LCDC = 0x5,
+       INTF_EDP = 0x9,
+       INTF_DP = 0xa,
+       INTF_TYPE_MAX,
+
+       /* virtual interfaces */
+       INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+       INTF_MODE_NONE = 0,
+       INTF_MODE_CMD,
+       INTF_MODE_VIDEO,
+       INTF_MODE_WB_BLOCK,
+       INTF_MODE_WB_LINE,
+       INTF_MODE_MAX
+};
+
+enum dpu_wb {
+       WB_0 = 1,
+       WB_1,
+       WB_2,
+       WB_3,
+       WB_MAX
+};
+
+enum dpu_ad {
+       AD_0 = 0x1,
+       AD_1,
+       AD_MAX
+};
+
+enum dpu_cwb {
+       CWB_0 = 0x1,
+       CWB_1,
+       CWB_2,
+       CWB_3,
+       CWB_MAX
+};
+
+enum dpu_wd_timer {
+       WD_TIMER_0 = 0x1,
+       WD_TIMER_1,
+       WD_TIMER_2,
+       WD_TIMER_3,
+       WD_TIMER_4,
+       WD_TIMER_5,
+       WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+       VBIF_0,
+       VBIF_1,
+       VBIF_MAX,
+       VBIF_RT = VBIF_0,
+       VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+       DPU_IOMMU_DOMAIN_UNSECURE,
+       DPU_IOMMU_DOMAIN_SECURE,
+       DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+       C0_G_Y = 0,
+       C1_B_Cb = 1,
+       C2_R_Cr = 2,
+       C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed
+ * @DPU_PLANE_INTERLEAVED   : Color components in a single plane
+ * @DPU_PLANE_PLANAR        : Color components in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in a separate plane
+ */
+enum dpu_plane_type {
+       DPU_PLANE_INTERLEAVED,
+       DPU_PLANE_PLANAR,
+       DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB   : No chroma subsampling
+ * @DPU_CHROMA_H2V1  : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2  : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420   : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+       DPU_CHROMA_RGB,
+       DPU_CHROMA_H2V1,
+       DPU_CHROMA_H1V2,
+       DPU_CHROMA_420
+};
+
+/**
+ * dpu_fetch_type - Defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR   : fetch is line by line
+ * @DPU_FETCH_TILE     : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC     : fetch and decompress data
+ */
+enum dpu_fetch_type {
+       DPU_FETCH_LINEAR,
+       DPU_FETCH_TILE,
+       DPU_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+       COLOR_ALPHA_1BIT = 0,
+       COLOR_ALPHA_4BIT = 1,
+       COLOR_4BIT = 0,
+       COLOR_5BIT = 1, /* No 5-bit Alpha */
+       COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+       COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3D data is blended
+ * @BLEND_3D_NONE      : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT   : column interleaving
+ * @BLEND_3D_MAX       :
+ */
+enum dpu_3d_blend_mode {
+       BLEND_3D_NONE = 0,
+       BLEND_3D_FRAME_INT,
+       BLEND_3D_H_ROW_INT,
+       BLEND_3D_V_ROW_INT,
+       BLEND_3D_COL_INT,
+       BLEND_3D_MAX
+};
+
+/**
+ * struct dpu_format - defines the format configuration which allows
+ * DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing the fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+       struct msm_format base;
+       enum dpu_plane_type fetch_planes;
+       u8 element[DPU_MAX_PLANES];
+       u8 bits[DPU_MAX_PLANES];
+       enum dpu_chroma_samp_type chroma_sample;
+       u8 unpack_align_msb;
+       u8 unpack_tight;
+       u8 unpack_count;
+       u8 bpp;
+       u8 alpha_enable;
+       u8 num_planes;
+       enum dpu_fetch_type fetch_mode;
+       DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+       u16 tile_width;
+       u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+       const struct dpu_format *format;
+       uint32_t num_planes;
+       uint32_t width;
+       uint32_t height;
+       uint32_t total_size;
+       uint32_t plane_addr[DPU_MAX_PLANES];
+       uint32_t plane_size[DPU_MAX_PLANES];
+       uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+       /* matrix coefficients in S15.16 format */
+       uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+       uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+       uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+       uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+       uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+       u32 color_0;
+       u32 color_1;
+       u32 color_2;
+       u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE     (1 << 0)
+#define DPU_DBG_MASK_CDM      (1 << 1)
+#define DPU_DBG_MASK_INTF     (1 << 2)
+#define DPU_DBG_MASK_LM       (1 << 3)
+#define DPU_DBG_MASK_CTL      (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP     (1 << 6)
+#define DPU_DBG_MASK_WB       (1 << 7)
+#define DPU_DBG_MASK_TOP      (1 << 8)
+#define DPU_DBG_MASK_VBIF     (1 << 9)
+#define DPU_DBG_MASK_ROT      (1 << 10)
+
+#endif  /* _DPU_HW_MDSS_H */
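Illustrative use of the format classification macros above; the CSC remark is a general expectation for YUV sources, not a rule stated in this header.

static void example_classify_format(const struct dpu_format *fmt)
{
	if (DPU_FORMAT_IS_YUV(fmt))
		pr_debug("YUV source: expect a CSC before RGB blending\n");

	if (DPU_FORMAT_IS_UBWC(fmt))
		pr_debug("UBWC: HW fetches and decompresses\n");
	else if (DPU_FORMAT_IS_TILE(fmt))
		pr_debug("tiled fetch, no compression\n");
	else if (DPU_FORMAT_IS_LINEAR(fmt))
		pr_debug("linear, line-by-line fetch\n");
}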
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644 (file)
index 0000000..cc3a623
--- /dev/null
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN                0x000
+#define PP_SYNC_CONFIG_VSYNC            0x004
+#define PP_SYNC_CONFIG_HEIGHT           0x008
+#define PP_SYNC_WRCOUNT                 0x00C
+#define PP_VSYNC_INIT_VAL               0x010
+#define PP_INT_COUNT_VAL                0x014
+#define PP_SYNC_THRESH                  0x018
+#define PP_START_POS                    0x01C
+#define PP_RD_PTR_IRQ                   0x020
+#define PP_WR_PTR_IRQ                   0x024
+#define PP_OUT_LINE_COUNT               0x028
+#define PP_LINE_COUNT                   0x02C
+
+#define PP_FBC_MODE                     0x034
+#define PP_FBC_BUDGET_CTL               0x038
+#define PP_FBC_LOSSY_MODE               0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+               struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       for (i = 0; i < m->pingpong_count; i++) {
+               if (pp == m->pingpong[i].id) {
+                       b->base_off = addr;
+                       b->blk_off = m->pingpong[i].base;
+                       b->length = m->pingpong[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_PINGPONG;
+                       return &m->pingpong[i];
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+               struct dpu_hw_tear_check *te)
+{
+       struct dpu_hw_blk_reg_map *c;
+       int cfg;
+
+       if (!pp || !te)
+               return -EINVAL;
+       c = &pp->hw;
+
+       cfg = BIT(19); /* VSYNC_COUNTER_EN */
+       if (te->hw_vsync_mode)
+               cfg |= BIT(20);
+
+       cfg |= te->vsync_count;
+
+       DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+       DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+       DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+       DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+       DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+       DPU_REG_WRITE(c, PP_SYNC_THRESH,
+                       ((te->sync_threshold_continue << 16) |
+                        te->sync_threshold_start));
+       DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+                       (te->start_pos + te->sync_threshold_start + 1));
+
+       return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+               u32 timeout_us)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 val;
+       int rc;
+
+       if (!pp)
+               return -EINVAL;
+
+       c = &pp->hw;
+       rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+                       val, (val & 0xffff) >= 1, 10, timeout_us);
+
+       return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+       struct dpu_hw_blk_reg_map *c;
+
+       if (!pp)
+               return -EINVAL;
+       c = &pp->hw;
+
+       DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+       return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+               bool enable_external_te)
+{
+       struct dpu_hw_blk_reg_map *c = &pp->hw;
+       u32 cfg;
+       int orig;
+
+       if (!pp)
+               return -EINVAL;
+
+       c = &pp->hw;
+       cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+       orig = (bool)(cfg & BIT(20));
+       if (enable_external_te)
+               cfg |= BIT(20);
+       else
+               cfg &= ~BIT(20);
+       DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+       trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+       return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+               struct dpu_hw_pp_vsync_info *info)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 val;
+
+       if (!pp || !info)
+               return -EINVAL;
+       c = &pp->hw;
+
+       val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+       info->rd_ptr_init_val = val & 0xffff;
+
+       val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+       info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+       info->rd_ptr_line_count = val & 0xffff;
+
+       val = DPU_REG_READ(c, PP_LINE_COUNT);
+       info->wr_ptr_line_count = val & 0xffff;
+
+       return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+       struct dpu_hw_blk_reg_map *c = &pp->hw;
+       u32 height, init;
+       u32 line = 0xFFFF;
+
+       if (!pp)
+               return 0;
+       c = &pp->hw;
+
+       init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+       height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+       if (height < init)
+               goto line_count_exit;
+
+       line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+       if (line < init)
+               line += (0xFFFF - init);
+       else
+               line -= init;
+
+line_count_exit:
+       return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+       const struct dpu_pingpong_cfg *hw_cap)
+{
+       ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+       ops->enable_tearcheck = dpu_hw_pp_enable_te;
+       ops->connect_external_te = dpu_hw_pp_connect_external_te;
+       ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+       ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+       ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_pingpong *c;
+       struct dpu_pingpong_cfg *cfg;
+       int rc;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _pingpong_offset(idx, m, addr, &c->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(c);
+               return ERR_PTR(-EINVAL);
+       }
+
+       c->idx = idx;
+       c->caps = cfg;
+       _setup_pingpong_ops(&c->ops, c->caps);
+
+       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       return c;
+
+blk_init_error:
+       kzfree(c);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+       if (pp)
+               dpu_hw_blk_destroy(&pp->base);
+       kfree(pp);
+}
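+
+/*
+ * Usage sketch (illustrative, not part of the original patch): the
+ * expected init/use/destroy lifecycle of a pingpong block; "mmio" and
+ * "catalog" stand in for the caller's mapped MDP registers and parsed
+ * catalog data.
+ *
+ *     struct dpu_hw_pingpong *pp;
+ *
+ *     pp = dpu_hw_pingpong_init(PINGPONG_0, mmio, catalog);
+ *     if (IS_ERR(pp))
+ *             return PTR_ERR(pp);
+ *
+ *     pp->ops.poll_timeout_wr_ptr(pp, timeout_us);
+ *
+ *     dpu_hw_pingpong_destroy(pp);
+ */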
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644 (file)
index 0000000..3caccd7
--- /dev/null
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+       /*
+        * Ratio of MDP VSYNC clock frequency (Hz) to refresh rate,
+        * divided by the number of lines
+        */
+       u32 vsync_count;
+       u32 sync_cfg_height;
+       u32 vsync_init_val;
+       u32 sync_threshold_start;
+       u32 sync_threshold_continue;
+       u32 start_pos;
+       u32 rd_ptr_irq;
+       u8 hw_vsync_mode;
+};
+
+struct dpu_hw_pp_vsync_info {
+       u32 rd_ptr_init_val;    /* value of rd pointer at vsync edge */
+       u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+       u32 rd_ptr_line_count;  /* current line on panel (rd ptr) */
+       u32 wr_ptr_line_count;  /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ *  @setup_tearcheck : program tear check values
+ *  @enable_tearcheck : enables tear check
+ *  @connect_external_te : connect/disconnect the external TE signal
+ *  @get_vsync_info : retrieves timing info of the panel
+ *  @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ *  @get_line_count : obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+       /**
+        * enables vsync generation, sets up the initial value of the
+        * read pointer and programs the tear check configuration
+        */
+       int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+                       struct dpu_hw_tear_check *cfg);
+
+       /**
+        * enables tear check block
+        */
+       int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+                       bool enable);
+
+       /**
+        * read, modify, write to either set or clear listening to external TE
+        * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+        */
+       int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+                       bool enable_external_te);
+
+       /**
+        * provides the programmed and current
+        * line_count
+        */
+       int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+                       struct dpu_hw_pp_vsync_info  *info);
+
+       /**
+        * poll until write pointer transmission starts
+        * @Return: 0 on success, -ETIMEDOUT on timeout
+        */
+       int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+       /**
+        * Obtain current vertical line counter
+        */
+       u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+
+       /* pingpong */
+       enum dpu_pingpong idx;
+       const struct dpu_pingpong_cfg *caps;
+
+       /* ops */
+       struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ *     pingpong idx.
+ * @idx:  Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ *     should be called to free the context
+ * @pp:   Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644 (file)
index 0000000..c25b52a
--- /dev/null
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE   0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE                      0x00
+#define SSPP_SRC_XY                        0x08
+#define SSPP_OUT_SIZE                      0x0c
+#define SSPP_OUT_XY                        0x10
+#define SSPP_SRC0_ADDR                     0x14
+#define SSPP_SRC1_ADDR                     0x18
+#define SSPP_SRC2_ADDR                     0x1C
+#define SSPP_SRC3_ADDR                     0x20
+#define SSPP_SRC_YSTRIDE0                  0x24
+#define SSPP_SRC_YSTRIDE1                  0x28
+#define SSPP_SRC_FORMAT                    0x30
+#define SSPP_SRC_UNPACK_PATTERN            0x34
+#define SSPP_SRC_OP_MODE                   0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1                 0x16C
+#define SSPP_SRC_XY_REC1                   0x168
+#define SSPP_OUT_SIZE_REC1                 0x160
+#define SSPP_OUT_XY_REC1                   0x164
+#define SSPP_SRC_FORMAT_REC1               0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1       0x178
+#define SSPP_SRC_OP_MODE_REC1              0x17C
+#define SSPP_MULTIRECT_OPMODE              0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1       0x180
+#define SSPP_EXCL_REC_SIZE_REC1            0x184
+#define SSPP_EXCL_REC_XY_REC1              0x188
+
+#define MDSS_MDP_OP_DEINTERLACE            BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD        BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1              BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0              BIT(17)
+#define MDSS_MDP_OP_IGC_EN                 BIT(16)
+#define MDSS_MDP_OP_FLIP_UD                BIT(14)
+#define MDSS_MDP_OP_FLIP_LR                BIT(13)
+#define MDSS_MDP_OP_BWC_EN                 BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE            BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS           (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH             (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED              (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR            0x3c
+#define SSPP_EXCL_REC_CTL                  0x40
+#define SSPP_UBWC_STATIC_CTRL              0x44
+#define SSPP_FETCH_CONFIG                  0x048
+#define SSPP_DANGER_LUT                    0x60
+#define SSPP_SAFE_LUT                      0x64
+#define SSPP_CREQ_LUT                      0x68
+#define SSPP_QOS_CTRL                      0x6C
+#define SSPP_DECIMATION_CONFIG             0xB4
+#define SSPP_SRC_ADDR_SW_STATUS            0x70
+#define SSPP_CREQ_LUT_0                    0x74
+#define SSPP_CREQ_LUT_1                    0x78
+#define SSPP_SW_PIX_EXT_C0_LR              0x100
+#define SSPP_SW_PIX_EXT_C0_TB              0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR            0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB            0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS    0x118
+#define SSPP_SW_PIX_EXT_C3_LR              0x120
+#define SSPP_SW_PIX_EXT_C3_TB              0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
+#define SSPP_CDP_CNTL                      0x134
+#define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
+#define SSPP_EXCL_REC_SIZE                 0x1B4
+#define SSPP_EXCL_REC_XY                   0x1B8
+#define SSPP_VIG_OP_MODE                   0x0
+#define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN       BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK   0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF    4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK     0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF      20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG                       0x04
+#define COMP0_3_PHASE_STEP_X               0x10
+#define COMP0_3_PHASE_STEP_Y               0x14
+#define COMP1_2_PHASE_STEP_X               0x18
+#define COMP1_2_PHASE_STEP_Y               0x1c
+#define COMP0_3_INIT_PHASE_X               0x20
+#define COMP0_3_INIT_PHASE_Y               0x24
+#define COMP1_2_INIT_PHASE_X               0x28
+#define COMP1_2_INIT_PHASE_Y               0x2C
+#define VIG_0_QSEED2_SHARP                 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN          BIT(17)
+#define VIG_OP_MEM_PROT_CONT   BIT(15)
+#define VIG_OP_MEM_PROT_VAL    BIT(14)
+#define VIG_OP_MEM_PROT_SAT    BIT(13)
+#define VIG_OP_MEM_PROT_HUE    BIT(12)
+#define VIG_OP_HIST            BIT(8)
+#define VIG_OP_SKY_COL         BIT(7)
+#define VIG_OP_FOIL            BIT(6)
+#define VIG_OP_SKIN_COL        BIT(5)
+#define VIG_OP_PA_EN           BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND  BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN          BIT(0)
+#define CSC_10BIT_OFFSET       4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK                 19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+               int s_id,
+               u32 *idx)
+{
+       int rc = 0;
+       const struct dpu_sspp_sub_blks *sblk;
+
+       if (!ctx)
+               return -EINVAL;
+
+       sblk = ctx->cap->sblk;
+
+       switch (s_id) {
+       case DPU_SSPP_SRC:
+               *idx = sblk->src_blk.base;
+               break;
+       case DPU_SSPP_SCALER_QSEED2:
+       case DPU_SSPP_SCALER_QSEED3:
+       case DPU_SSPP_SCALER_RGB:
+               *idx = sblk->scaler_blk.base;
+               break;
+       case DPU_SSPP_CSC:
+       case DPU_SSPP_CSC_10BIT:
+               *idx = sblk->csc_blk.base;
+               break;
+       default:
+               rc = -EINVAL;
+       }
+
+       return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+               enum dpu_sspp_multirect_index index,
+               enum dpu_sspp_multirect_mode mode)
+{
+       u32 mode_mask;
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       if (index == DPU_SSPP_RECT_SOLO) {
+               /*
+                * if rect index is RECT_SOLO, we cannot expect a
+                * virtual plane sharing the same SSPP id. So we go
+                * and disable multirect
+                */
+               mode_mask = 0;
+       } else {
+               mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+               mode_mask |= index;
+               if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+                       mode_mask |= BIT(2);
+               else
+                       mode_mask &= ~BIT(2);
+       }
+
+       DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+               u32 mask, u8 en)
+{
+       u32 idx;
+       u32 opmode;
+
+       if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+               _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+               !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+               return;
+
+       opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+       if (en)
+               opmode |= mask;
+       else
+               opmode &= ~mask;
+
+       DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+               u32 mask, u8 en)
+{
+       u32 idx;
+       u32 opmode;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+               return;
+
+       opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+       if (en)
+               opmode |= mask;
+       else
+               opmode &= ~mask;
+
+       DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/*
+ * Setup source pixel format and flip configuration
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+               const struct dpu_format *fmt, u32 flags,
+               enum dpu_sspp_multirect_index rect_mode)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 chroma_samp, unpack, src_format;
+       u32 opmode = 0;
+       u32 fast_clear = 0;
+       u32 op_mode_off, unpack_pat_off, format_off;
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+               return;
+
+       if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+               op_mode_off = SSPP_SRC_OP_MODE;
+               unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+               format_off = SSPP_SRC_FORMAT;
+       } else {
+               op_mode_off = SSPP_SRC_OP_MODE_REC1;
+               unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+               format_off = SSPP_SRC_FORMAT_REC1;
+       }
+
+       c = &ctx->hw;
+       opmode = DPU_REG_READ(c, op_mode_off + idx);
+       opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+                       MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+       if (flags & DPU_SSPP_FLIP_LR)
+               opmode |= MDSS_MDP_OP_FLIP_LR;
+       if (flags & DPU_SSPP_FLIP_UD)
+               opmode |= MDSS_MDP_OP_FLIP_UD;
+
+       chroma_samp = fmt->chroma_sample;
+       if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+               if (chroma_samp == DPU_CHROMA_H2V1)
+                       chroma_samp = DPU_CHROMA_H1V2;
+               else if (chroma_samp == DPU_CHROMA_H1V2)
+                       chroma_samp = DPU_CHROMA_H2V1;
+       }
+
+       src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+               (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+               (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+       if (flags & DPU_SSPP_ROT_90)
+               src_format |= BIT(11); /* ROT90 */
+
+       if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+               src_format |= BIT(8); /* SRCC3_EN */
+
+       if (flags & DPU_SSPP_SOLID_FILL)
+               src_format |= BIT(22);
+
+       unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+               (fmt->element[1] << 8) | (fmt->element[0] << 0);
+       src_format |= ((fmt->unpack_count - 1) << 12) |
+               (fmt->unpack_tight << 17) |
+               (fmt->unpack_align_msb << 18) |
+               ((fmt->bpp - 1) << 9);
+
+       if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+               if (DPU_FORMAT_IS_UBWC(fmt))
+                       opmode |= MDSS_MDP_OP_BWC_EN;
+               src_format |= (fmt->fetch_mode & 3) << 30; /* FRAME_FORMAT */
+               DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+                       DPU_FETCH_CONFIG_RESET_VALUE |
+                       ctx->mdp->highest_bank_bit << 18);
+               if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+                       fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+                       DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+                                       fast_clear | (ctx->mdp->ubwc_swizzle) |
+                                       (ctx->mdp->highest_bank_bit << 4));
+               }
+       }
+
+       opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+       /* if this is YUV pixel format, enable CSC */
+       if (DPU_FORMAT_IS_YUV(fmt))
+               src_format |= BIT(15);
+
+       if (DPU_FORMAT_IS_DX(fmt))
+               src_format |= BIT(14);
+
+       /* update scaler opmode, if appropriate */
+       if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+               _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+                       DPU_FORMAT_IS_YUV(fmt));
+       else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+               _sspp_setup_csc10_opmode(ctx,
+                       VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+                       DPU_FORMAT_IS_YUV(fmt));
+
+       DPU_REG_WRITE(c, format_off + idx, src_format);
+       DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+       DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+       /* clear previous UBWC error */
+       DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pixel_ext *pe_ext)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u8 color;
+       u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+       const u32 bytemask = 0xff;
+       const u32 shortmask = 0xffff;
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+               return;
+
+       c = &ctx->hw;
+
+       /* program SW pixel extension override for all color components */
+       for (color = 0; color < DPU_MAX_PLANES; color++) {
+               /* color 2 has the same set of registers as color 1 */
+               if (color == 2)
+                       continue;
+
+               lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+                       ((pe_ext->right_rpt[color] & bytemask) << 16)|
+                       ((pe_ext->left_ftch[color] & bytemask) << 8)|
+                       (pe_ext->left_rpt[color] & bytemask);
+
+               tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+                       ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+                       ((pe_ext->top_ftch[color] & bytemask) << 8)|
+                       (pe_ext->top_rpt[color] & bytemask);
+
+               tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+                       pe_ext->num_ext_pxls_top[color] +
+                       pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+                       ((pe_ext->roi_w[color] +
+                       pe_ext->num_ext_pxls_left[color] +
+                       pe_ext->num_ext_pxls_right[color]) & shortmask);
+       }
+
+       /* color 0 */
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+                       tot_req_pixels[0]);
+
+       /* color 1 and color 2 */
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+                       tot_req_pixels[1]);
+
+       /* color 3 */
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+       DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+                       tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_cfg *sspp,
+               struct dpu_hw_pixel_ext *pe,
+               void *scaler_cfg)
+{
+       u32 idx;
+       struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+       (void)pe;
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+               || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+               return;
+
+       dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+                       ctx->cap->sblk->scaler_blk.version,
+                       sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+       u32 idx;
+
+       if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+               return 0;
+
+       return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects - program source/destination rectangles and strides
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_cfg *cfg,
+               enum dpu_sspp_multirect_index rect_index)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+       u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+               return;
+
+       c = &ctx->hw;
+
+       if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+               src_size_off = SSPP_SRC_SIZE;
+               src_xy_off = SSPP_SRC_XY;
+               out_size_off = SSPP_OUT_SIZE;
+               out_xy_off = SSPP_OUT_XY;
+       } else {
+               src_size_off = SSPP_SRC_SIZE_REC1;
+               src_xy_off = SSPP_SRC_XY_REC1;
+               out_size_off = SSPP_OUT_SIZE_REC1;
+               out_xy_off = SSPP_OUT_XY_REC1;
+       }
+
+
+       /* src and dest rect programming */
+       src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+       src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+                  drm_rect_width(&cfg->src_rect);
+       dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+       dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+               drm_rect_width(&cfg->dst_rect);
+
+       if (rect_index == DPU_SSPP_RECT_SOLO) {
+               ystride0 = (cfg->layout.plane_pitch[0]) |
+                       (cfg->layout.plane_pitch[1] << 16);
+               ystride1 = (cfg->layout.plane_pitch[2]) |
+                       (cfg->layout.plane_pitch[3] << 16);
+       } else {
+               ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+               ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+               if (rect_index == DPU_SSPP_RECT_0) {
+                       ystride0 = (ystride0 & 0xFFFF0000) |
+                               (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+                       ystride1 = (ystride1 & 0xFFFF0000)|
+                               (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+               } else {
+                       ystride0 = (ystride0 & 0x0000FFFF) |
+                               ((cfg->layout.plane_pitch[0] << 16) &
+                                0xFFFF0000);
+                       ystride1 = (ystride1 & 0x0000FFFF) |
+                               ((cfg->layout.plane_pitch[2] << 16) &
+                                0xFFFF0000);
+               }
+       }
+
+       /* rectangle register programming */
+       DPU_REG_WRITE(c, src_size_off + idx, src_size);
+       DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+       DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+       DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+       DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+       DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_cfg *cfg,
+               enum dpu_sspp_multirect_index rect_mode)
+{
+       int i;
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       if (rect_mode == DPU_SSPP_RECT_SOLO) {
+               for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+                       DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+                                       cfg->layout.plane_addr[i]);
+       } else if (rect_mode == DPU_SSPP_RECT_0) {
+               DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+                               cfg->layout.plane_addr[0]);
+               DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+                               cfg->layout.plane_addr[2]);
+       } else {
+               DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+                               cfg->layout.plane_addr[0]);
+               DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+                               cfg->layout.plane_addr[2]);
+       }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+               struct dpu_csc_cfg *data)
+{
+       u32 idx;
+       bool csc10 = false;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+               return;
+
+       if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+               idx += CSC_10BIT_OFFSET;
+               csc10 = true;
+       }
+
+       dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+               enum dpu_sspp_multirect_index rect_index)
+{
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+               DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+       else
+               DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+                               color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_qos_cfg *cfg)
+{
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+       DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_qos_cfg *cfg)
+{
+       u32 idx;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+               DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+               DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+                               cfg->creq_lut >> 32);
+       } else {
+               DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+       }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_qos_cfg *cfg)
+{
+       u32 idx;
+       u32 qos_ctrl = 0;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       if (cfg->vblank_en) {
+               qos_ctrl |= ((cfg->creq_vblank &
+                               SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+                               SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+               qos_ctrl |= ((cfg->danger_vblank &
+                               SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+                               SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+               qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+       }
+
+       if (cfg->danger_safe_en)
+               qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+       DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+       u32 idx;
+       u32 cdp_cntl = 0;
+
+       if (!ctx || !cfg)
+               return;
+
+       if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+               return;
+
+       if (cfg->enable)
+               cdp_cntl |= BIT(0);
+       if (cfg->ubwc_meta_enable)
+               cdp_cntl |= BIT(1);
+       if (cfg->tile_amortize_enable)
+               cdp_cntl |= BIT(2);
+       if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+               cdp_cntl |= BIT(3);
+
+       DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+               unsigned long features)
+{
+       if (test_bit(DPU_SSPP_SRC, &features)) {
+               c->ops.setup_format = dpu_hw_sspp_setup_format;
+               c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+               c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+               c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+               c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+       }
+
+       if (test_bit(DPU_SSPP_QOS, &features)) {
+               c->ops.setup_danger_safe_lut =
+                       dpu_hw_sspp_setup_danger_safe_lut;
+               c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+               c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+       }
+
+       if (test_bit(DPU_SSPP_CSC, &features) ||
+               test_bit(DPU_SSPP_CSC_10BIT, &features))
+               c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+       if (dpu_hw_sspp_multirect_enabled(c->cap))
+               c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+       if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+               c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+               c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+       }
+
+       if (test_bit(DPU_SSPP_CDP, &features))
+               c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+               void __iomem *addr,
+               struct dpu_mdss_cfg *catalog,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       if ((sspp < SSPP_MAX) && catalog && addr && b) {
+               for (i = 0; i < catalog->sspp_count; i++) {
+                       if (sspp == catalog->sspp[i].id) {
+                               b->base_off = addr;
+                               b->blk_off = catalog->sspp[i].base;
+                               b->length = catalog->sspp[i].len;
+                               b->hwversion = catalog->hwversion;
+                               b->log_mask = DPU_DBG_MASK_SSPP;
+                               return &catalog->sspp[i];
+                       }
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+               void __iomem *addr, struct dpu_mdss_cfg *catalog,
+               bool is_virtual_pipe)
+{
+       struct dpu_hw_pipe *hw_pipe;
+       struct dpu_sspp_cfg *cfg;
+       int rc;
+
+       if (!addr || !catalog)
+               return ERR_PTR(-EINVAL);
+
+       hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+       if (!hw_pipe)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(hw_pipe);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* Assign ops */
+       hw_pipe->catalog = catalog;
+       hw_pipe->mdp = &catalog->mdp[0];
+       hw_pipe->idx = idx;
+       hw_pipe->cap = cfg;
+       _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+       rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       return hw_pipe;
+
+blk_init_error:
+       kzfree(hw_pipe);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+       if (ctx)
+               dpu_hw_blk_destroy(&ctx->base);
+       kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644 (file)
index 0000000..4d81e5f
--- /dev/null
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR               BIT(0)
+#define DPU_SSPP_FLIP_UD               BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90     BIT(2)
+#define DPU_SSPP_ROT_90                        BIT(3)
+#define DPU_SSPP_SOLID_FILL            BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+       (1UL << DPU_SSPP_SCALER_QSEED2) | \
+       (1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+       DPU_SSPP_COMP_0,
+       DPU_SSPP_COMP_1_2,
+       DPU_SSPP_COMP_2,
+       DPU_SSPP_COMP_3,
+
+       DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single-rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+       DPU_SSPP_RECT_SOLO = 0,
+       DPU_SSPP_RECT_0,
+       DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+       DPU_SSPP_MULTIRECT_NONE = 0,
+       DPU_SSPP_MULTIRECT_PARALLEL,
+       DPU_SSPP_MULTIRECT_TIME_MX,
+};
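+
+/*
+ * Illustrative sketch (assumption, not from the original patch): two
+ * virtual planes sharing one SSPP in parallel multirect mode would
+ * each claim a rectangle on the same pipe context:
+ *
+ *     pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_0,
+ *                               DPU_SSPP_MULTIRECT_PARALLEL);
+ *     pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_1,
+ *                               DPU_SSPP_MULTIRECT_PARALLEL);
+ */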
+
+enum {
+       DPU_FRAME_LINEAR,
+       DPU_FRAME_TILE_A4X,
+       DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+       DPU_SCALE_FILTER_NEAREST = 0,
+       DPU_SCALE_FILTER_BIL,
+       DPU_SCALE_FILTER_PCMN,
+       DPU_SCALE_FILTER_CA,
+       DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+       DPU_SCALE_ALPHA_PIXEL_REP,
+       DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+       DPU_SCALE_2D_4X4,
+       DPU_SCALE_2D_CIR,
+       DPU_SCALE_1D_SEP,
+       DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+       u32 strength;
+       u32 edge_thr;
+       u32 smooth_thr;
+       u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+       /* scaling factors are enabled for this input layer */
+       uint8_t enable_pxl_ext;
+
+       int init_phase_x[DPU_MAX_PLANES];
+       int phase_step_x[DPU_MAX_PLANES];
+       int init_phase_y[DPU_MAX_PLANES];
+       int phase_step_y[DPU_MAX_PLANES];
+
+       /*
+        * Number of pixels extension in left, right, top and bottom direction
+        * for all color components. This pixel value for each color component
+        * should be sum of fetch + repeat pixels.
+        */
+       int num_ext_pxls_left[DPU_MAX_PLANES];
+       int num_ext_pxls_right[DPU_MAX_PLANES];
+       int num_ext_pxls_top[DPU_MAX_PLANES];
+       int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+       /*
+        * Number of pixels needs to be overfetched in left, right, top and
+        * bottom directions from source image for scaling.
+        */
+       int left_ftch[DPU_MAX_PLANES];
+       int right_ftch[DPU_MAX_PLANES];
+       int top_ftch[DPU_MAX_PLANES];
+       int btm_ftch[DPU_MAX_PLANES];
+
+       /*
+        * Number of pixels needs to be repeated in left, right, top and
+        * bottom directions for scaling.
+        */
+       int left_rpt[DPU_MAX_PLANES];
+       int right_rpt[DPU_MAX_PLANES];
+       int top_rpt[DPU_MAX_PLANES];
+       int btm_rpt[DPU_MAX_PLANES];
+
+       uint32_t roi_w[DPU_MAX_PLANES];
+       uint32_t roi_h[DPU_MAX_PLANES];
+
+       /*
+        * Filter type to be used for scaling in horizontal and vertical
+        * directions
+        */
+       enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+       enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout:    format layout information for programming buffer to hardware
+ * @src_rect:  src ROI, caller takes into account the different operations
+ *             such as decimation, flip etc to program this field
+ * @dst_rect:  destination ROI.
+ * @index:     index of the rectangle of SSPP
+ * @mode:      parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+       struct dpu_hw_fmt_layout layout;
+       struct drm_rect src_rect;
+       struct drm_rect dst_rect;
+       enum dpu_sspp_multirect_index index;
+       enum dpu_sspp_multirect_mode mode;
+};
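+
+/*
+ * Example (illustrative): a solo-rect configuration fetching a full
+ * 1920x1080 source to a same-sized destination; the pitch assumes a
+ * linear 32bpp layout.
+ *
+ *     struct dpu_hw_pipe_cfg cfg = {
+ *             .src_rect = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 },
+ *             .dst_rect = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 },
+ *             .index = DPU_SSPP_RECT_SOLO,
+ *             .mode = DPU_SSPP_MULTIRECT_NONE,
+ *     };
+ *
+ *     cfg.layout.plane_pitch[0] = 1920 * 4;
+ *     pipe->ops.setup_rects(pipe, &cfg, cfg.index);
+ */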
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+       u32 danger_lut;
+       u32 safe_lut;
+       u64 creq_lut;
+       u32 creq_vblank;
+       u32 danger_vblank;
+       bool vblank_en;
+       bool danger_safe_en;
+};
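+
+/*
+ * Example (illustrative): enabling danger/safe tracking; the LUT
+ * values below are placeholders, real values come from the catalog.
+ *
+ *     struct dpu_hw_pipe_qos_cfg qos = {
+ *             .danger_lut = 0xffff,
+ *             .safe_lut = 0xff00,
+ *             .creq_lut = 0,
+ *             .danger_safe_en = true,
+ *     };
+ *
+ *     pipe->ops.setup_danger_safe_lut(pipe, &qos);
+ *     pipe->ops.setup_creq_lut(pipe, &qos);
+ *     pipe->ops.setup_qos_ctrl(pipe, &qos);
+ */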
+
+/**
+ * enum - CDP preload-ahead address sizes
+ */
+enum {
+       DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+       DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *     DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ *     DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+       bool enable;
+       bool ubwc_meta_enable;
+       bool tile_amortize_enable;
+       u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+       u64 size;
+       u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+       /**
+        * setup_format - setup pixel format and flip configuration
+        * @ctx: Pointer to pipe context
+        * @fmt: Pointer to dpu_format structure
+        * @flags: Extra flags for format config
+        * @index: rectangle index in multirect
+        */
+       void (*setup_format)(struct dpu_hw_pipe *ctx,
+                       const struct dpu_format *fmt, u32 flags,
+                       enum dpu_sspp_multirect_index index);
+
+       /**
+        * setup_rects - setup pipe ROI rectangles
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to pipe config structure
+        * @index: rectangle index in multirect
+        */
+       void (*setup_rects)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pipe_cfg *cfg,
+                       enum dpu_sspp_multirect_index index);
+
+       /**
+        * setup_pe - setup pipe pixel extension
+        * @ctx: Pointer to pipe context
+        * @pe_ext: Pointer to pixel ext settings
+        */
+       void (*setup_pe)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pixel_ext *pe_ext);
+
+       /**
+        * setup_sourceaddress - setup pipe source addresses
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to pipe config structure
+        * @index: rectangle index in multirect
+        */
+       void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pipe_cfg *cfg,
+                       enum dpu_sspp_multirect_index index);
+
+       /**
+        * setup_csc - setup color space conversion
+        * @ctx: Pointer to pipe context
+        * @data: Pointer to config structure
+        */
+       void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+       /**
+        * setup_solidfill - enable/disable colorfill
+        * @ctx: Pointer to pipe context
+        * @color: Fill color value
+        * @index: rectangle index in multirect
+        */
+       void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+                       enum dpu_sspp_multirect_index index);
+
+       /**
+        * setup_multirect - setup multirect configuration
+        * @ctx: Pointer to pipe context
+        * @index: rectangle index in multirect
+        * @mode: parallel fetch / time multiplex multirect mode
+        */
+       void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+                       enum dpu_sspp_multirect_index index,
+                       enum dpu_sspp_multirect_mode mode);
+
+       /**
+        * setup_sharpening - setup sharpening
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to config structure
+        */
+       void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_sharp_cfg *cfg);
+
+       /**
+        * setup_danger_safe_lut - setup danger safe LUTs
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to pipe QoS configuration
+        *
+        */
+       void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pipe_qos_cfg *cfg);
+
+       /**
+        * setup_creq_lut - setup CREQ LUT
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to pipe QoS configuration
+        *
+        */
+       void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pipe_qos_cfg *cfg);
+
+       /**
+        * setup_qos_ctrl - setup QoS control
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to pipe QoS configuration
+        *
+        */
+       void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pipe_qos_cfg *cfg);
+
+       /**
+        * setup_histogram - setup histograms
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to histogram configuration
+        */
+       void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+                       void *cfg);
+
+       /**
+        * setup_scaler - setup scaler
+        * @ctx: Pointer to pipe context
+        * @pipe_cfg: Pointer to pipe configuration
+        * @pe_cfg: Pointer to pixel extension configuration
+        * @scaler_cfg: Pointer to scaler configuration
+        */
+       void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+               struct dpu_hw_pipe_cfg *pipe_cfg,
+               struct dpu_hw_pixel_ext *pe_cfg,
+               void *scaler_cfg);
+
+       /**
+        * get_scaler_ver - get scaler h/w version
+        * @ctx: Pointer to pipe context
+        */
+       u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+       /**
+        * setup_cdp - setup client driven prefetch
+        * @ctx: Pointer to pipe context
+        * @cfg: Pointer to cdp configuration
+        */
+       void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+                       struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+       struct dpu_mdss_cfg *catalog;
+       struct dpu_mdp_cfg *mdp;
+
+       /* Pipe */
+       enum dpu_sspp idx;
+       const struct dpu_sspp_cfg *cap;
+
+       /* Ops */
+       struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once per pipe before accessing it.
+ * @idx:  Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: true if the pipe is a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+               void __iomem *addr, struct dpu_mdss_cfg *catalog,
+               bool is_virtual_pipe);
+
+/**
+ * dpu_hw_sspp_destroy - destroys SSPP driver context
+ *     should be called during HW pipe cleanup.
+ * @ctx:  Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
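+/*
+ * Usage sketch (illustrative, not part of the original patch):
+ * initializing a VIG pipe and programming its rectangles; "mmio" and
+ * "catalog" are placeholders for the caller's register mapping and
+ * catalog data.
+ *
+ *     struct dpu_hw_pipe *pipe;
+ *
+ *     pipe = dpu_hw_sspp_init(SSPP_VIG0, mmio, catalog, false);
+ *     if (IS_ERR(pipe))
+ *             return PTR_ERR(pipe);
+ *
+ *     pipe->ops.setup_rects(pipe, &cfg, DPU_SSPP_RECT_SOLO);
+ *
+ *     dpu_hw_sspp_destroy(pipe);
+ */
+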
+#endif /*_DPU_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644 (file)
index 0000000..db2798e
--- /dev/null
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE                        0x28
+#define UBWC_STATIC                       0x144
+
+#define FLD_SPLIT_DISPLAY_CMD             BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN          BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX             BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX             BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS                     0x360
+#define SAFE_STATUS                       0x364
+
+#define TE_LINE_INTERVAL                  0x3F4
+
+#define TRAFFIC_SHAPER_EN                 BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)     (0x030 + ((num) * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + ((num) * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
+
+#define MDP_WD_TIMER_0_CTL                0x380
+#define MDP_WD_TIMER_0_CTL2               0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE         0x388
+#define MDP_WD_TIMER_1_CTL                0x390
+#define MDP_WD_TIMER_1_CTL2               0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE         0x398
+#define MDP_WD_TIMER_2_CTL                0x420
+#define MDP_WD_TIMER_2_CTL2               0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE         0x428
+#define MDP_WD_TIMER_3_CTL                0x430
+#define MDP_WD_TIMER_3_CTL2               0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE         0x438
+#define MDP_WD_TIMER_4_CTL                0x440
+#define MDP_WD_TIMER_4_CTL2               0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE         0x448
+
+#define MDP_TICK_COUNT                    16
+#define XO_CLK_RATE                       19200
+#define MS_TICKS_IN_SEC                   1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+       ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE) / (MDP_TICK_COUNT * (fps))))
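+
+/*
+ * Worked example: for a 60 fps display the watchdog load value is
+ * (1000 * 19200) / (16 * 60) = 19200000 / 960 = 20000 ticks.
+ */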
+
+#define DCE_SEL                           0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+               struct split_pipe_cfg *cfg)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 upper_pipe = 0;
+       u32 lower_pipe = 0;
+
+       if (!mdp || !cfg)
+               return;
+
+       c = &mdp->hw;
+
+       if (cfg->en) {
+               if (cfg->mode == INTF_MODE_CMD) {
+                       lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+                       /* interface controlling sw trigger */
+                       if (cfg->intf == INTF_2)
+                               lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+                       else
+                               lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+                       upper_pipe = lower_pipe;
+               } else {
+                       if (cfg->intf == INTF_2) {
+                               lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+                               upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+                       } else {
+                               lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+                               upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+                       }
+               }
+       }
+
+       DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+       DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+       DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+       DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+               struct cdm_output_cfg *cfg)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 out_ctl = 0;
+
+       if (!mdp || !cfg)
+               return;
+
+       c = &mdp->hw;
+
+       if (cfg->intf_en)
+               out_ctl |= BIT(19);
+
+       DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+               enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 reg_off, bit_off;
+       u32 reg_val, new_val;
+       bool clk_forced_on;
+
+       if (!mdp)
+               return false;
+
+       c = &mdp->hw;
+
+       if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+               return false;
+
+       reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+       bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+       reg_val = DPU_REG_READ(c, reg_off);
+
+       if (enable)
+               new_val = reg_val | BIT(bit_off);
+       else
+               new_val = reg_val & ~BIT(bit_off);
+
+       DPU_REG_WRITE(c, reg_off, new_val);
+
+       clk_forced_on = !(reg_val & BIT(bit_off));
+
+       return clk_forced_on;
+}
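+
+/*
+ * Typical call pattern (illustrative, not from the original patch):
+ * force the clock on around register access and restore it only if
+ * this call actually forced it on:
+ *
+ *     forced_on = mdp->ops.setup_clk_force_ctrl(mdp, DPU_CLK_CTRL_VIG0,
+ *                                               true);
+ *     ...access VIG0 registers...
+ *     if (forced_on)
+ *             mdp->ops.setup_clk_force_ctrl(mdp, DPU_CLK_CTRL_VIG0,
+ *                                           false);
+ */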
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+               struct dpu_danger_safe_status *status)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 value;
+
+       if (!mdp || !status)
+               return;
+
+       c = &mdp->hw;
+
+       value = DPU_REG_READ(c, DANGER_STATUS);
+       status->mdp = (value >> 0) & 0x3;
+       status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+       status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+       status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+       status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+       status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+       status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+       status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+       status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+       status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+       status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+       status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+       status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+       status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+       status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+               struct dpu_vsync_source_cfg *cfg)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+       static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+       if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+               return;
+
+       c = &mdp->hw;
+       reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+       for (i = 0; i < cfg->pp_count; i++) {
+               int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+               if (pp_idx >= ARRAY_SIZE(pp_offset))
+                       continue;
+
+               reg &= ~(0xf << pp_offset[pp_idx]);
+               reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+       }
+       DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+       if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+                       cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+               switch (cfg->vsync_source) {
+               case DPU_VSYNC_SOURCE_WD_TIMER_4:
+                       wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+                       wd_ctl = MDP_WD_TIMER_4_CTL;
+                       wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+                       break;
+               case DPU_VSYNC_SOURCE_WD_TIMER_3:
+                       wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+                       wd_ctl = MDP_WD_TIMER_3_CTL;
+                       wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+                       break;
+               case DPU_VSYNC_SOURCE_WD_TIMER_2:
+                       wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+                       wd_ctl = MDP_WD_TIMER_2_CTL;
+                       wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+                       break;
+               case DPU_VSYNC_SOURCE_WD_TIMER_1:
+                       wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+                       wd_ctl = MDP_WD_TIMER_1_CTL;
+                       wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+                       break;
+               case DPU_VSYNC_SOURCE_WD_TIMER_0:
+               default:
+                       wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+                       wd_ctl = MDP_WD_TIMER_0_CTL;
+                       wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+                       break;
+               }
+
+               DPU_REG_WRITE(c, wd_load_value,
+                       CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+               DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+               reg = DPU_REG_READ(c, wd_ctl2);
+               reg |= BIT(8);          /* enable heartbeat timer */
+               reg |= BIT(0);          /* enable WD timer */
+               DPU_REG_WRITE(c, wd_ctl2, reg);
+
+               /* make sure that timers are enabled/disabled for vsync state */
+               wmb();
+       }
+}
+
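When one of the WD (watchdog) timers is selected as the vsync source above, the load value written through CALCULATE_WD_LOAD_VALUE() sets the tick count per frame. That macro is defined elsewhere in this patch; the sketch below only models the arithmetic, under the assumption that the timers run from a 32 kHz slow clock:

#include <stdint.h>
#include <stdio.h>

#define WD_TIMER_CLK_HZ 32768u	/* assumed slow-clock rate, for illustration */

static uint32_t wd_load_value(uint32_t fps)
{
	/* one timer expiry per frame: ticks = clk / fps */
	return WD_TIMER_CLK_HZ / fps;
}

int main(void)
{
	printf("60 fps -> load %u ticks\n", wd_load_value(60)); /* 546 */
	return 0;
}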
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+               struct dpu_danger_safe_status *status)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 value;
+
+       if (!mdp || !status)
+               return;
+
+       c = &mdp->hw;
+
+       value = DPU_REG_READ(c, SAFE_STATUS);
+       status->mdp = (value >> 0) & 0x1;
+       status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+       status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+       status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+       status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+       status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+       status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+       status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+       status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+       status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+       status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+       status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+       status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+       status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+       status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_blk_reg_map c;
+
+       if (!mdp || !m)
+               return;
+
+       if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+               return;
+
+       /* force blk offset to zero to access beginning of register region */
+       c = mdp->hw;
+       c.blk_off = 0x0;
+       DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+       struct dpu_hw_blk_reg_map *c;
+
+       if (!mdp)
+               return;
+
+       c = &mdp->hw;
+
+       DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+               unsigned long cap)
+{
+       ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+       ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+       ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+       ops->get_danger_status = dpu_hw_get_danger_status;
+       ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+       ops->get_safe_status = dpu_hw_get_safe_status;
+       ops->reset_ubwc = dpu_hw_reset_ubwc;
+       ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
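Consumers are not expected to call the static functions above directly; _setup_mdp_ops() publishes them through the dpu_hw_mdp_ops table, so callers go through the ops pointers and NULL-check any hook a given target may leave unset. A minimal sketch, with "mdp" as a hypothetical already-initialized handle:

static void example_query_danger(struct dpu_hw_mdp *mdp)
{
	struct dpu_danger_safe_status status = { 0 };

	if (mdp && mdp->ops.get_danger_status)
		mdp->ops.get_danger_status(mdp, &status);
}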
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+               const struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       if (!m || !addr || !b)
+               return ERR_PTR(-EINVAL);
+
+       for (i = 0; i < m->mdp_count; i++) {
+               if (mdp == m->mdp[i].id) {
+                       b->base_off = addr;
+                       b->blk_off = m->mdp[i].base;
+                       b->length = m->mdp[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_TOP;
+                       return &m->mdp[i];
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+       .start = NULL,
+       .stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+               void __iomem *addr,
+               const struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_mdp *mdp;
+       const struct dpu_mdp_cfg *cfg;
+       int rc;
+
+       if (!addr || !m)
+               return ERR_PTR(-EINVAL);
+
+       mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+       if (!mdp)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _top_offset(idx, m, addr, &mdp->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(mdp);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * Assign ops
+        */
+       mdp->idx = idx;
+       mdp->caps = cfg;
+       _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+       rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+       if (rc) {
+               DPU_ERROR("failed to init hw blk %d\n", rc);
+               goto blk_init_error;
+       }
+
+       dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+       return mdp;
+
+blk_init_error:
+       kzfree(mdp);
+
+       return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+       if (mdp)
+               dpu_hw_blk_destroy(&mdp->base);
+       kfree(mdp);
+}
+
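Taken together, dpu_hw_mdptop_init() and dpu_hw_mdp_destroy() bracket the block's lifetime. A minimal usage sketch, assuming "addr" is the caller's ioremapped MDP base, "catalog" the parsed hardware catalog, and MDP_TOP the enum dpu_mdp id of the top block:

static int example_mdp_top_setup(void __iomem *addr,
		const struct dpu_mdss_cfg *catalog)
{
	struct dpu_hw_mdp *mdp;

	mdp = dpu_hw_mdptop_init(MDP_TOP, addr, catalog);
	if (IS_ERR(mdp))
		return PTR_ERR(mdp);

	/* ... program the hardware through mdp->ops ... */

	dpu_hw_mdp_destroy(mdp);
	return 0;
}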
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644 (file)
index 0000000..899925a
--- /dev/null
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en        : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+       bool en;
+       bool rd_client;
+       u32 client_id;
+       u32 bpc_denom;
+       u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en        : Enable/disable dual pipe configuration
+ * @mode      : Panel interface mode
+ * @intf      : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ *              flushed
+ */
+struct split_pipe_cfg {
+       bool en;
+       enum dpu_intf_mode mode;
+       enum dpu_intf intf;
+       bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en   : enable/disable interface output
+ */
+struct cdm_output_cfg {
+       bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+       u8 mdp;
+       u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ *                                    watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+       u32 pp_count;
+       u32 frame_rate;
+       u32 ppnumber[PINGPONG_MAX];
+       u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+       /** setup_split_pipe() : Registers are not double buffered, this
+        * function should be called before timing control enable
+        * @mdp  : mdp top context driver
+        * @cfg  : upper and lower part of pipe configuration
+        */
+       void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+                       struct split_pipe_cfg *p);
+
+       /**
+        * setup_cdm_output() : Setup selection control of the cdm data path
+        * @mdp  : mdp top context driver
+        * @cfg  : cdm output configuration
+        */
+       void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+                       struct cdm_output_cfg *cfg);
+
+       /**
+        * setup_traffic_shaper() : Setup traffic shaper control
+        * @mdp  : mdp top context driver
+        * @cfg  : traffic shaper configuration
+        */
+       void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+                       struct traffic_shaper_cfg *cfg);
+
+       /**
+        * setup_clk_force_ctrl - set clock force control
+        * @mdp: mdp top context driver
+        * @clk_ctrl: clock to be controlled
+        * @enable: force on enable
+        * @return: if the clock is forced-on by this function
+        */
+       bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+                       enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+       /**
+        * get_danger_status - get danger status
+        * @mdp: mdp top context driver
+        * @status: Pointer to danger safe status
+        */
+       void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+                       struct dpu_danger_safe_status *status);
+
+       /**
+        * setup_vsync_source - setup vsync source configuration details
+        * @mdp: mdp top context driver
+        * @cfg: vsync source selection configuration
+        */
+       void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+                               struct dpu_vsync_source_cfg *cfg);
+
+       /**
+        * get_safe_status - get safe status
+        * @mdp: mdp top context driver
+        * @status: Pointer to danger safe status
+        */
+       void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+                       struct dpu_danger_safe_status *status);
+
+       /**
+        * reset_ubwc - reset top level UBWC configuration
+        * @mdp: mdp top context driver
+        * @m: pointer to mdss catalog data
+        */
+       void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+       /**
+        * intf_audio_select - select the external interface for audio
+        * @mdp: mdp top context driver
+        */
+       void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+       struct dpu_hw_blk base;
+       struct dpu_hw_blk_reg_map hw;
+
+       /* top */
+       enum dpu_mdp idx;
+       const struct dpu_mdp_cfg *caps;
+
+       /* ops */
+       struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+       return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+               void __iomem *addr,
+               const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644 (file)
index 0000000..4cabae4
--- /dev/null
@@ -0,0 +1,368 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* file-scope static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION                  0x00
+#define QSEED3_OP_MODE                     0x04
+#define QSEED3_RGB2Y_COEFF                 0x08
+#define QSEED3_PHASE_INIT                  0x0C
+#define QSEED3_PHASE_STEP_Y_H              0x10
+#define QSEED3_PHASE_STEP_Y_V              0x14
+#define QSEED3_PHASE_STEP_UV_H             0x18
+#define QSEED3_PHASE_STEP_UV_V             0x1C
+#define QSEED3_PRELOAD                     0x20
+#define QSEED3_DE_SHARPEN                  0x24
+#define QSEED3_DE_SHARPEN_CTL              0x28
+#define QSEED3_DE_SHAPE_CTL                0x2C
+#define QSEED3_DE_THRESHOLD                0x30
+#define QSEED3_DE_ADJUST_DATA_0            0x34
+#define QSEED3_DE_ADJUST_DATA_1            0x38
+#define QSEED3_DE_ADJUST_DATA_2            0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
+#define QSEED3_SRC_SIZE_UV                 0x44
+#define QSEED3_DST_SIZE                    0x48
+#define QSEED3_COEF_LUT_CTRL               0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT           0
+#define QSEED3_COEF_LUT_DIR_BIT            1
+#define QSEED3_COEF_LUT_Y_CIR_BIT          2
+#define QSEED3_COEF_LUT_UV_CIR_BIT         3
+#define QSEED3_COEF_LUT_Y_SEP_BIT          4
+#define QSEED3_COEF_LUT_UV_SEP_BIT         5
+#define QSEED3_BUFFER_CTRL                 0x50
+#define QSEED3_CLK_CTRL0                   0x54
+#define QSEED3_CLK_CTRL1                   0x58
+#define QSEED3_CLK_STATUS                  0x5C
+#define QSEED3_MISR_CTRL                   0x70
+#define QSEED3_MISR_SIGNATURE_0            0x74
+#define QSEED3_MISR_SIGNATURE_1            0x78
+#define QSEED3_PHASE_INIT_Y_H              0x90
+#define QSEED3_PHASE_INIT_Y_V              0x94
+#define QSEED3_PHASE_INIT_UV_H             0x98
+#define QSEED3_PHASE_INIT_UV_V             0x9C
+#define QSEED3_COEF_LUT                    0x100
+#define QSEED3_FILTERS                     5
+#define QSEED3_LUT_REGIONS                 4
+#define QSEED3_CIRCULAR_LUTS               9
+#define QSEED3_SEPARABLE_LUTS              10
+#define QSEED3_LUT_SIZE                    60
+#define QSEED3_ENABLE                      2
+#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+       (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+       (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+               u32 reg_off,
+               u32 val,
+               const char *name)
+{
+       /* don't need to mutex protect this */
+       if (c->log_mask & dpu_hw_util_log_mask)
+               DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+                               name, c->blk_off + reg_off, val);
+       writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+       return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
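Both accessors resolve each register access to a single address computed from the block map: base_off + blk_off + reg_off. A minimal userspace model of that addressing (types simplified, for illustration only):

#include <stdint.h>

struct reg_map {
	uint8_t *base_off;	/* mapped device base */
	uint32_t blk_off;	/* block offset within the device */
};

static uint8_t *reg_addr(const struct reg_map *c, uint32_t reg_off)
{
	return c->base_off + c->blk_off + reg_off;
}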
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+       return &dpu_hw_util_log_mask;
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+               struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+       int i, j, filter;
+       int config_lut = 0x0;
+       unsigned long lut_flags;
+       u32 lut_addr, lut_offset, lut_len;
+       u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+       static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+               {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+               {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+               {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+               {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+               {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+       };
+
+       lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+       if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+               (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+               lut[0] = scaler3_cfg->dir_lut;
+               config_lut = 1;
+       }
+       if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+               (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+               (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+               lut[1] = scaler3_cfg->cir_lut +
+                       scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+               config_lut = 1;
+       }
+       if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+               (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+               (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+               lut[2] = scaler3_cfg->cir_lut +
+                       scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+               config_lut = 1;
+       }
+       if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+               (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+               (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+               lut[3] = scaler3_cfg->sep_lut +
+                       scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+               config_lut = 1;
+       }
+       if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+               (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+               (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+               lut[4] = scaler3_cfg->sep_lut +
+                       scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+               config_lut = 1;
+       }
+
+       if (config_lut) {
+               for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+                       if (!lut[filter])
+                               continue;
+                       lut_offset = 0;
+                       for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+                               lut_addr = QSEED3_COEF_LUT + offset
+                                       + off_tbl[filter][i][1];
+                               lut_len = off_tbl[filter][i][0] << 2;
+                               for (j = 0; j < lut_len; j++) {
+                                       DPU_REG_WRITE(c,
+                                               lut_addr,
+                                               (lut[filter])[lut_offset++]);
+                                       lut_addr += 4;
+                               }
+                       }
+               }
+       }
+
+       if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+               DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
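Each off_tbl entry is a {register count, byte offset} pair per LUT region, and lut_len = count << 2 means four 32-bit words are written per counted entry. As a consistency check, the 2D (direction) filter row sums to exactly the 200 words declared as QSEED3_DIR_LUT_SIZE; a small sketch verifying that:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* counts from off_tbl[0]: {18, 12, 12, 8} */
	static const uint32_t dir_counts[4] = { 18, 12, 12, 8 };
	uint32_t words = 0;

	for (int i = 0; i < 4; i++)
		words += dir_counts[i] << 2;	/* 4 words per entry */

	printf("2D filter LUT words: %u\n", words); /* 200 */
	return 0;
}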
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+               struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+       u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+       u32 adjust_a, adjust_b, adjust_c;
+
+       if (!de_cfg->enable)
+               return;
+
+       sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+               ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+       sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+               ((de_cfg->prec_shift & 0x7) << 13) |
+               ((de_cfg->clip & 0x7) << 16);
+
+       shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+               ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+       de_thr = (de_cfg->thr_low & 0x3FF) |
+               ((de_cfg->thr_high & 0x3FF) << 16);
+
+       adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+               ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+               ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+       adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+               ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+               ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+       adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+               ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+               ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+       DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+       DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+       DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+       DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+       DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+       DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+       DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+               struct dpu_hw_scaler3_cfg *scaler3_cfg,
+               u32 scaler_offset, u32 scaler_version,
+               const struct dpu_format *format)
+{
+       u32 op_mode = 0;
+       u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+       if (!scaler3_cfg->enable)
+               goto end;
+
+       op_mode |= BIT(0);
+       op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+       if (format && DPU_FORMAT_IS_YUV(format)) {
+               op_mode |= BIT(12);
+               op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+       }
+
+       op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+       op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+       preload =
+               ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+               ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+               ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+               ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+       src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+               ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+       src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+               ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+       dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+               ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+       if (scaler3_cfg->de.enable) {
+               _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+               op_mode |= BIT(8);
+       }
+
+       if (scaler3_cfg->lut_flag)
+               _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+                                                               scaler_offset);
+
+       if (scaler_version == 0x1002) {
+               phase_init =
+                       ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+                       ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+                       ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+                       ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+               DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+       } else {
+               DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+                       scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+               DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+                       scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+               DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+                       scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+               DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+                       scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+       }
+
+       DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+               scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+       DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+               scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+       DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+               scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+       DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+               scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+       DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+       DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+       DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+       DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+       if (format && !DPU_FORMAT_IS_DX(format))
+               op_mode |= BIT(14);
+
+       if (format && format->alpha_enable) {
+               op_mode |= BIT(10);
+               if (scaler_version == 0x1002)
+                       op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+               else
+                       op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+       }
+
+       DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
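The phase step registers are masked to 24 bits above, but this file does not define the fixed-point format. A plausible derivation, assuming the 21-bit fractional convention used by related mdss/dpu plane code (an assumption, not stated in this file):

#include <stdint.h>
#include <stdio.h>

#define PHASE_STEP_SHIFT 21	/* assumed fraction width */

static uint32_t phase_step(uint32_t src, uint32_t dst)
{
	return (uint32_t)(((uint64_t)src << PHASE_STEP_SHIFT) / dst);
}

int main(void)
{
	/* 1920 -> 960 is a 2.0x downscale: step 2.0 in fixed point */
	printf("0x%X\n", phase_step(1920, 960)); /* 0x400000, fits 24 bits */
	return 0;
}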
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+                       u32 scaler_offset)
+{
+       return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+               u32 csc_reg_off,
+               struct dpu_csc_cfg *data, bool csc10)
+{
+       static const u32 matrix_shift = 7;
+       u32 clamp_shift = csc10 ? 16 : 8;
+       u32 val;
+
+       /* matrix coeff - convert S15.16 to S4.9 */
+       val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+               (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+       DPU_REG_WRITE(c, csc_reg_off, val);
+       val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+               (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+       DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+       val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+               (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+       DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+       val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+               (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+       DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+       val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+       DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+       /* Pre clamp */
+       val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+       DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+       val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+       DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+       val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+       DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+       /* Post clamp */
+       val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+       DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+       val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+       DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+       val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+       DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+       /* Pre-Bias */
+       DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+       DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+       DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+       /* Post-Bias */
+       DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+       DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+       DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
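The matrix conversion above drops 7 of the 16 fractional bits (S15.16 >> 7) and masks to 13 bits, leaving an S4.9 value: a sign bit, 4 integer bits, 9 fractional bits. A worked example of that conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t s15_16 = 0x00012A00;		/* 1.1640625 in S15.16 */
	uint32_t s4_9 = (s15_16 >> 7) & 0x1FFF;

	printf("0x%X\n", s4_9);	/* 0x254 == 1.1640625 * 512 */
	return 0;
}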
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644 (file)
index 0000000..1240f50
--- /dev/null
@@ -0,0 +1,348 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n)                     ((BIT(n)) - 1)
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off:     mdp register mapped offset
+ * @blk_off:      pipe offset relative to mdss offset
+ * @length:       length of register block offset
+ * @xin_id:       xin id
+ * @hwversion:    mdss hw version number
+ * @log_mask:     debug log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+       void __iomem *base_off;
+       u32 blk_off;
+       u32 length;
+       u32 xin_id;
+       u32 hwversion;
+       u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable:         detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip:           clip shift
+ * @limit:          limit value
+ * @thr_quiet:      quiet threshold
+ * @thr_dieout:     dieout threshold
+ * @thr_low:        low threshold
+ * @thr_high:       high threshold
+ * @prec_shift:     precision shift
+ * @adjust_a:       A-coefficients for mapping curve
+ * @adjust_b:       B-coefficients for mapping curve
+ * @adjust_c:       C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+       u32 enable;
+       int16_t sharpen_level1;
+       int16_t sharpen_level2;
+       uint16_t clip;
+       uint16_t limit;
+       uint16_t thr_quiet;
+       uint16_t thr_dieout;
+       uint16_t thr_low;
+       uint16_t thr_high;
+       uint16_t prec_shift;
+       int16_t adjust_a[DPU_MAX_DE_CURVES];
+       int16_t adjust_b[DPU_MAX_DE_CURVES];
+       int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable:        scaler enable
+ * @dir_en:        direction detection block enable
+ * @init_phase_x:  horizontal initial phase
+ * @phase_step_x:  horizontal phase step
+ * @init_phase_y:  vertical initial phase
+ * @phase_step_y:  vertical phase step
+ * @preload_x:     horizontal preload value
+ * @preload_y:     vertical preload value
+ * @src_width:     source width
+ * @src_height:    source height
+ * @dst_width:     destination width
+ * @dst_height:    destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg:     blend coefficients configuration
+ * @lut_flag:      scaler LUT update flags
+ *                 0x1 swap LUT bank
+ *                 0x2 update 2D filter LUT
+ *                 0x4 update y circular filter LUT
+ *                 0x8 update uv circular filter LUT
+ *                 0x10 update y separable filter LUT
+ *                 0x20 update uv separable filter LUT
+ * @dir_lut_idx:   2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut:       pointer to 2D LUT
+ * @cir_lut:       pointer to circular filter LUT
+ * @sep_lut:       pointer to separable filter LUT
+ * @de:            detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+       u32 enable;
+       u32 dir_en;
+       int32_t init_phase_x[DPU_MAX_PLANES];
+       int32_t phase_step_x[DPU_MAX_PLANES];
+       int32_t init_phase_y[DPU_MAX_PLANES];
+       int32_t phase_step_y[DPU_MAX_PLANES];
+
+       u32 preload_x[DPU_MAX_PLANES];
+       u32 preload_y[DPU_MAX_PLANES];
+       u32 src_width[DPU_MAX_PLANES];
+       u32 src_height[DPU_MAX_PLANES];
+
+       u32 dst_width;
+       u32 dst_height;
+
+       u32 y_rgb_filter_cfg;
+       u32 uv_filter_cfg;
+       u32 alpha_filter_cfg;
+       u32 blend_cfg;
+
+       u32 lut_flag;
+       u32 dir_lut_idx;
+
+       u32 y_rgb_cir_lut_idx;
+       u32 uv_cir_lut_idx;
+       u32 y_rgb_sep_lut_idx;
+       u32 uv_sep_lut_idx;
+       u32 *dir_lut;
+       size_t dir_len;
+       u32 *cir_lut;
+       size_t cir_len;
+       u32 *sep_lut;
+       size_t sep_len;
+
+       /*
+        * Detail enhancer settings
+        */
+       struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+       bool is_configured;
+       u32 *dir_lut;
+       size_t dir_len;
+       u32 *cir_lut;
+       size_t cir_len;
+       u32 *sep_lut;
+       size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch:       Number of extra pixels to overfetch from left
+ * @right_ftch:      Number of extra pixels to overfetch from right
+ * @top_ftch:        Number of extra lines to overfetch from top
+ * @btm_ftch:        Number of extra lines to overfetch from bottom
+ * @left_rpt:        Number of extra pixels to repeat from left
+ * @right_rpt:       Number of extra pixels to repeat from right
+ * @top_rpt:         Number of extra lines to repeat from top
+ * @btm_rpt:         Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+       /*
+        * Number of pixels ext in left, right, top and bottom direction
+        * for all color components.
+        */
+       int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+       int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+       /*
+        * Number of pixels needs to be overfetched in left, right, top
+        * and bottom directions from source image for scaling.
+        */
+       int32_t left_ftch[DPU_MAX_PLANES];
+       int32_t right_ftch[DPU_MAX_PLANES];
+       int32_t top_ftch[DPU_MAX_PLANES];
+       int32_t btm_ftch[DPU_MAX_PLANES];
+       /*
+        * Number of pixels needs to be repeated in left, right, top and
+        * bottom directions for scaling.
+        */
+       int32_t left_rpt[DPU_MAX_PLANES];
+       int32_t right_rpt[DPU_MAX_PLANES];
+       int32_t top_rpt[DPU_MAX_PLANES];
+       int32_t btm_rpt[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable:         Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip:           Clip coefficient
+ * @limit:          Detail enhancer limit factor
+ * @thr_quiet:      Quiet zone threshold
+ * @thr_dieout:     Die-out zone threshold
+ * @thr_low:        Linear zone left threshold
+ * @thr_high:       Linear zone right threshold
+ * @prec_shift:     Detail enhancer precision
+ * @adjust_a:       Mapping curves A coefficients
+ * @adjust_b:       Mapping curves B coefficients
+ * @adjust_c:       Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+       uint32_t enable;
+       int16_t sharpen_level1;
+       int16_t sharpen_level2;
+       uint16_t clip;
+       uint16_t limit;
+       uint16_t thr_quiet;
+       uint16_t thr_dieout;
+       uint16_t thr_low;
+       uint16_t thr_high;
+       uint16_t prec_shift;
+       int16_t adjust_a[DPU_MAX_DE_CURVES];
+       int16_t adjust_b[DPU_MAX_DE_CURVES];
+       int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable:            Scaler enable
+ * @dir_en:            Detail enhancer enable
+ * @pe:                Pixel extension settings
+ * @horz_decimate:     Horizontal decimation factor
+ * @vert_decimate:     Vertical decimation factor
+ * @init_phase_x:      Initial scaler phase values for x
+ * @phase_step_x:      Phase step values for x
+ * @init_phase_y:      Initial scaler phase values for y
+ * @phase_step_y:      Phase step values for y
+ * @preload_x:         Horizontal preload value
+ * @preload_y:         Vertical preload value
+ * @src_width:         Source width
+ * @src_height:        Source height
+ * @dst_width:         Destination width
+ * @dst_height:        Destination height
+ * @y_rgb_filter_cfg:  Y/RGB plane filter configuration
+ * @uv_filter_cfg:     UV plane filter configuration
+ * @alpha_filter_cfg:  Alpha filter configuration
+ * @blend_cfg:         Selection of blend coefficients
+ * @lut_flag:          LUT configuration flags
+ * @dir_lut_idx:       2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx:    UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx:    UV separable LUT index
+ * @de:                Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+       /*
+        * General definitions
+        */
+       uint32_t enable;
+       uint32_t dir_en;
+
+       /*
+        * Pix ext settings
+        */
+       struct dpu_drm_pix_ext_v1 pe;
+
+       /*
+        * Decimation settings
+        */
+       uint32_t horz_decimate;
+       uint32_t vert_decimate;
+
+       /*
+        * Phase settings
+        */
+       int32_t init_phase_x[DPU_MAX_PLANES];
+       int32_t phase_step_x[DPU_MAX_PLANES];
+       int32_t init_phase_y[DPU_MAX_PLANES];
+       int32_t phase_step_y[DPU_MAX_PLANES];
+
+       uint32_t preload_x[DPU_MAX_PLANES];
+       uint32_t preload_y[DPU_MAX_PLANES];
+       uint32_t src_width[DPU_MAX_PLANES];
+       uint32_t src_height[DPU_MAX_PLANES];
+
+       uint32_t dst_width;
+       uint32_t dst_height;
+
+       uint32_t y_rgb_filter_cfg;
+       uint32_t uv_filter_cfg;
+       uint32_t alpha_filter_cfg;
+       uint32_t blend_cfg;
+
+       uint32_t lut_flag;
+       uint32_t dir_lut_idx;
+
+       /* for Y(RGB) and UV planes */
+       uint32_t y_rgb_cir_lut_idx;
+       uint32_t uv_cir_lut_idx;
+       uint32_t y_rgb_sep_lut_idx;
+       uint32_t uv_sep_lut_idx;
+
+       /*
+        * Detail enhancer settings
+        */
+       struct dpu_drm_de_v1 de;
+};
+
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+               u32 reg_off,
+               u32 val,
+               const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
+#define MISR_FRAME_COUNT_MASK          0xFF
+#define MISR_CTRL_ENABLE               BIT(8)
+#define MISR_CTRL_STATUS               BIT(9)
+#define MISR_CTRL_STATUS_CLEAR         BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK   BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+               struct dpu_hw_scaler3_cfg *scaler3_cfg,
+               u32 scaler_offset, u32 scaler_version,
+               const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+               u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map  *c,
+               u32 csc_reg_off,
+               struct dpu_csc_cfg *data, bool csc10);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644 (file)
index 0000000..d439055
--- /dev/null
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION                   0x0000
+#define VBIF_CLK_FORCE_CTRL0           0x0008
+#define VBIF_CLK_FORCE_CTRL1           0x000C
+#define VBIF_QOS_REMAP_00              0x0020
+#define VBIF_QOS_REMAP_01              0x0024
+#define VBIF_QOS_REMAP_10              0x0028
+#define VBIF_QOS_REMAP_11              0x002C
+#define VBIF_WRITE_GATHER_EN           0x00AC
+#define VBIF_IN_RD_LIM_CONF0           0x00B0
+#define VBIF_IN_RD_LIM_CONF1           0x00B4
+#define VBIF_IN_RD_LIM_CONF2           0x00B8
+#define VBIF_IN_WR_LIM_CONF0           0x00C0
+#define VBIF_IN_WR_LIM_CONF1           0x00C4
+#define VBIF_IN_WR_LIM_CONF2           0x00C8
+#define VBIF_OUT_RD_LIM_CONF0          0x00D0
+#define VBIF_OUT_WR_LIM_CONF0          0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0    0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1    0x0164
+#define VBIF_XIN_PND_ERR               0x0190
+#define VBIF_XIN_SRC_ERR               0x0194
+#define VBIF_XIN_CLR_ERR               0x019C
+#define VBIF_XIN_HALT_CTRL0            0x0200
+#define VBIF_XIN_HALT_CTRL1            0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000     0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000    0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+               u32 *pnd_errors, u32 *src_errors)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 pnd, src;
+
+       if (!vbif)
+               return;
+       c = &vbif->hw;
+       pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+       src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+       if (pnd_errors)
+               *pnd_errors = pnd;
+       if (src_errors)
+               *src_errors = src;
+
+       DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+               u32 xin_id, u32 value)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 reg_off;
+       u32 bit_off;
+       u32 reg_val;
+
+       /*
+        * Assume 4 bits per field and 8 fields per 32-bit register,
+        * so at most 16 fields across the two registers
+        */
+       if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+               return;
+
+       c = &vbif->hw;
+
+       if (xin_id >= 8) {
+               xin_id -= 8;
+               reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+       } else {
+               reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+       }
+       bit_off = (xin_id & 0x7) * 4;
+       reg_val = DPU_REG_READ(c, reg_off);
+       reg_val &= ~(0x7 << bit_off);
+       reg_val |= (value & 0x7) << bit_off;
+       DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+               u32 xin_id, bool rd, u32 limit)
+{
+       struct dpu_hw_blk_reg_map *c = &vbif->hw;
+       u32 reg_val;
+       u32 reg_off;
+       u32 bit_off;
+
+       if (rd)
+               reg_off = VBIF_IN_RD_LIM_CONF0;
+       else
+               reg_off = VBIF_IN_WR_LIM_CONF0;
+
+       reg_off += (xin_id / 4) * 4;
+       bit_off = (xin_id % 4) * 8;
+       reg_val = DPU_REG_READ(c, reg_off);
+       reg_val &= ~(0xFF << bit_off);
+       reg_val |= (limit) << bit_off;
+       DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+               u32 xin_id, bool rd)
+{
+       struct dpu_hw_blk_reg_map *c = &vbif->hw;
+       u32 reg_val;
+       u32 reg_off;
+       u32 bit_off;
+       u32 limit;
+
+       if (rd)
+               reg_off = VBIF_IN_RD_LIM_CONF0;
+       else
+               reg_off = VBIF_IN_WR_LIM_CONF0;
+
+       reg_off += (xin_id / 4) * 4;
+       bit_off = (xin_id % 4) * 8;
+       reg_val = DPU_REG_READ(c, reg_off);
+       limit = (reg_val >> bit_off) & 0xFF;
+
+       return limit;
+}
+
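Both limit helpers share one register layout: four 8-bit client fields per 32-bit register, so xin_id selects the register (xin_id / 4) and the byte lane within it (xin_id % 4). A small sketch of that index arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xin_id = 6;
	uint32_t reg_off = 0x00B0 + (xin_id / 4) * 4;	/* VBIF_IN_RD_LIM_CONF1 */
	uint32_t bit_off = (xin_id % 4) * 8;		/* bits 23:16 */

	printf("reg 0x%X, shift %u\n", reg_off, bit_off);
	return 0;
}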
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+               u32 xin_id, bool enable)
+{
+       struct dpu_hw_blk_reg_map *c = &vbif->hw;
+       u32 reg_val;
+
+       reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+       if (enable)
+               reg_val |= BIT(xin_id);
+       else
+               reg_val &= ~BIT(xin_id);
+
+       DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+               u32 xin_id)
+{
+       struct dpu_hw_blk_reg_map *c = &vbif->hw;
+       u32 reg_val;
+
+       reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+       return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+               u32 xin_id, u32 level, u32 remap_level)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+       if (!vbif)
+               return;
+
+       c = &vbif->hw;
+
+       reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+       reg_shift = (xin_id & 0x7) * 4;
+
+       reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+       reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+       mask = 0x7 << reg_shift;
+
+       reg_val &= ~mask;
+       reg_val |= (remap_level << reg_shift) & mask;
+
+       reg_val_lvl &= ~mask;
+       reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+       DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+       DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+       struct dpu_hw_blk_reg_map *c;
+       u32 reg_val;
+
+       if (!vbif || xin_id >= MAX_XIN_COUNT)
+               return;
+
+       c = &vbif->hw;
+
+       reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+       reg_val |= BIT(xin_id);
+       DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+               unsigned long cap)
+{
+       ops->set_limit_conf = dpu_hw_set_limit_conf;
+       ops->get_limit_conf = dpu_hw_get_limit_conf;
+       ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+       ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+       if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+               ops->set_qos_remap = dpu_hw_set_qos_remap;
+       ops->set_mem_type = dpu_hw_set_mem_type;
+       ops->clear_errors = dpu_hw_clear_errors;
+       ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+               const struct dpu_mdss_cfg *m,
+               void __iomem *addr,
+               struct dpu_hw_blk_reg_map *b)
+{
+       int i;
+
+       for (i = 0; i < m->vbif_count; i++) {
+               if (vbif == m->vbif[i].id) {
+                       b->base_off = addr;
+                       b->blk_off = m->vbif[i].base;
+                       b->length = m->vbif[i].len;
+                       b->hwversion = m->hwversion;
+                       b->log_mask = DPU_DBG_MASK_VBIF;
+                       return &m->vbif[i];
+               }
+       }
+
+       return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+               void __iomem *addr,
+               const struct dpu_mdss_cfg *m)
+{
+       struct dpu_hw_vbif *c;
+       const struct dpu_vbif_cfg *cfg;
+
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return ERR_PTR(-ENOMEM);
+
+       cfg = _top_offset(idx, m, addr, &c->hw);
+       if (IS_ERR_OR_NULL(cfg)) {
+               kfree(c);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * Assign ops
+        */
+       c->idx = idx;
+       c->cap = cfg;
+       _setup_vbif_ops(&c->ops, c->cap->features);
+
+       /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+       return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+       kfree(vbif);
+}
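A minimal usage sketch for the VBIF block, mirroring the MDP top lifecycle; VBIF_RT is assumed to be a member of enum dpu_vbif, and "addr"/"catalog" are caller-provided. Note that set_qos_remap is only populated when the catalog advertises DPU_VBIF_QOS_REMAP:

static void example_vbif_setup(void __iomem *addr,
		const struct dpu_mdss_cfg *catalog, u32 xin_id)
{
	struct dpu_hw_vbif *vbif;

	vbif = dpu_hw_vbif_init(VBIF_RT, addr, catalog);
	if (IS_ERR_OR_NULL(vbif))
		return;

	if (vbif->ops.set_qos_remap)	/* optional hook */
		vbif->ops.set_qos_remap(vbif, xin_id, 3, 3);

	dpu_hw_vbif_destroy(vbif);
}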
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644 (file)
index 0000000..471ff67
--- /dev/null
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ *  Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+       /**
+        * set_limit_conf - set transaction limit config
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        * @rd: true for read limit; false for write limit
+        * @limit: outstanding transaction limit
+        */
+       void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+                       u32 xin_id, bool rd, u32 limit);
+
+       /**
+        * get_limit_conf - get transaction limit config
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        * @rd: true for read limit; false for write limit
+        * @return: outstanding transaction limit
+        */
+       u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+                       u32 xin_id, bool rd);
+
+       /**
+        * set_halt_ctrl - set halt control
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        * @enable: halt control enable
+        */
+       void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+                       u32 xin_id, bool enable);
+
+       /**
+        * get_halt_ctrl - get halt control
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        * @return: halt control enable
+        */
+       bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+                       u32 xin_id);
+
+       /**
+        * set_qos_remap - set QoS priority remap
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        * @level: priority level
+        * @remap_level: remapped level
+        */
+       void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+                       u32 xin_id, u32 level, u32 remap_level);
+
+       /**
+        * set_mem_type - set memory type
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        * @value: memory type value
+        */
+       void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+                       u32 xin_id, u32 value);
+
+       /**
+        * clear_errors - clear any vbif errors
+        *      This function clears any detected pending/source errors
+        *      on the VBIF interface, and optionally returns the detected
+        *      error mask(s).
+        * @vbif: vbif context driver
+        * @pnd_errors: pointer to pending error reporting variable
+        * @src_errors: pointer to source error reporting variable
+        */
+       void (*clear_errors)(struct dpu_hw_vbif *vbif,
+               u32 *pnd_errors, u32 *src_errors);
+
+       /**
+        * set_write_gather_en - set write_gather enable
+        * @vbif: vbif context driver
+        * @xin_id: client interface identifier
+        */
+       void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+       /* base */
+       struct dpu_hw_blk_reg_map hw;
+
+       /* vbif */
+       enum dpu_vbif idx;
+       const struct dpu_vbif_cfg *cap;
+
+       /* ops */
+       struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx:  Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m:    Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+               void __iomem *addr,
+               const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644 (file)
index 0000000..5b2bc9b
--- /dev/null
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/**
+ * MDP TOP block register offsets and bit-field defines
+ */
+#define DISP_INTF_SEL                   0x004
+#define INTR_EN                         0x010
+#define INTR_STATUS                     0x014
+#define INTR_CLEAR                      0x018
+#define INTR2_EN                        0x008
+#define INTR2_STATUS                    0x00c
+#define INTR2_CLEAR                     0x02c
+#define HIST_INTR_EN                    0x01c
+#define HIST_INTR_STATUS                0x020
+#define HIST_INTR_CLEAR                 0x024
+#define INTF_INTR_EN                    0x1C0
+#define INTF_INTR_STATUS                0x1C4
+#define INTF_INTR_CLEAR                 0x1C8
+#define SPLIT_DISPLAY_EN                0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN        0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN        0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN        0x308
+#define HW_EVENTS_CTL                   0x37C
+#define CLK_CTRL3                       0x3A8
+#define CLK_STATUS3                     0x3AC
+#define CLK_CTRL4                       0x3B0
+#define CLK_STATUS4                     0x3B4
+#define CLK_CTRL5                       0x3B8
+#define CLK_STATUS5                     0x3BC
+#define CLK_CTRL7                       0x3D0
+#define CLK_STATUS7                     0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL   0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL  0x3F4
+#define INTF_SW_RESET_MASK              0x3FC
+#define HDMI_DP_CORE_SELECT             0x408
+#define MDP_OUT_CTL_0                   0x410
+#define MDP_VSYNC_SEL                   0x414
+#define DCE_SEL                         0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644 (file)
index 0000000..790d39f
--- /dev/null
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+       int i;
+
+       for (i = num_clk - 1; i >= 0; i--) {
+               if (clk_arry[i].clk)
+                       clk_put(clk_arry[i].clk);
+               clk_arry[i].clk = NULL;
+       }
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < num_clk; i++) {
+               clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+               rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+               if (rc) {
+                       DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name, rc);
+                       goto error;
+               }
+       }
+
+       return rc;
+
+error:
+       for (i--; i >= 0; i--) {
+               if (clk_arry[i].clk)
+                       clk_put(clk_arry[i].clk);
+               clk_arry[i].clk = NULL;
+       }
+
+       return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+       int i, rc = 0;
+
+       for (i = 0; i < num_clk; i++) {
+               if (clk_arry[i].clk) {
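+                       /* AHB clock rates are controlled through the RPM; never set them here */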
+                       if (clk_arry[i].type != DSS_CLK_AHB) {
+                               DEV_DBG("%pS->%s: '%s' rate %ld\n",
+                                       __builtin_return_address(0), __func__,
+                                       clk_arry[i].clk_name,
+                                       clk_arry[i].rate);
+                               rc = clk_set_rate(clk_arry[i].clk,
+                                       clk_arry[i].rate);
+                               if (rc) {
+                                       DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+                                               __builtin_return_address(0),
+                                               __func__,
+                                               clk_arry[i].clk_name, rc);
+                                       break;
+                               }
+                       }
+               } else {
+                       DEV_ERR("%pS->%s: '%s' is not available\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name);
+                       rc = -EPERM;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+       int i, rc = 0;
+
+       if (enable) {
+               for (i = 0; i < num_clk; i++) {
+                       DEV_DBG("%pS->%s: enable '%s'\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name);
+                       if (clk_arry[i].clk) {
+                               rc = clk_prepare_enable(clk_arry[i].clk);
+                               if (rc)
+                                       DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+                                               __builtin_return_address(0),
+                                               __func__,
+                                               clk_arry[i].clk_name, rc);
+                       } else {
+                               DEV_ERR("%pS->%s: '%s' is not available\n",
+                                       __builtin_return_address(0), __func__,
+                                       clk_arry[i].clk_name);
+                               rc = -EPERM;
+                       }
+
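+                       /* on failure, roll back the i clocks already enabled */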
+                       if (rc) {
+                               msm_dss_enable_clk(&clk_arry[i],
+                                       i, false);
+                               break;
+                       }
+               }
+       } else {
+               for (i = num_clk - 1; i >= 0; i--) {
+                       DEV_DBG("%pS->%s: disable '%s'\n",
+                               __builtin_return_address(0), __func__,
+                               clk_arry[i].clk_name);
+
+                       if (clk_arry[i].clk)
+                               clk_disable_unprepare(clk_arry[i].clk);
+                       else
+                               DEV_ERR("%pS->%s: '%s' is not available\n",
+                                       __builtin_return_address(0), __func__,
+                                       clk_arry[i].clk_name);
+               }
+       }
+
+       return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+                       struct dss_module_power *mp)
+{
+       int i, rc = 0;
+       const char *clock_name;
+       int num_clk = 0;
+
+       if (!pdev || !mp)
+               return -EINVAL;
+
+       mp->num_clk = 0;
+       num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+       if (num_clk <= 0) {
+               pr_debug("clocks are not defined\n");
+               return 0;
+       }
+
+       mp->clk_config = devm_kzalloc(&pdev->dev,
+                                     sizeof(struct dss_clk) * num_clk,
+                                     GFP_KERNEL);
+       if (!mp->clk_config)
+               return -ENOMEM;
+
+       for (i = 0; i < num_clk; i++) {
+               rc = of_property_read_string_index(pdev->dev.of_node,
+                                                  "clock-names", i,
+                                                  &clock_name);
+               if (rc) {
+                       dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+                               i);
+                       break;
+               }
+               strlcpy(mp->clk_config[i].clk_name, clock_name,
+                       sizeof(mp->clk_config[i].clk_name));
+
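+               /* default to AHB; reclassified below as PCLK if the clock has a rate */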
+               mp->clk_config[i].type = DSS_CLK_AHB;
+       }
+
+       rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+               goto err;
+       }
+
+       rc = of_clk_set_defaults(pdev->dev.of_node, false);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+               goto err;
+       }
+
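+       /* any clock already running at a non-zero rate is treated as a PCLK */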
+       for (i = 0; i < num_clk; i++) {
+               unsigned long rate = clk_get_rate(mp->clk_config[i].clk);
+               if (!rate)
+                       continue;
+               mp->clk_config[i].rate = rate;
+               mp->clk_config[i].type = DSS_CLK_PCLK;
+       }
+
+       mp->num_clk = num_clk;
+       return 0;
+
+err:
+       msm_dss_put_clk(mp->clk_config, num_clk);
+       return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644 (file)
index 0000000..bc07381
--- /dev/null
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_gpio {
+       unsigned int gpio;
+       unsigned int value;
+       char gpio_name[32];
+};
+
+enum dss_clk_type {
+       DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+       DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+       struct clk *clk; /* clk handle */
+       char clk_name[32];
+       enum dss_clk_type type;
+       unsigned long rate;
+       unsigned long max_rate;
+};
+
+struct dss_module_power {
+       unsigned int num_gpio;
+       struct dss_gpio *gpio_config;
+       unsigned int num_clk;
+       struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+               struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644 (file)
index 0000000..d5e6ce0
--- /dev/null
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+               pr_err("invalid device handles\n");
+               return;
+       }
+
+       dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+       int rc;
+
+       if (!kms) {
+               DPU_ERROR("invalid parameters\n");
+               return -EINVAL;
+       }
+
+       rc = dpu_core_irq_postinstall(dpu_kms);
+
+       return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       if (!kms) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644 (file)
index 0000000..3e147f7
--- /dev/null
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * dpu_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask:      enable status of MDSS level interrupt
+ * @domain:            interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+       unsigned long enabled_mask;
+       struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms:               pointer to kms context
+ * @return:            none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms:               pointer to kms context
+ * @return:            0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms:               pointer to kms context
+ * @return:            none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms:               pointer to kms context
+ * @return:            interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644 (file)
index 0000000..7dd6bd2
--- /dev/null
@@ -0,0 +1,1352 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+               "mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+                                   const char *name)
+{
+       struct resource *res;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+       if (!res) {
+               DRM_ERROR("failed to get memory resource: %s\n", name);
+               return 0;
+       }
+
+       return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+               bool danger_status)
+{
+       struct dpu_kms *kms = (struct dpu_kms *)s->private;
+       struct msm_drm_private *priv;
+       struct dpu_danger_safe_status status;
+       int i;
+
+       if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+               DPU_ERROR("invalid arg(s)\n");
+               return 0;
+       }
+
+       priv = kms->dev->dev_private;
+       memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+       pm_runtime_get_sync(&kms->pdev->dev);
+       if (danger_status) {
+               seq_puts(s, "\nDanger signal status:\n");
+               if (kms->hw_mdp->ops.get_danger_status)
+                       kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+                                       &status);
+       } else {
+               seq_puts(s, "\nSafe signal status:\n");
+               if (kms->hw_mdp->ops.get_safe_status)
+                       kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+                                       &status);
+       }
+       pm_runtime_put_sync(&kms->pdev->dev);
+
+       seq_printf(s, "MDP     :  0x%x\n", status.mdp);
+
+       for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+               seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
+                               status.sspp[i]);
+       seq_puts(s, "\n");
+
+       return 0;
+}
+
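+/* generate the single_open() boilerplate and file_operations for a show function */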
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
+static int __prefix ## _open(struct inode *inode, struct file *file)   \
+{                                                                      \
+       return single_open(file, __prefix ## _show, inode->i_private);  \
+}                                                                      \
+static const struct file_operations __prefix ## _fops = {              \
+       .owner = THIS_MODULE,                                           \
+       .open = __prefix ## _open,                                      \
+       .release = single_release,                                      \
+       .read = seq_read,                                               \
+       .llseek = seq_lseek,                                            \
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+       return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+       return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+       debugfs_remove_recursive(dpu_kms->debugfs_danger);
+       dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+               struct dentry *parent)
+{
+       dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+                       parent);
+       if (!dpu_kms->debugfs_danger) {
+               DPU_ERROR("failed to create danger debugfs\n");
+               return -EINVAL;
+       }
+
+       debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+                       dpu_kms, &dpu_debugfs_danger_stats_fops);
+       debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+                       dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+       return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+       struct dpu_debugfs_regset32 *regset;
+       struct dpu_kms *dpu_kms;
+       struct drm_device *dev;
+       struct msm_drm_private *priv;
+       void __iomem *base;
+       uint32_t i, addr;
+
+       if (!s || !s->private)
+               return 0;
+
+       regset = s->private;
+
+       dpu_kms = regset->dpu_kms;
+       if (!dpu_kms || !dpu_kms->mmio)
+               return 0;
+
+       dev = dpu_kms->dev;
+       if (!dev)
+               return 0;
+
+       priv = dev->dev_private;
+       if (!priv)
+               return 0;
+
+       base = dpu_kms->mmio + regset->offset;
+
+       /* insert padding spaces, if needed */
+       if (regset->offset & 0xF) {
+               seq_printf(s, "[%x]", regset->offset & ~0xF);
+               for (i = 0; i < (regset->offset & 0xF); i += 4)
+                       seq_puts(s, "         ");
+       }
+
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+       /* main register output */
+       for (i = 0; i < regset->blk_len; i += 4) {
+               addr = regset->offset + i;
+               if ((addr & 0xF) == 0x0)
+                       seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+               seq_printf(s, " %08x", readl_relaxed(base + i));
+       }
+       seq_puts(s, "\n");
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+               struct file *file)
+{
+       return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+       .open =         dpu_debugfs_open_regset32,
+       .read =         seq_read,
+       .llseek =       seq_lseek,
+       .release =      single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+               uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+       if (regset) {
+               regset->offset = offset;
+               regset->blk_len = length;
+               regset->dpu_kms = dpu_kms;
+       }
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+               void *parent, struct dpu_debugfs_regset32 *regset)
+{
+       if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+               return NULL;
+
+       /* make sure offset is a multiple of 4 */
+       regset->offset = round_down(regset->offset, 4);
+
+       return debugfs_create_file(name, mode, parent,
+                       regset, &dpu_fops_regset32);
+}
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+       void *p;
+       int rc;
+
+       p = dpu_hw_util_get_log_mask_ptr();
+
+       if (!dpu_kms || !p)
+               return -EINVAL;
+
+       dpu_kms->debugfs_root = debugfs_create_dir("debug",
+                                          dpu_kms->dev->primary->debugfs_root);
+       if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+               DRM_ERROR("debugfs create_dir failed %ld\n",
+                         PTR_ERR(dpu_kms->debugfs_root));
+               return PTR_ERR(dpu_kms->debugfs_root);
+       }
+
+       rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+       if (rc) {
+               DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+               return rc;
+       }
+
+       /* allow root to be NULL */
+       debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+       (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+       (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+       (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+       rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+       if (rc) {
+               DPU_ERROR("failed to init perf %d\n", rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+       /* don't need to NULL check debugfs_root */
+       if (dpu_kms) {
+               dpu_debugfs_vbif_destroy(dpu_kms);
+               dpu_debugfs_danger_destroy(dpu_kms);
+               dpu_debugfs_core_irq_destroy(dpu_kms);
+               debugfs_remove_recursive(dpu_kms->debugfs_root);
+       }
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+       return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+       dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+               struct drm_atomic_state *state)
+{
+       struct dpu_kms *dpu_kms;
+       struct msm_drm_private *priv;
+       struct drm_device *dev;
+       struct drm_encoder *encoder;
+
+       if (!kms)
+               return;
+       dpu_kms = to_dpu_kms(kms);
+       dev = dpu_kms->dev;
+
+       if (!dev || !dev->dev_private)
+               return;
+       priv = dev->dev_private;
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+               if (encoder->crtc != NULL)
+                       dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to kick off the CRTC commit
+ * before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+       const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+       struct drm_crtc *crtc = encoder->crtc;
+
+       /* Forward this enable call to the commit hook */
+       if (funcs && funcs->commit)
+               funcs->commit(encoder);
+
+       if (crtc && crtc->state->active) {
+               trace_dpu_kms_enc_enable(DRMID(crtc));
+               dpu_crtc_commit_kickoff(crtc);
+       }
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int i;
+
+       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+               /* If modeset is required, kickoff is run in encoder_enable */
+               if (drm_atomic_crtc_needs_modeset(crtc_state))
+                       continue;
+
+               if (crtc->state->active) {
+                       trace_dpu_kms_commit(DRMID(crtc));
+                       dpu_crtc_commit_kickoff(crtc);
+               }
+       }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+               struct drm_atomic_state *old_state)
+{
+       struct dpu_kms *dpu_kms;
+       struct msm_drm_private *priv;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       int i;
+
+       if (!kms || !old_state)
+               return;
+       dpu_kms = to_dpu_kms(kms);
+
+       if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+               return;
+       priv = dpu_kms->dev->dev_private;
+
+       DPU_ATRACE_BEGIN("kms_complete_commit");
+
+       for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+               dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+               struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+       struct drm_device *dev;
+       int ret;
+
+       if (!kms || !crtc || !crtc->state) {
+               DPU_ERROR("invalid params\n");
+               return;
+       }
+
+       dev = crtc->dev;
+
+       if (!crtc->state->enable) {
+               DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
+               return;
+       }
+
+       if (!crtc->state->active) {
+               DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+               return;
+       }
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+               /*
+                * Wait for post-flush if necessary to delay before
+                * plane_cleanup. For example, wait for vsync in case of video
+                * mode panels. This may be a no-op for command mode panels.
+                */
+               trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+               ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+               if (ret && ret != -EWOULDBLOCK) {
+                       DPU_ERROR("wait for commit done returned %d\n", ret);
+                       break;
+               }
+       }
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+                                   struct msm_drm_private *priv,
+                                   struct dpu_kms *dpu_kms)
+{
+       struct drm_encoder *encoder = NULL;
+       int i, rc;
+
+       /* TODO: Support two independent DSI connectors */
+       encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+       if (IS_ERR_OR_NULL(encoder)) {
+               DPU_ERROR("encoder init failed for dsi display\n");
+               return;
+       }
+
+       priv->encoders[priv->num_encoders++] = encoder;
+
+       for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+               if (!priv->dsi[i]) {
+                       DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+                       return;
+               }
+
+               rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+               if (rc) {
+                       DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+                               i, rc);
+                       continue;
+               }
+       }
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev:        Pointer to drm device structure
+ * @priv:       Pointer to private drm device data
+ * @dpu_kms:    Pointer to dpu kms structure
+ * Returns:     none
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+                                   struct msm_drm_private *priv,
+                                   struct dpu_kms *dpu_kms)
+{
+       _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+       /*
+        * Extend this function to initialize other
+        * types of displays
+        */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+       struct msm_drm_private *priv;
+       int i;
+
+       if (!dpu_kms) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return;
+       } else if (!dpu_kms->dev) {
+               DPU_ERROR("invalid dev\n");
+               return;
+       } else if (!dpu_kms->dev->dev_private) {
+               DPU_ERROR("invalid dev_private\n");
+               return;
+       }
+       priv = dpu_kms->dev->dev_private;
+
+       for (i = 0; i < priv->num_crtcs; i++)
+               priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+       priv->num_crtcs = 0;
+
+       for (i = 0; i < priv->num_planes; i++)
+               priv->planes[i]->funcs->destroy(priv->planes[i]);
+       priv->num_planes = 0;
+
+       for (i = 0; i < priv->num_connectors; i++)
+               priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+       priv->num_connectors = 0;
+
+       for (i = 0; i < priv->num_encoders; i++)
+               priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+       priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+       struct drm_device *dev;
+       struct drm_plane *primary_planes[MAX_PLANES], *plane;
+       struct drm_crtc *crtc;
+
+       struct msm_drm_private *priv;
+       struct dpu_mdss_cfg *catalog;
+
+       int primary_planes_idx = 0, i, ret;
+       int max_crtc_count;
+
+       if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return -EINVAL;
+       }
+
+       dev = dpu_kms->dev;
+       priv = dev->dev_private;
+       catalog = dpu_kms->catalog;
+
+       /*
+        * Create encoder and query display drivers to create
+        * bridges and connectors
+        */
+       _dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+       max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+       /* Create the planes */
+       for (i = 0; i < catalog->sspp_count; i++) {
+               bool primary = true;
+
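+               /* cursor SSPPs and SSPPs beyond the CRTC count become overlay planes */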
+               if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+                       || primary_planes_idx >= max_crtc_count)
+                       primary = false;
+
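+               /* any plane may be placed on any of the first max_crtc_count CRTCs */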
+               plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+                               (1UL << max_crtc_count) - 1, 0);
+               if (IS_ERR(plane)) {
+                       DPU_ERROR("dpu_plane_init failed\n");
+                       ret = PTR_ERR(plane);
+                       goto fail;
+               }
+               priv->planes[priv->num_planes++] = plane;
+
+               if (primary)
+                       primary_planes[primary_planes_idx++] = plane;
+       }
+
+       max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+       /* Create one CRTC per encoder */
+       for (i = 0; i < max_crtc_count; i++) {
+               crtc = dpu_crtc_init(dev, primary_planes[i]);
+               if (IS_ERR(crtc)) {
+                       ret = PTR_ERR(crtc);
+                       goto fail;
+               }
+               priv->crtcs[priv->num_crtcs++] = crtc;
+       }
+
+       /* All CRTCs are compatible with all encoders */
+       for (i = 0; i < priv->num_encoders; i++)
+               priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+       return 0;
+fail:
+       _dpu_kms_drm_obj_destroy(dpu_kms);
+       return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+       struct drm_device *dev;
+       int rc;
+
+       if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+               DPU_ERROR("invalid dpu_kms\n");
+               return -EINVAL;
+       }
+
+       dev = dpu_kms->dev;
+
+       rc = _dpu_debugfs_init(dpu_kms);
+       if (rc)
+               DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+       return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+               struct drm_encoder *encoder)
+{
+       return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+       struct drm_device *dev;
+       int i;
+
+       dev = dpu_kms->dev;
+       if (!dev)
+               return;
+
+       if (dpu_kms->hw_intr)
+               dpu_hw_intr_destroy(dpu_kms->hw_intr);
+       dpu_kms->hw_intr = NULL;
+
+       if (dpu_kms->power_event)
+               dpu_power_handle_unregister_event(
+                               &dpu_kms->phandle, dpu_kms->power_event);
+
+       /* safe to call these more than once during shutdown */
+       _dpu_debugfs_destroy(dpu_kms);
+       _dpu_kms_mmu_destroy(dpu_kms);
+
+       if (dpu_kms->catalog) {
+               for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+                       u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+                       if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+                               dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+               }
+       }
+
+       if (dpu_kms->rm_init)
+               dpu_rm_destroy(&dpu_kms->rm);
+       dpu_kms->rm_init = false;
+
+       if (dpu_kms->catalog)
+               dpu_hw_catalog_deinit(dpu_kms->catalog);
+       dpu_kms->catalog = NULL;
+
+       if (dpu_kms->core_client)
+               dpu_power_client_destroy(&dpu_kms->phandle,
+                       dpu_kms->core_client);
+       dpu_kms->core_client = NULL;
+
+       if (dpu_kms->vbif[VBIF_NRT])
+               devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+       dpu_kms->vbif[VBIF_NRT] = NULL;
+
+       if (dpu_kms->vbif[VBIF_RT])
+               devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+       dpu_kms->vbif[VBIF_RT] = NULL;
+
+       if (dpu_kms->mmio)
+               devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+       dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms;
+
+       if (!kms) {
+               DPU_ERROR("invalid kms\n");
+               return;
+       }
+
+       dpu_kms = to_dpu_kms(kms);
+
+       dpu_dbg_destroy();
+       _dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+       struct drm_device *ddev;
+       struct drm_modeset_acquire_ctx ctx;
+       struct drm_atomic_state *state;
+       struct dpu_kms *dpu_kms;
+       int ret = 0, num_crtcs = 0;
+
+       if (!dev)
+               return -EINVAL;
+
+       ddev = dev_get_drvdata(dev);
+       if (!ddev || !ddev_to_msm_kms(ddev))
+               return -EINVAL;
+
+       dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+       /* disable hot-plug polling */
+       drm_kms_helper_poll_disable(ddev);
+
+       /* acquire modeset lock(s) */
+       drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+       DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+       ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+       if (ret)
+               goto unlock;
+
+       /* save current state for resume */
+       if (dpu_kms->suspend_state)
+               drm_atomic_state_put(dpu_kms->suspend_state);
+       dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+       if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+               DRM_ERROR("failed to back up suspend state\n");
+               dpu_kms->suspend_state = NULL;
+               goto unlock;
+       }
+
+       /* create atomic state to disable all CRTCs */
+       state = drm_atomic_state_alloc(ddev);
+       if (IS_ERR_OR_NULL(state)) {
+               DRM_ERROR("failed to allocate crtc disable state\n");
+               goto unlock;
+       }
+
+       state->acquire_ctx = &ctx;
+
+       /* check for nothing to do */
+       if (num_crtcs == 0) {
+               DRM_DEBUG("all crtcs are already in the off state\n");
+               drm_atomic_state_put(state);
+               goto suspended;
+       }
+
+       /* commit the "disable all" state */
+       ret = drm_atomic_commit(state);
+       if (ret < 0) {
+               DRM_ERROR("failed to disable crtcs, %d\n", ret);
+               drm_atomic_state_put(state);
+               goto unlock;
+       }
+
+suspended:
+       dpu_kms->suspend_block = true;
+
+unlock:
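+       /* modeset lock contention: back off and retry the whole sequence */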
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       }
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
+       DPU_ATRACE_END("kms_pm_suspend");
+       return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+       struct drm_device *ddev;
+       struct dpu_kms *dpu_kms;
+       int ret;
+
+       if (!dev)
+               return -EINVAL;
+
+       ddev = dev_get_drvdata(dev);
+       if (!ddev || !ddev_to_msm_kms(ddev))
+               return -EINVAL;
+
+       dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+       DPU_ATRACE_BEGIN("kms_pm_resume");
+
+       drm_mode_config_reset(ddev);
+
+       drm_modeset_lock_all(ddev);
+
+       dpu_kms->suspend_block = false;
+
+       if (dpu_kms->suspend_state) {
+               dpu_kms->suspend_state->acquire_ctx =
+                       ddev->mode_config.acquire_ctx;
+               ret = drm_atomic_commit(dpu_kms->suspend_state);
+               if (ret < 0) {
+                       DRM_ERROR("failed to restore state, %d\n", ret);
+                       drm_atomic_state_put(dpu_kms->suspend_state);
+               }
+               dpu_kms->suspend_state = NULL;
+       }
+       drm_modeset_unlock_all(ddev);
+
+       /* enable hot-plug polling */
+       drm_kms_helper_poll_enable(ddev);
+
+       DPU_ATRACE_END("kms_pm_resume");
+       return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+                                struct drm_encoder *encoder,
+                                bool cmd_mode)
+{
+       struct msm_display_info info;
+       struct msm_drm_private *priv = encoder->dev->dev_private;
+       int i, rc = 0;
+
+       memset(&info, 0, sizeof(info));
+
+       info.intf_type = encoder->encoder_type;
+       info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+                       MSM_DISPLAY_CAP_VID_MODE;
+
+       /* TODO: No support for DSI swap */
+       for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+               if (priv->dsi[i]) {
+                       info.h_tile_instance[info.num_of_h_tiles] = i;
+                       info.num_of_h_tiles++;
+               }
+       }
+
+       rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+       if (rc)
+               DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+                       encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+       .hw_init         = dpu_kms_hw_init,
+       .irq_preinstall  = dpu_irq_preinstall,
+       .irq_postinstall = dpu_irq_postinstall,
+       .irq_uninstall   = dpu_irq_uninstall,
+       .irq             = dpu_irq,
+       .prepare_commit  = dpu_kms_prepare_commit,
+       .commit          = dpu_kms_commit,
+       .complete_commit = dpu_kms_complete_commit,
+       .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+       .enable_vblank   = dpu_kms_enable_vblank,
+       .disable_vblank  = dpu_kms_disable_vblank,
+       .check_modified_format = dpu_format_check_modified_format,
+       .get_format      = dpu_get_msm_format,
+       .round_pixclk    = dpu_kms_round_pixclk,
+       .pm_suspend      = dpu_kms_pm_suspend,
+       .pm_resume       = dpu_kms_pm_resume,
+       .destroy         = dpu_kms_destroy,
+       .set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_init    = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller must turn on the clocks before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+       dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+       struct msm_mmu *mmu;
+
+       mmu = dpu_kms->base.aspace->mmu;
+
+       mmu->funcs->detach(mmu, (const char **)iommu_ports,
+                       ARRAY_SIZE(iommu_ports));
+       msm_gem_address_space_put(dpu_kms->base.aspace);
+
+       return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+       struct iommu_domain *domain;
+       struct msm_gem_address_space *aspace;
+       int ret;
+
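+       /* a missing IOMMU is not an error: simply run without an address space */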
+       domain = iommu_domain_alloc(&platform_bus_type);
+       if (!domain)
+               return 0;
+
+       aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+                       domain, "dpu1");
+       if (IS_ERR(aspace)) {
+               ret = PTR_ERR(aspace);
+               goto fail;
+       }
+
+       dpu_kms->base.aspace = aspace;
+
+       ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+                       ARRAY_SIZE(iommu_ports));
+       if (ret) {
+               DPU_ERROR("failed to attach iommu %d\n", ret);
+               msm_gem_address_space_put(aspace);
+               goto fail;
+       }
+
+       return 0;
+fail:
+       _dpu_kms_mmu_destroy(dpu_kms);
+
+       return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+               char *clock_name)
+{
+       struct dss_module_power *mp = &dpu_kms->mp;
+       int i;
+
+       for (i = 0; i < mp->num_clk; i++) {
+               if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+                       return &mp->clk_config[i];
+       }
+
+       return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+       struct dss_clk *clk;
+
+       clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+       if (!clk)
+               return -EINVAL;
+
+       return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+       struct dpu_kms *dpu_kms = usr;
+
+       if (!dpu_kms)
+               return;
+
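+       /* VBIF memory type settings must be reprogrammed after each power up */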
+       if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+               dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms;
+       struct drm_device *dev;
+       struct msm_drm_private *priv;
+       int i, rc = -EINVAL;
+
+       if (!kms) {
+               DPU_ERROR("invalid kms\n");
+               goto end;
+       }
+
+       dpu_kms = to_dpu_kms(kms);
+       dev = dpu_kms->dev;
+       if (!dev) {
+               DPU_ERROR("invalid device\n");
+               goto end;
+       }
+
+       rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+       if (rc) {
+               DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+               goto end;
+       }
+
+       priv = dev->dev_private;
+       if (!priv) {
+               DPU_ERROR("invalid private data\n");
+               goto dbg_destroy;
+       }
+
+       dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+       if (IS_ERR(dpu_kms->mmio)) {
+               rc = PTR_ERR(dpu_kms->mmio);
+               DPU_ERROR("mdp register memory map failed: %d\n", rc);
+               dpu_kms->mmio = NULL;
+               goto error;
+       }
+       DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+       dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+       dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+       if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+               rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+               DPU_ERROR("vbif register memory map failed: %d\n", rc);
+               dpu_kms->vbif[VBIF_RT] = NULL;
+               goto error;
+       }
+       dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+       dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+       if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+               dpu_kms->vbif[VBIF_NRT] = NULL;
+               DPU_DEBUG("VBIF NRT is not defined\n");
+       } else {
+               dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+                                                            "vbif_nrt");
+       }
+
+       dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+       if (IS_ERR(dpu_kms->reg_dma)) {
+               dpu_kms->reg_dma = NULL;
+               DPU_DEBUG("REG_DMA is not defined\n");
+       } else {
+               dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+       }
+
+       dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+                                       "core");
+       if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+               rc = PTR_ERR(dpu_kms->core_client);
+               if (!dpu_kms->core_client)
+                       rc = -EINVAL;
+               DPU_ERROR("dpu power client create failed: %d\n", rc);
+               dpu_kms->core_client = NULL;
+               goto error;
+       }
+
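+       /* power up the device: the HW revision register needs clocks enabled */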
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+       _dpu_kms_core_hw_rev_init(dpu_kms);
+
+       pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+       dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+       if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+               rc = PTR_ERR(dpu_kms->catalog);
+               if (!dpu_kms->catalog)
+                       rc = -EINVAL;
+               DPU_ERROR("catalog init failed: %d\n", rc);
+               dpu_kms->catalog = NULL;
+               goto power_error;
+       }
+
+       dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+       /*
+        * Now we need to read the HW catalog and initialize resources such as
+        * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+        */
+       rc = _dpu_kms_mmu_init(dpu_kms);
+       if (rc) {
+               DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+               goto power_error;
+       }
+
+       rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+                       dpu_kms->dev);
+       if (rc) {
+               DPU_ERROR("rm init failed: %d\n", rc);
+               goto power_error;
+       }
+
+       dpu_kms->rm_init = true;
+
+       dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+       if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+               rc = PTR_ERR(dpu_kms->hw_mdp);
+               if (!dpu_kms->hw_mdp)
+                       rc = -EINVAL;
+               DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+               dpu_kms->hw_mdp = NULL;
+               goto power_error;
+       }
+
+       for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+               u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+               dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
+                               dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+               if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+                       rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+                       if (!dpu_kms->hw_vbif[vbif_idx])
+                               rc = -EINVAL;
+                       DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+                       dpu_kms->hw_vbif[vbif_idx] = NULL;
+                       goto power_error;
+               }
+       }
+
+       rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+                       &dpu_kms->phandle,
+                       _dpu_kms_get_clk(dpu_kms, "core"));
+       if (rc) {
+               DPU_ERROR("failed to init perf %d\n", rc);
+               goto perf_err;
+       }
+
+       dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+       if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+               rc = PTR_ERR(dpu_kms->hw_intr);
+               DPU_ERROR("hw_intr init failed: %d\n", rc);
+               dpu_kms->hw_intr = NULL;
+               goto hw_intr_init_err;
+       }
+
+       /*
+        * _dpu_kms_drm_obj_init should create the DRM related objects
+        * i.e. CRTCs, planes, encoders, connectors and so forth
+        */
+       rc = _dpu_kms_drm_obj_init(dpu_kms);
+       if (rc) {
+               DPU_ERROR("modeset init failed: %d\n", rc);
+               goto drm_obj_init_err;
+       }
+
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+
+       /*
+        * max crtc width is equal to the max mixer width * 2 and max height
+        * is 4096
+        */
+       dev->mode_config.max_width =
+                       dpu_kms->catalog->caps->max_mixer_width * 2;
+       dev->mode_config.max_height = 4096;
+
+       /*
+        * Support format modifiers for compression etc.
+        */
+       dev->mode_config.allow_fb_modifiers = true;
+
+       /*
+        * Handle (re)initializations during power enable
+        */
+       dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+       dpu_kms->power_event = dpu_power_handle_register_event(
+                       &dpu_kms->phandle,
+                       DPU_POWER_EVENT_POST_ENABLE,
+                       dpu_kms_handle_power_event, dpu_kms, "kms");
+
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+       return 0;
+
+drm_obj_init_err:
+       dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+       _dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+       dpu_dbg_destroy();
+end:
+       return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+       int irq;
+
+       if (!dev || !dev->dev_private) {
+               DPU_ERROR("drm device node invalid\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       priv = dev->dev_private;
+       dpu_kms = to_dpu_kms(priv->kms);
+
+       irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+       if (!irq) {
+               DPU_ERROR("failed to get irq\n");
+               return ERR_PTR(-EINVAL);
+       }
+       dpu_kms->base.irq = irq;
+
+       return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+       struct drm_device *ddev = dev_get_drvdata(master);
+       struct platform_device *pdev = to_platform_device(dev);
+       struct msm_drm_private *priv = ddev->dev_private;
+       struct dpu_kms *dpu_kms;
+       struct dss_module_power *mp;
+       int ret = 0;
+
+       dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+       if (!dpu_kms)
+               return -ENOMEM;
+
+       mp = &dpu_kms->mp;
+       ret = msm_dss_parse_clock(pdev, mp);
+       if (ret) {
+               DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+               return ret;
+       }
+
+       dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+       platform_set_drvdata(pdev, dpu_kms);
+
+       msm_kms_init(&dpu_kms->base, &kms_funcs);
+       dpu_kms->dev = ddev;
+       dpu_kms->pdev = pdev;
+
+       pm_runtime_enable(&pdev->dev);
+       dpu_kms->rpm_enabled = true;
+
+       priv->kms = &dpu_kms->base;
+       return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+       struct dss_module_power *mp = &dpu_kms->mp;
+
+       dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+       msm_dss_put_clk(mp->clk_config, mp->num_clk);
+       devm_kfree(&pdev->dev, mp->clk_config);
+       mp->num_clk = 0;
+
+       if (dpu_kms->rpm_enabled)
+               pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+       .bind   = dpu_bind,
+       .unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &dpu_ops);
+       return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+       int rc = -1;
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+       struct drm_device *ddev;
+       struct dss_module_power *mp = &dpu_kms->mp;
+
+       ddev = dpu_kms->dev;
+       if (!ddev) {
+               DPU_ERROR("invalid drm_device\n");
+               goto exit;
+       }
+
+       rc = dpu_power_resource_enable(&dpu_kms->phandle,
+                       dpu_kms->core_client, false);
+       if (rc)
+               DPU_ERROR("resource disable failed: %d\n", rc);
+
+       rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+       if (rc)
+               DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+       return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+       int rc = -1;
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+       struct drm_device *ddev;
+       struct dss_module_power *mp = &dpu_kms->mp;
+
+       ddev = dpu_kms->dev;
+       if (!ddev) {
+               DPU_ERROR("invalid drm_device\n");
+               goto exit;
+       }
+
+       rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+       if (rc) {
+               DPU_ERROR("clock enable failed rc:%d\n", rc);
+               goto exit;
+       }
+
+       rc = dpu_power_resource_enable(&dpu_kms->phandle,
+                       dpu_kms->core_client, true);
+       if (rc)
+               DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+       return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+       SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+       { .compatible = "qcom,sdm845-dpu", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+       .probe = dpu_dev_probe,
+       .remove = dpu_dev_remove,
+       .driver = {
+               .name = "msm_dpu",
+               .of_match_table = dpu_dt_match,
+               .pm = &dpu_pm_ops,
+       },
+};
+
+void __init msm_dpu_register(void)
+{
+       platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+       platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644 (file)
index 0000000..66d4666
--- /dev/null
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...)                                                \
+       do {                                                               \
+               if (unlikely(drm_debug & DRM_UT_KMS))                      \
+                       DRM_DEBUG(fmt, ##__VA_ARGS__); \
+               else                                                       \
+                       pr_debug(fmt, ##__VA_ARGS__);                      \
+       } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...)                                         \
+       do {                                                               \
+               if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
+                       DRM_ERROR(fmt, ##__VA_ARGS__); \
+               else                                                       \
+                       pr_debug(fmt, ##__VA_ARGS__);                      \
+       } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error] " fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ *     This macro is similar to the standard ktime_compare() function, but
+ *     attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+       ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
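+/*
+ * Illustrative sketch (not part of this header): comparing the
+ * difference against zero stays correct when one operand has wrapped,
+ * where a direct ktime_compare(A, B) would not, e.g.:
+ *
+ *   ktime_t deadline = ktime_add_ns(last_vsync, frame_period_ns);
+ *   if (ktime_compare_safe(now, deadline) > 0)
+ *           handle_frame_done_timeout();
+ *
+ * 'last_vsync', 'frame_period_ns' and 'handle_frame_done_timeout' are
+ * hypothetical names used only for this example.
+ */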
+
+#define DPU_NAME_SIZE  12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT 60
+
+/*
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list entry in the per-irq callback list
+ * @func: interrupt handler callback
+ * @arg: argument passed to the handler
+ */
+struct dpu_irq_callback {
+       struct list_head list;
+       void (*func)(void *arg, int irq_idx);
+       void *arg;
+};
+
+/**
+ * struct dpu_irq - IRQ callback registration info
+ * @total_irqs:   total number of irq_idx mapped from HW interrupts
+ * @irq_cb_tbl:   array of IRQ callback lists, one per irq_idx
+ * @enable_counts: array of IRQ enable reference counts
+ * @irq_counts:   array of IRQ trigger counts for statistics
+ * @cb_lock:      callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+       u32 total_irqs;
+       struct list_head *irq_cb_tbl;
+       atomic_t *enable_counts;
+       atomic_t *irq_counts;
+       spinlock_t cb_lock;
+       struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+       struct msm_kms base;
+       struct drm_device *dev;
+       int core_rev;
+       struct dpu_mdss_cfg *catalog;
+
+       struct dpu_power_handle phandle;
+       struct dpu_power_client *core_client;
+       struct dpu_power_event *power_event;
+
+       /* directory entry for debugfs */
+       struct dentry *debugfs_root;
+       struct dentry *debugfs_danger;
+       struct dentry *debugfs_vbif;
+
+       /* io/register spaces: */
+       void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+       unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+       struct regulator *vdd;
+       struct regulator *mmagic;
+       struct regulator *venus;
+
+       struct dpu_hw_intr *hw_intr;
+       struct dpu_irq irq_obj;
+
+       struct dpu_core_perf perf;
+
+       /* saved atomic state during system suspend */
+       struct drm_atomic_state *suspend_state;
+       bool suspend_block;
+
+       struct dpu_rm rm;
+       bool rm_init;
+
+       struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+       struct dpu_hw_mdp *hw_mdp;
+
+       bool has_danger_ctrl;
+
+       struct platform_device *pdev;
+       bool rpm_enabled;
+       struct dss_module_power mp;
+};
+
+struct vsync_info {
+       u32 frame_count;
+       u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+               ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+       if (!ddev_to_msm_kms(dev))
+               return false;
+
+       return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ *                             suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+       if (!dpu_kms_is_suspend_state(dev))
+               return false;
+
+       return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
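+/*
+ * Illustrative sketch (hypothetical call site): a commit path would
+ * typically reject new frames while suspend blocking is in effect:
+ *
+ *   if (dpu_kms_is_suspend_blocked(dev))
+ *           return -EBUSY;
+ */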
+
+/*
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at:
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * struct dpu_debugfs_regset32 - companion structure for
+ *     dpu_debugfs_create_regset32; do not initialize the members directly,
+ *     use dpu_debugfs_setup_regset32 instead.
+ * @offset:  sub-block offset
+ * @blk_len: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+struct dpu_debugfs_regset32 {
+       uint32_t offset;
+       uint32_t blk_len;
+       struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+               uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets does not need to be provided. The 'read' function simply
+ * outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name:   File name within debugfs
+ * @mode:   File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ *         or debugfs_remove_recursive() (on a parent directory) to remove the
+ *         file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+               void *parent, struct dpu_debugfs_regset32 *regset);
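+
+/*
+ * Illustrative usage sketch, with example offsets and file name only:
+ * dump a 0x100-byte register block at offset 0x1000 under the DPU
+ * debugfs root. The regset must outlive the debugfs file, so embed it
+ * in a long-lived structure rather than on the stack:
+ *
+ *   dpu_debugfs_setup_regset32(&pdpu->debugfs_src, 0x1000, 0x100,
+ *                              dpu_kms);
+ *   dpu_debugfs_create_regset32("src_blk", 0400,
+ *                               dpu_debugfs_get_root(dpu_kms),
+ *                               &pdpu->debugfs_src);
+ */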
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/*
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE  4096
+
+/*
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms:  pointer to dpu_kms structure
+ * @clock_name: name of the clock whose rate is queried
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644 (file)
index 0000000..9e533b8
--- /dev/null
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS                 0x0010
+
+struct dpu_mdss {
+       struct msm_mdss base;
+       void __iomem *mmio;
+       unsigned long mmio_len;
+       u32 hwversion;
+       struct dss_module_power mp;
+       struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+       struct dpu_mdss *dpu_mdss = arg;
+       u32 interrupts;
+
+       interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
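+       /*
+        * Dispatch each pending bit to its mapped Linux interrupt:
+        * fls() returns the highest set bit plus one, so every
+        * iteration handles one hw irq and clears it from the local
+        * status copy.
+        */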
+       while (interrupts) {
+               irq_hw_number_t hwirq = fls(interrupts) - 1;
+               unsigned int mapping;
+               int rc;
+
+               mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+                                          hwirq);
+               if (mapping == 0) {
+                       DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+                       return IRQ_NONE;
+               }
+
+               rc = generic_handle_irq(mapping);
+               if (rc < 0) {
+                       DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+                                 hwirq, mapping, rc);
+                       return IRQ_NONE;
+               }
+
+               interrupts &= ~(1U << hwirq);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+       struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+       /* complete prior accesses before updating the mask */
+       smp_mb__before_atomic();
+       clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+       /* make the mask update visible before later accesses */
+       smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+       struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+       /* complete prior accesses before updating the mask */
+       smp_mb__before_atomic();
+       set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+       /* make the mask update visible before later accesses */
+       smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+       .name = "dpu_mdss",
+       .irq_mask = dpu_mdss_irq_mask,
+       .irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+               unsigned int irq, irq_hw_number_t hwirq)
+{
+       struct dpu_mdss *dpu_mdss = domain->host_data;
+       int ret;
+
+       irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+       ret = irq_set_chip_data(irq, dpu_mdss);
+
+       return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+       .map = dpu_mdss_irqdomain_map,
+       .xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+       struct device *dev;
+       struct irq_domain *domain;
+
+       dev = dpu_mdss->base.dev->dev;
+
+       domain = irq_domain_add_linear(dev->of_node, 32,
+                       &dpu_mdss_irqdomain_ops, dpu_mdss);
+       if (!domain) {
+               DPU_ERROR("failed to add irq_domain\n");
+               return -EINVAL;
+       }
+
+       dpu_mdss->irq_controller.enabled_mask = 0;
+       dpu_mdss->irq_controller.domain = domain;
+
+       return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+       if (dpu_mdss->irq_controller.domain) {
+               irq_domain_remove(dpu_mdss->irq_controller.domain);
+               dpu_mdss->irq_controller.domain = NULL;
+       }
+       return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+       struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+       struct dss_module_power *mp = &dpu_mdss->mp;
+       int ret;
+
+       ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+       if (ret)
+               DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+       return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+       struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+       struct dss_module_power *mp = &dpu_mdss->mp;
+       int ret;
+
+       ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+       if (ret)
+               DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+       return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev->dev);
+       struct msm_drm_private *priv = dev->dev_private;
+       struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+       struct dss_module_power *mp = &dpu_mdss->mp;
+
+       _dpu_mdss_irq_domain_fini(dpu_mdss);
+
+       msm_dss_put_clk(mp->clk_config, mp->num_clk);
+       devm_kfree(&pdev->dev, mp->clk_config);
+
+       if (dpu_mdss->mmio)
+               devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+       dpu_mdss->mmio = NULL;
+
+       pm_runtime_disable(dev->dev);
+       priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+       .enable = dpu_mdss_enable,
+       .disable = dpu_mdss_disable,
+       .destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev->dev);
+       struct msm_drm_private *priv = dev->dev_private;
+       struct resource *res;
+       struct dpu_mdss *dpu_mdss;
+       struct dss_module_power *mp;
+       int ret = 0;
+
+       dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+       if (!dpu_mdss)
+               return -ENOMEM;
+
+       dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+       if (IS_ERR(dpu_mdss->mmio))
+               return PTR_ERR(dpu_mdss->mmio);
+
+       DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+       if (!res) {
+               DRM_ERROR("failed to get memory resource for mdss\n");
+               return -ENOMEM;
+       }
+       dpu_mdss->mmio_len = resource_size(res);
+
+       mp = &dpu_mdss->mp;
+       ret = msm_dss_parse_clock(pdev, mp);
+       if (ret) {
+               DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+               goto clk_parse_err;
+       }
+
+       dpu_mdss->base.dev = dev;
+       dpu_mdss->base.funcs = &mdss_funcs;
+
+       ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+       if (ret)
+               goto irq_domain_error;
+
+       ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+                       dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+       if (ret) {
+               DPU_ERROR("failed to init irq: %d\n", ret);
+               goto irq_error;
+       }
+
+       pm_runtime_enable(dev->dev);
+
+       pm_runtime_get_sync(dev->dev);
+       dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+       pm_runtime_put_sync(dev->dev);
+
+       priv->mdss = &dpu_mdss->base;
+
+       return ret;
+
+irq_error:
+       _dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+       msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+       devm_kfree(&pdev->dev, mp->clk_config);
+       if (dpu_mdss->mmio)
+               devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+       dpu_mdss->mmio = NULL;
+       return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644 (file)
index 0000000..b640e39
--- /dev/null
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+               (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+               (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT       21
+#define PHASE_STEP_UNIT_SCALE   ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL         15
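+
+/* Example: DECIMATED_DIMENSION(1080, 1) == (1080 + 1) >> 1 == 540. */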
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT       8
+#define SHARP_NOISE_THR_DEFAULT        2
+
+#define DPU_NAME_SIZE  12
+
+#define DPU_PLANE_COLOR_FILL_FLAG      BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+       R0,
+       R1,
+       R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE   60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Set up VBLANK QoS for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enable amortization within the pipe.
+ *     This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Set up panic signaling for the pipe.
+ */
+enum dpu_plane_qos {
+       DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+       DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+       DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to the dpu_csc_cfg structure currently in use
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ * @revalidate: force revalidation of all the plane properties
+ */
+struct dpu_plane {
+       struct drm_plane base;
+
+       struct mutex lock;
+
+       enum dpu_sspp pipe;
+       uint32_t features;      /* capabilities from catalog */
+       uint32_t nformats;
+       uint32_t formats[64];
+
+       struct dpu_hw_pipe *pipe_hw;
+       struct dpu_hw_pipe_cfg pipe_cfg;
+       struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+       uint32_t color_fill;
+       bool is_error;
+       bool is_rt_pipe;
+       bool is_virtual;
+       struct list_head mplane_list;
+       struct dpu_mdss_cfg *catalog;
+
+       struct dpu_csc_cfg *csc_ptr;
+
+       const struct dpu_sspp_sub_blks *pipe_sblk;
+       char pipe_name[DPU_NAME_SIZE];
+
+       /* debugfs related stuff */
+       struct dentry *debugfs_root;
+       struct dpu_debugfs_regset32 debugfs_src;
+       struct dpu_debugfs_regset32 debugfs_scaler;
+       struct dpu_debugfs_regset32 debugfs_csc;
+       bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+       struct msm_drm_private *priv;
+
+       if (!plane || !plane->dev)
+               return NULL;
+       priv = plane->dev->dev_private;
+       if (!priv)
+               return NULL;
+       return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+       return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+       return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane:             Pointer to drm plane
+ * @fmt:               Pointer to source buffer format
+ * @src_width:         width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+               const struct dpu_format *fmt, u32 src_width)
+{
+       struct dpu_plane *pdpu, *tmp;
+       struct dpu_plane_state *pstate;
+       u32 fixed_buff_size;
+       u32 total_fl;
+
+       if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+               DPU_ERROR("invalid arguments\n");
+               return 0;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       pstate = to_dpu_plane_state(plane->state);
+       fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+       list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+               if (!dpu_plane_enabled(tmp->base.state))
+                       continue;
+               DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+                               pdpu->base.base.id, tmp->base.base.id,
+                               src_width,
+                               drm_rect_width(&tmp->pipe_cfg.src_rect));
+               src_width = max_t(u32, src_width,
+                                 drm_rect_width(&tmp->pipe_cfg.src_rect));
+       }
+
+       if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+               if (fmt->chroma_sample == DPU_CHROMA_420) {
+                       /* NV12 */
+                       total_fl = (fixed_buff_size / 2) /
+                               ((src_width + 32) * fmt->bpp);
+               } else {
+                       /* non NV12 */
+                       total_fl = (fixed_buff_size / 2) * 2 /
+                               ((src_width + 32) * fmt->bpp);
+               }
+       } else {
+               if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+                       total_fl = (fixed_buff_size / 2) * 2 /
+                               ((src_width + 32) * fmt->bpp);
+               } else {
+                       total_fl = (fixed_buff_size) * 2 /
+                               ((src_width + 32) * fmt->bpp);
+               }
+       }
+
+       DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+                       plane->base.id, pdpu->pipe - SSPP_VIG0,
+                       (char *)&fmt->base.pixel_format,
+                       src_width, total_fl);
+
+       return total_fl;
+}
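+
+/*
+ * Worked example with hypothetical numbers: for a linear RGB format
+ * with fmt->bpp == 4, src_width == 1920 and a 12 KiB pixel RAM, the
+ * non-multirect branch yields 12288 * 2 / ((1920 + 32) * 4) == 3.
+ */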
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl:               Pointer to LUT table
+ * @total_fl:          fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+               u32 total_fl)
+{
+       int i;
+
+       if (!tbl || !tbl->nentry || !tbl->entries)
+               return 0;
+
+       for (i = 0; i < tbl->nentry; i++)
+               if (total_fl <= tbl->entries[i].fl)
+                       return tbl->entries[i].lut;
+
+       /* if last fl is zero, use as default */
+       if (!tbl->entries[i-1].fl)
+               return tbl->entries[i-1].lut;
+
+       return 0;
+}
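+
+/*
+ * Example: with entries {fl=10, lut=A} and {fl=0, lut=B}, a fill level
+ * of 8 returns A, while 12 matches no entry and falls back to B via
+ * the zero-fl catch-all; without a zero-fl entry it would return 0.
+ */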
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane:             Pointer to drm plane
+ * @fb:                        Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
+{
+       struct dpu_plane *pdpu;
+       const struct dpu_format *fmt = NULL;
+       u64 qos_lut;
+       u32 total_fl = 0, lut_usage;
+
+       if (!plane || !fb) {
+               DPU_ERROR("invalid arguments plane %d fb %d\n",
+                               plane != 0, fb != 0);
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+
+       if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+               return;
+       }
+
+       if (!pdpu->is_rt_pipe) {
+               lut_usage = DPU_QOS_LUT_USAGE_NRT;
+       } else {
+               fmt = dpu_get_dpu_format_ext(
+                               fb->format->format,
+                               fb->modifier);
+               total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+                               drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+               if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+                       lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+               else
+                       lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+       }
+
+       qos_lut = _dpu_plane_get_qos_lut(
+                       &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+       pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+       trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+                       (fmt) ? fmt->base.pixel_format : 0,
+                       pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+       DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+                       plane->base.id,
+                       pdpu->pipe - SSPP_VIG0,
+                       fmt ? (char *)&fmt->base.pixel_format : NULL,
+                       pdpu->is_rt_pipe, total_fl, qos_lut);
+
+       pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane:             Pointer to drm plane
+ * @fb:                        Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+               struct drm_framebuffer *fb)
+{
+       struct dpu_plane *pdpu;
+       const struct dpu_format *fmt = NULL;
+       u32 danger_lut, safe_lut;
+
+       if (!plane || !fb) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+
+       if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+               return;
+       }
+
+       if (!pdpu->is_rt_pipe) {
+               danger_lut = pdpu->catalog->perf.danger_lut_tbl
+                               [DPU_QOS_LUT_USAGE_NRT];
+               safe_lut = pdpu->catalog->perf.safe_lut_tbl
+                               [DPU_QOS_LUT_USAGE_NRT];
+       } else {
+               fmt = dpu_get_dpu_format_ext(
+                               fb->format->format,
+                               fb->modifier);
+
+               if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+                       danger_lut = pdpu->catalog->perf.danger_lut_tbl
+                                       [DPU_QOS_LUT_USAGE_LINEAR];
+                       safe_lut = pdpu->catalog->perf.safe_lut_tbl
+                                       [DPU_QOS_LUT_USAGE_LINEAR];
+               } else {
+                       danger_lut = pdpu->catalog->perf.danger_lut_tbl
+                                       [DPU_QOS_LUT_USAGE_MACROTILE];
+                       safe_lut = pdpu->catalog->perf.safe_lut_tbl
+                                       [DPU_QOS_LUT_USAGE_MACROTILE];
+               }
+       }
+
+       pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+       pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+       trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+                       (fmt) ? fmt->base.pixel_format : 0,
+                       (fmt) ? fmt->fetch_mode : 0,
+                       pdpu->pipe_qos_cfg.danger_lut,
+                       pdpu->pipe_qos_cfg.safe_lut);
+
+       DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+               plane->base.id,
+               pdpu->pipe - SSPP_VIG0,
+               fmt ? (char *)&fmt->base.pixel_format : NULL,
+               fmt ? fmt->fetch_mode : -1,
+               pdpu->pipe_qos_cfg.danger_lut,
+               pdpu->pipe_qos_cfg.safe_lut);
+
+       pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+                       &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane:             Pointer to drm plane
+ * @enable:            true to enable QoS control
+ * @flags:             QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+       bool enable, u32 flags)
+{
+       struct dpu_plane *pdpu;
+
+       if (!plane) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+
+       if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+               return;
+       }
+
+       if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+               pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+               pdpu->pipe_qos_cfg.danger_vblank =
+                               pdpu->pipe_sblk->danger_vblank;
+               pdpu->pipe_qos_cfg.vblank_en = enable;
+       }
+
+       if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+               /* this feature overrules previous VBLANK_CTRL */
+               pdpu->pipe_qos_cfg.vblank_en = false;
+               pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+       }
+
+       if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+               pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+       if (!pdpu->is_rt_pipe) {
+               pdpu->pipe_qos_cfg.vblank_en = false;
+               pdpu->pipe_qos_cfg.danger_safe_en = false;
+       }
+
+       DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+               plane->base.id,
+               pdpu->pipe - SSPP_VIG0,
+               pdpu->pipe_qos_cfg.danger_safe_en,
+               pdpu->pipe_qos_cfg.vblank_en,
+               pdpu->pipe_qos_cfg.creq_vblank,
+               pdpu->pipe_qos_cfg.danger_vblank,
+               pdpu->is_rt_pipe);
+
+       pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+                       &pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+       struct dpu_plane *pdpu;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!plane || !plane->dev) {
+               DPU_ERROR("invalid arguments\n");
+               return -EINVAL;
+       }
+
+       priv = plane->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid KMS reference\n");
+               return -EINVAL;
+       }
+
+       dpu_kms = to_dpu_kms(priv->kms);
+       pdpu = to_dpu_plane(plane);
+
+       if (!pdpu->is_rt_pipe)
+               goto end;
+
+       pm_runtime_get_sync(&dpu_kms->pdev->dev);
+       _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+       pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+       return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane:             Pointer to drm plane
+ * @crtc:              Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+               struct drm_crtc *crtc)
+{
+       struct dpu_plane *pdpu;
+       struct dpu_vbif_set_ot_params ot_params;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!plane || !plane->dev || !crtc) {
+               DPU_ERROR("invalid arguments plane %d crtc %d\n",
+                               plane != 0, crtc != 0);
+               return;
+       }
+
+       priv = plane->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid KMS reference\n");
+               return;
+       }
+
+       dpu_kms = to_dpu_kms(priv->kms);
+       pdpu = to_dpu_plane(plane);
+       if (!pdpu->pipe_hw) {
+               DPU_ERROR("invalid pipe reference\n");
+               return;
+       }
+
+       memset(&ot_params, 0, sizeof(ot_params));
+       ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+       ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+       ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+       ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+       ot_params.is_wfd = !pdpu->is_rt_pipe;
+       ot_params.frame_rate = crtc->mode.vrefresh;
+       ot_params.vbif_idx = VBIF_RT;
+       ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+       ot_params.rd = true;
+
+       dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remapping for the given plane
+ * @plane:             Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+       struct dpu_vbif_set_qos_params qos_params;
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms;
+
+       if (!plane || !plane->dev) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       }
+
+       priv = plane->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid KMS reference\n");
+               return;
+       }
+
+       dpu_kms = to_dpu_kms(priv->kms);
+       pdpu = to_dpu_plane(plane);
+       if (!pdpu->pipe_hw) {
+               DPU_ERROR("invalid pipe reference\n");
+               return;
+       }
+
+       memset(&qos_params, 0, sizeof(qos_params));
+       qos_params.vbif_idx = VBIF_RT;
+       qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+       qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+       qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+       qos_params.is_rt = pdpu->is_rt_pipe;
+
+       DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+                       plane->base.id, qos_params.num,
+                       qos_params.vbif_idx,
+                       qos_params.xin_id, qos_params.is_rt,
+                       qos_params.clk_ctrl);
+
+       dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - get the address space for the given plane
+ * @pdpu:   Pointer to DPU plane object
+ * @pstate: Pointer to DPU plane state
+ * @aspace: Output pointer for the GEM address space
+ * Return: 0 on success, negative error code on failure
+ */
+static int _dpu_plane_get_aspace(
+               struct dpu_plane *pdpu,
+               struct dpu_plane_state *pstate,
+               struct msm_gem_address_space **aspace)
+{
+       struct dpu_kms *kms;
+
+       if (!pdpu || !pstate || !aspace) {
+               DPU_ERROR("invalid parameters\n");
+               return -EINVAL;
+       }
+
+       kms = _dpu_plane_get_kms(&pdpu->base);
+       if (!kms) {
+               DPU_ERROR("invalid kms\n");
+               return -EINVAL;
+       }
+
+       *aspace = kms->base.aspace;
+
+       return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+               struct dpu_plane_state *pstate,
+               struct dpu_hw_pipe_cfg *pipe_cfg,
+               struct drm_framebuffer *fb)
+{
+       struct dpu_plane *pdpu;
+       struct msm_gem_address_space *aspace = NULL;
+       int ret;
+
+       if (!plane || !pstate || !pipe_cfg || !fb) {
+               DPU_ERROR(
+                       "invalid arg(s), plane %d state %d cfg %d fb %d\n",
+                       plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       if (!pdpu->pipe_hw) {
+               DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+               return;
+       }
+
+       ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+       if (ret) {
+               DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+               return;
+       }
+
+       ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+       if (ret == -EAGAIN)
+               DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+       else if (ret)
+               DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+       else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+               trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+                                           &pipe_cfg->layout,
+                                           pstate->multirect_index);
+               pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+                                               pstate->multirect_index);
+       }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+               struct dpu_plane_state *pstate,
+               uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+               struct dpu_hw_scaler3_cfg *scale_cfg,
+               const struct dpu_format *fmt,
+               uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+       uint32_t i;
+
+       if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+                       !chroma_subsmpl_v) {
+               DPU_ERROR(
+                       "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+                       !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+                       chroma_subsmpl_v);
+               return;
+       }
+
+       memset(scale_cfg, 0, sizeof(*scale_cfg));
+       memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
+       scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+               mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+       scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+               mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
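+       /*
+        * Chroma components advance through the source at a reduced
+        * rate: for 4:2:0 both chroma phase steps are half the luma
+        * steps computed above.
+        */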
+       scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+               scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+       scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+               scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+       scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+               scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+       scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+               scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+       scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+               scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+       scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+               scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+       for (i = 0; i < DPU_MAX_PLANES; i++) {
+               scale_cfg->src_width[i] = src_w;
+               scale_cfg->src_height[i] = src_h;
+               if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+                       scale_cfg->src_width[i] /= chroma_subsmpl_h;
+                       scale_cfg->src_height[i] /= chroma_subsmpl_v;
+               }
+               scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+               scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+               pstate->pixel_ext.num_ext_pxls_top[i] =
+                       scale_cfg->src_height[i];
+               pstate->pixel_ext.num_ext_pxls_left[i] =
+                       scale_cfg->src_width[i];
+       }
+       if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+               && (src_w == dst_w))
+               return;
+
+       scale_cfg->dst_width = dst_w;
+       scale_cfg->dst_height = dst_h;
+       scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+       scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+       scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+       scale_cfg->lut_flag = 0;
+       scale_cfg->blend_cfg = 1;
+       scale_cfg->enable = 1;
+}
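+
+/*
+ * Worked example: scaling 1920x1080 NV12 to 960x540 gives
+ * phase_step_x[DPU_SSPP_COMP_0] = (1 << 21) * 1920 / 960 = 0x400000;
+ * the 4:2:0 chroma step is half of that (0x200000) and the chroma
+ * src_width is halved to 960.
+ */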
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+       static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+               {
+                       /* S15.16 format */
+                       0x00012A00, 0x00000000, 0x00019880,
+                       0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+                       0x00012A00, 0x00020480, 0x00000000,
+               },
+               /* signed bias */
+               { 0xfff0, 0xff80, 0xff80,},
+               { 0x0, 0x0, 0x0,},
+               /* unsigned clamp */
+               { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+               { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+       };
+       static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+               {
+                       /* S15.16 format */
+                       0x00012A00, 0x00000000, 0x00019880,
+                       0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+                       0x00012A00, 0x00020480, 0x00000000,
+                       },
+               /* signed bias */
+               { 0xffc0, 0xfe00, 0xfe00,},
+               { 0x0, 0x0, 0x0,},
+               /* unsigned clamp */
+               { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+               { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+       };
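+
+       /*
+        * Both tables encode the limited-range BT.601 YUV -> RGB matrix
+        * (approximately 1.164, 1.596, -0.392, -0.813 and 2.017 in
+        * S15.16); they differ only in the 8-bit vs 10-bit bias and
+        * clamp ranges.
+        */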
+
+       if (!pdpu) {
+               DPU_ERROR("invalid plane\n");
+               return;
+       }
+
+       if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+               pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+       else
+               pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+       DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+                       pdpu->csc_ptr->csc_mv[0],
+                       pdpu->csc_ptr->csc_mv[1],
+                       pdpu->csc_ptr->csc_mv[2]);
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+               struct dpu_plane_state *pstate,
+               const struct dpu_format *fmt, bool color_fill)
+{
+       struct dpu_hw_pixel_ext *pe;
+       uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+       if (!pdpu || !fmt || !pstate) {
+               DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+                               pdpu != 0, fmt != 0, pstate != 0);
+               return;
+       }
+
+       pe = &pstate->pixel_ext;
+
+       /* derive chroma subsampling factors from the pixel format */
+       chroma_subsmpl_h =
+               drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+       chroma_subsmpl_v =
+               drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+       /* update scaler. calculate default config for QSEED3 */
+       _dpu_plane_setup_scaler3(pdpu, pstate,
+                       drm_rect_width(&pdpu->pipe_cfg.src_rect),
+                       drm_rect_height(&pdpu->pipe_cfg.src_rect),
+                       drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+                       drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+                       &pstate->scaler3_cfg, fmt,
+                       chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu:   Pointer to DPU plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+               uint32_t color, uint32_t alpha)
+{
+       const struct dpu_format *fmt;
+       const struct drm_plane *plane;
+       struct dpu_plane_state *pstate;
+
+       if (!pdpu || !pdpu->base.state) {
+               DPU_ERROR("invalid plane\n");
+               return -EINVAL;
+       }
+
+       if (!pdpu->pipe_hw) {
+               DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+               return -EINVAL;
+       }
+
+       plane = &pdpu->base;
+       pstate = to_dpu_plane_state(plane->state);
+
+       DPU_DEBUG_PLANE(pdpu, "\n");
+
+       /*
+        * select fill format to match user property expectation,
+        * h/w only supports RGB variants
+        */
+       fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+       /* update sspp */
+       if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+               pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+                               (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+                               pstate->multirect_index);
+
+               /* override scaler/decimation if solid fill */
+               pdpu->pipe_cfg.src_rect.x1 = 0;
+               pdpu->pipe_cfg.src_rect.y1 = 0;
+               pdpu->pipe_cfg.src_rect.x2 =
+                       drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+               pdpu->pipe_cfg.src_rect.y2 =
+                       drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+               _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+               if (pdpu->pipe_hw->ops.setup_format)
+                       pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+                                       fmt, DPU_SSPP_SOLID_FILL,
+                                       pstate->multirect_index);
+
+               if (pdpu->pipe_hw->ops.setup_rects)
+                       pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+                                       &pdpu->pipe_cfg,
+                                       pstate->multirect_index);
+
+               if (pdpu->pipe_hw->ops.setup_pe)
+                       pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+                                       &pstate->pixel_ext);
+
+               if (pdpu->pipe_hw->ops.setup_scaler &&
+                               pstate->multirect_index != DPU_SSPP_RECT_1)
+                       pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+                                       &pdpu->pipe_cfg, &pstate->pixel_ext,
+                                       &pstate->scaler3_cfg);
+       }
+
+       return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+       struct dpu_plane_state *pstate;
+
+       if (!drm_state)
+               return;
+
+       pstate = to_dpu_plane_state(drm_state);
+
+       pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+       pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+       struct dpu_plane_state *pstate[R_MAX];
+       const struct drm_plane_state *drm_state[R_MAX];
+       struct drm_rect src[R_MAX], dst[R_MAX];
+       struct dpu_plane *dpu_plane[R_MAX];
+       const struct dpu_format *fmt[R_MAX];
+       int i, buffer_lines;
+       unsigned int max_tile_height = 1;
+       bool parallel_fetch_qualified = true;
+       bool has_tiled_rect = false;
+
+       for (i = 0; i < R_MAX; i++) {
+               const struct msm_format *msm_fmt;
+
+               drm_state[i] = i ? plane->r1 : plane->r0;
+               msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+               fmt[i] = to_dpu_format(msm_fmt);
+
+               if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+                       has_tiled_rect = true;
+                       if (fmt[i]->tile_height > max_tile_height)
+                               max_tile_height = fmt[i]->tile_height;
+               }
+       }
+
+       for (i = 0; i < R_MAX; i++) {
+               int width_threshold;
+
+               pstate[i] = to_dpu_plane_state(drm_state[i]);
+               dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+               if (pstate[i] == NULL) {
+                       DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+                               drm_state[i]->plane->base.id);
+                       return -EINVAL;
+               }
+
+               src[i].x1 = drm_state[i]->src_x >> 16;
+               src[i].y1 = drm_state[i]->src_y >> 16;
+               src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+               src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+               dst[i] = drm_plane_state_dest(drm_state[i]);
+
+               if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+                   drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+                       DPU_ERROR_PLANE(dpu_plane[i],
+                               "scaling is not supported in multirect mode\n");
+                       return -EINVAL;
+               }
+
+               if (DPU_FORMAT_IS_YUV(fmt[i])) {
+                       DPU_ERROR_PLANE(dpu_plane[i],
+                               "Unsupported format for multirect mode\n");
+                       return -EINVAL;
+               }
+
+               /*
+                * SSPP PD_MEM is split in half, one half for each RECT.
+                * Tiled formats need 5 lines of buffering while fetching
+                * whereas linear formats need only 2 lines.
+                * So we cannot support more than half of the supported SSPP
+                * width for tiled formats.
+                */
+               width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+               if (has_tiled_rect)
+                       width_threshold /= 2;
+
+               if (parallel_fetch_qualified &&
+                   drm_rect_width(&src[i]) > width_threshold)
+                       parallel_fetch_qualified = false;
+
+       }
+
+       /* Validate RECTs and set the mode */
+
+       /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+       if (parallel_fetch_qualified) {
+               pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+               pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+               goto done;
+       }
+
+       /* TIME_MX Mode */
+       buffer_lines = 2 * max_tile_height;
+
+       if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+           dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+               pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+               pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+       } else {
+               DPU_ERROR(
+                       "No multirect mode possible for the planes (%d - %d)\n",
+                       drm_state[R0]->plane->base.id,
+                       drm_state[R1]->plane->base.id);
+               return -EINVAL;
+       }
+
+done:
+       if (dpu_plane[R0]->is_virtual) {
+               pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+               pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+       } else {
+               pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+               pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+       }
+
+       DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+               pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+       DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+               pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+       return 0;
+}
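+
+/*
+ * Example: if either rect uses a UBWC format with tile_height 8,
+ * parallel fetch additionally requires each source width to fit in
+ * half the SSPP line width, and TIME_MX requires the destination
+ * y-ranges to be separated by at least 2 * 8 = 16 lines.
+ */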
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+               u32 *flush_sspp)
+{
+       if (!plane || !ctl || !flush_sspp) {
+               DPU_ERROR("invalid parameters\n");
+               return;
+       }
+
+       *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+               struct drm_plane_state *new_state)
+{
+       struct drm_framebuffer *fb = new_state->fb;
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+       struct dpu_hw_fmt_layout layout;
+       struct drm_gem_object *obj;
+       struct msm_gem_object *msm_obj;
+       struct dma_fence *fence;
+       struct msm_gem_address_space *aspace;
+       int ret;
+
+       if (!new_state->fb)
+               return 0;
+
+       DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+       ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+       if (ret) {
+               DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+               return ret;
+       }
+
+       /* cache aspace */
+       pstate->aspace = aspace;
+
+       /*
+        * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+        *       we can use msm_atomic_prepare_fb() instead of doing the
+        *       implicit fence and fb prepare by hand here.
+        */
+       obj = msm_framebuffer_bo(new_state->fb, 0);
+       msm_obj = to_msm_bo(obj);
+       fence = reservation_object_get_excl_rcu(msm_obj->resv);
+       if (fence)
+               drm_atomic_set_fence_for_plane(new_state, fence);
+
+       if (pstate->aspace) {
+               ret = msm_framebuffer_prepare(new_state->fb,
+                               pstate->aspace);
+               if (ret) {
+                       DPU_ERROR("failed to prepare framebuffer\n");
+                       return ret;
+               }
+       }
+
+       /* validate framebuffer layout before commit */
+       ret = dpu_format_populate_layout(pstate->aspace,
+                       new_state->fb, &layout);
+       if (ret) {
+               DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+               struct drm_plane_state *old_state)
+{
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_plane_state *old_pstate;
+
+       if (!old_state || !old_state->fb)
+               return;
+
+       old_pstate = to_dpu_plane_state(old_state);
+
+       DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+       msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+                                  struct drm_rect *fb_rect,
+                                  uint32_t min_src_size)
+{
+       /* Ensure fb size is supported */
+       if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+           drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+               return false;
+
+       /* Ensure src rect is above the minimum size */
+       if (drm_rect_width(src) < min_src_size ||
+           drm_rect_height(src) < min_src_size)
+               return false;
+
+       /* Ensure src is fully encapsulated in fb */
+       return drm_rect_intersect(fb_rect, src) &&
+               drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+               struct drm_plane_state *state)
+{
+       int ret = 0;
+       struct dpu_plane *pdpu;
+       struct dpu_plane_state *pstate;
+       const struct dpu_format *fmt;
+       struct drm_rect src, dst, fb_rect = { 0 };
+       uint32_t max_upscale = 1, max_downscale = 1;
+       uint32_t min_src_size, max_linewidth;
+       int hscale = 1, vscale = 1;
+
+       if (!plane || !state) {
+               DPU_ERROR("invalid arg(s), plane %d state %d\n",
+                               plane != 0, state != 0);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       pstate = to_dpu_plane_state(state);
+
+       if (!pdpu->pipe_sblk) {
+               DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       src.x1 = state->src_x >> 16;
+       src.y1 = state->src_y >> 16;
+       src.x2 = src.x1 + (state->src_w >> 16);
+       src.y2 = src.y1 + (state->src_h >> 16);
+
+       dst = drm_plane_state_dest(state);
+
+       fb_rect.x2 = state->fb->width;
+       fb_rect.y2 = state->fb->height;
+
+       max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+       if (pdpu->features & DPU_SSPP_SCALER) {
+               max_downscale = pdpu->pipe_sblk->maxdwnscale;
+               max_upscale = pdpu->pipe_sblk->maxupscale;
+       }
+       if (drm_rect_width(&src) < drm_rect_width(&dst))
+               hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+       else
+               hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+       if (drm_rect_height(&src) < drm_rect_height(&dst))
+               vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+       else
+               vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+       DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+               dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+       if (!dpu_plane_enabled(state))
+               goto exit;
+
+       fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+       min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+       if (DPU_FORMAT_IS_YUV(fmt) &&
+               (!(pdpu->features & DPU_SSPP_SCALER) ||
+                !(pdpu->features & (BIT(DPU_SSPP_CSC)
+                | BIT(DPU_SSPP_CSC_10BIT))))) {
+               DPU_ERROR_PLANE(pdpu,
+                               "plane doesn't have scaler/csc for yuv\n");
+               ret = -EINVAL;
+
+       /* check src bounds */
+       } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+               DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+                               DRM_RECT_ARG(&src));
+               ret = -E2BIG;
+
+       /* valid yuv image */
+       } else if (DPU_FORMAT_IS_YUV(fmt) &&
+                  (src.x1 & 0x1 || src.y1 & 0x1 ||
+                   drm_rect_width(&src) & 0x1 ||
+                   drm_rect_height(&src) & 0x1)) {
+               DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+                               DRM_RECT_ARG(&src));
+               ret = -EINVAL;
+
+       /* min dst support */
+       } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+               DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+                               DRM_RECT_ARG(&dst));
+               ret = -EINVAL;
+
+       /* check decimated source width */
+       } else if (drm_rect_width(&src) > max_linewidth) {
+               DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+                               DRM_RECT_ARG(&src), max_linewidth);
+               ret = -E2BIG;
+
+       /* check scaler capability */
+       } else if (hscale < 0 || vscale < 0) {
+               DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+                               DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+                               DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+               ret = -E2BIG;
+       }
+
+exit:
+       return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+               struct drm_plane_state *state)
+{
+       if (!state->fb)
+               return 0;
+
+       DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+       return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+       struct dpu_plane_state *pstate;
+
+       if (!plane || !plane->state) {
+               DPU_ERROR("invalid plane\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       pstate = to_dpu_plane_state(plane->state);
+
+       /*
+        * These updates have to be done immediately before the plane flush
+        * timing, and may not be moved to the atomic_update/mode_set functions.
+        */
+       if (pdpu->is_error)
+               /* force white frame with 100% alpha pipe output on error */
+               _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+       else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+               /* force 100% alpha */
+               _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+       else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+               pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+       /* flag h/w flush complete */
+       if (plane->state)
+               pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error status to set on the plane
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+       struct dpu_plane *pdpu;
+
+       if (!plane)
+               return;
+
+       pdpu = to_dpu_plane(plane);
+       pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+                               struct drm_plane_state *old_state)
+{
+       uint32_t nplanes, src_flags;
+       struct dpu_plane *pdpu;
+       struct drm_plane_state *state;
+       struct dpu_plane_state *pstate;
+       struct dpu_plane_state *old_pstate;
+       const struct dpu_format *fmt;
+       struct drm_crtc *crtc;
+       struct drm_framebuffer *fb;
+       struct drm_rect src, dst;
+
+       if (!plane) {
+               DPU_ERROR("invalid plane\n");
+               return -EINVAL;
+       } else if (!plane->state) {
+               DPU_ERROR("invalid plane state\n");
+               return -EINVAL;
+       } else if (!old_state) {
+               DPU_ERROR("invalid old state\n");
+               return -EINVAL;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       state = plane->state;
+
+       pstate = to_dpu_plane_state(state);
+
+       old_pstate = to_dpu_plane_state(old_state);
+
+       crtc = state->crtc;
+       fb = state->fb;
+       if (!crtc || !fb) {
+               DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+                               crtc != NULL, fb != NULL);
+               return -EINVAL;
+       }
+       fmt = to_dpu_format(msm_framebuffer_format(fb));
+       nplanes = fmt->num_planes;
+
+       memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+       _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+       pstate->pending = true;
+
+       pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+       _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
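+       /* src_* values are Q16.16 fixed point; convert to integer pixels */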
+       src.x1 = state->src_x >> 16;
+       src.y1 = state->src_y >> 16;
+       src.x2 = src.x1 + (state->src_w >> 16);
+       src.y2 = src.y1 + (state->src_h >> 16);
+
+       dst = drm_plane_state_dest(state);
+
+       DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+                       ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+                       crtc->base.id, DRM_RECT_ARG(&dst),
+                       (char *)&fmt->base.pixel_format,
+                       DPU_FORMAT_IS_UBWC(fmt));
+
+       pdpu->pipe_cfg.src_rect = src;
+       pdpu->pipe_cfg.dst_rect = dst;
+
+       _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+       /* override for color fill */
+       if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+               /* skip remaining processing on color fill */
+               return 0;
+       }
+
+       if (pdpu->pipe_hw->ops.setup_rects) {
+               pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+                               &pdpu->pipe_cfg,
+                               pstate->multirect_index);
+       }
+
+       if (pdpu->pipe_hw->ops.setup_pe &&
+                       (pstate->multirect_index != DPU_SSPP_RECT_1))
+               pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+                               &pstate->pixel_ext);
+
+       /*
+        * When programmed in multirect mode, the scaler block is
+        * bypassed. Alpha and bitwidth still need to be updated, but
+        * only for RECT0.
+        */
+       if (pdpu->pipe_hw->ops.setup_scaler &&
+                       pstate->multirect_index != DPU_SSPP_RECT_1)
+               pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+                               &pdpu->pipe_cfg, &pstate->pixel_ext,
+                               &pstate->scaler3_cfg);
+
+       if (pdpu->pipe_hw->ops.setup_multirect)
+               pdpu->pipe_hw->ops.setup_multirect(
+                               pdpu->pipe_hw,
+                               pstate->multirect_index,
+                               pstate->multirect_mode);
+
+       if (pdpu->pipe_hw->ops.setup_format) {
+               src_flags = 0x0;
+
+               /* update format */
+               pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+                               pstate->multirect_index);
+
+               if (pdpu->pipe_hw->ops.setup_cdp) {
+                       struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+                       memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+                       cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+                                       [DPU_PERF_CDP_USAGE_RT].rd_enable;
+                       cdp_cfg->ubwc_meta_enable =
+                                       DPU_FORMAT_IS_UBWC(fmt);
+                       cdp_cfg->tile_amortize_enable =
+                                       DPU_FORMAT_IS_UBWC(fmt) ||
+                                       DPU_FORMAT_IS_TILE(fmt);
+                       cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+                       pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+               }
+
+               /* update csc */
+               if (DPU_FORMAT_IS_YUV(fmt))
+                       _dpu_plane_setup_csc(pdpu);
+               else
+                       pdpu->csc_ptr = NULL;
+       }
+
+       _dpu_plane_set_qos_lut(plane, fb);
+       _dpu_plane_set_danger_lut(plane, fb);
+
+       if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+               _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+               _dpu_plane_set_ot_limit(plane, crtc);
+       }
+
+       _dpu_plane_set_qos_remap(plane);
+       return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+                               struct drm_plane_state *old_state)
+{
+       struct dpu_plane *pdpu;
+       struct drm_plane_state *state;
+       struct dpu_plane_state *pstate;
+
+       if (!plane) {
+               DPU_ERROR("invalid plane\n");
+               return;
+       } else if (!plane->state) {
+               DPU_ERROR("invalid plane state\n");
+               return;
+       } else if (!old_state) {
+               DPU_ERROR("invalid old state\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       state = plane->state;
+       pstate = to_dpu_plane_state(state);
+
+       trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+                               pstate->multirect_mode);
+
+       pstate->pending = true;
+
+       if (is_dpu_plane_virtual(plane) &&
+                       pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+               pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+                               DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+                               struct drm_plane_state *old_state)
+{
+       struct dpu_plane *pdpu;
+       struct drm_plane_state *state;
+
+       if (!plane) {
+               DPU_ERROR("invalid plane\n");
+               return;
+       } else if (!plane->state) {
+               DPU_ERROR("invalid plane state\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       pdpu->is_error = false;
+       state = plane->state;
+
+       DPU_DEBUG_PLANE(pdpu, "\n");
+
+       if (!dpu_plane_sspp_enabled(state)) {
+               _dpu_plane_atomic_disable(plane, old_state);
+       } else {
+               int ret;
+
+               ret = dpu_plane_sspp_atomic_update(plane, old_state);
+               /* atomic_check should have ensured that this doesn't fail */
+               WARN_ON(ret < 0);
+       }
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+
+       if (!plane || !plane->state) {
+               DPU_ERROR("invalid plane\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+
+       DPU_DEBUG_PLANE(pdpu, "\n");
+
+       /* last plane state is same as current state */
+       dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+       DPU_DEBUG_PLANE(pdpu, "\n");
+
+       if (pdpu) {
+               _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+               mutex_destroy(&pdpu->lock);
+
+               drm_plane_helper_disable(plane, NULL);
+
+               /* this will destroy the states as well */
+               drm_plane_cleanup(plane);
+
+               if (pdpu->pipe_hw)
+                       dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+               kfree(pdpu);
+       }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+               struct drm_plane_state *state)
+{
+       struct dpu_plane_state *pstate;
+
+       if (!plane || !state) {
+               DPU_ERROR("invalid arg(s), plane %d state %d\n",
+                               plane != NULL, state != NULL);
+               return;
+       }
+
+       pstate = to_dpu_plane_state(state);
+
+       /* remove ref count for frame buffers */
+       if (state->fb)
+               drm_framebuffer_put(state->fb);
+
+       kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+       struct dpu_plane_state *pstate;
+       struct dpu_plane_state *old_state;
+
+       if (!plane) {
+               DPU_ERROR("invalid plane\n");
+               return NULL;
+       } else if (!plane->state) {
+               DPU_ERROR("invalid plane state\n");
+               return NULL;
+       }
+
+       old_state = to_dpu_plane_state(plane->state);
+       pdpu = to_dpu_plane(plane);
+       pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+       if (!pstate) {
+               DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+               return NULL;
+       }
+
+       DPU_DEBUG_PLANE(pdpu, "\n");
+
+       pstate->pending = false;
+
+       __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+       return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+       struct dpu_plane_state *pstate;
+
+       if (!plane) {
+               DPU_ERROR("invalid plane\n");
+               return;
+       }
+
+       pdpu = to_dpu_plane(plane);
+       DPU_DEBUG_PLANE(pdpu, "\n");
+
+       /* remove previous state, if present */
+       if (plane->state) {
+               dpu_plane_destroy_state(plane, plane->state);
+               plane->state = NULL;
+       }
+
+       pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+       if (!pstate) {
+               DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+               return;
+       }
+
+       pstate->base.plane = plane;
+
+       plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+                       char __user *buff, size_t count, loff_t *ppos)
+{
+       struct dpu_kms *kms = file->private_data;
+       struct dpu_mdss_cfg *cfg = kms->catalog;
+       int len = 0;
+       char buf[40] = {'\0'};
+
+       if (!cfg)
+               return -ENODEV;
+
+       if (*ppos)
+               return 0; /* the end */
+
+       len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+       if (len < 0 || len >= sizeof(buf))
+               return 0;
+
+       if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+               return -EFAULT;
+
+       *ppos += len;   /* increase offset */
+
+       return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, kms->dev) {
+               if (plane->fb && plane->state) {
+                       dpu_plane_danger_signal_ctrl(plane, enable);
+                       DPU_DEBUG("plane:%d img:%dx%d ",
+                               plane->base.id, plane->fb->width,
+                               plane->fb->height);
+                       DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+                               plane->state->src_x >> 16,
+                               plane->state->src_y >> 16,
+                               plane->state->src_w >> 16,
+                               plane->state->src_h >> 16,
+                               plane->state->crtc_x, plane->state->crtc_y,
+                               plane->state->crtc_w, plane->state->crtc_h);
+               } else {
+                       DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+               }
+       }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+                   const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct dpu_kms *kms = file->private_data;
+       struct dpu_mdss_cfg *cfg = kms->catalog;
+       int disable_panic;
+       char buf[10];
+
+       if (!cfg)
+               return -EFAULT;
+
+       if (count >= sizeof(buf))
+               return -EFAULT;
+
+       if (copy_from_user(buf, user_buf, count))
+               return -EFAULT;
+
+       buf[count] = '\0'; /* terminate the string */
+
+       if (kstrtoint(buf, 0, &disable_panic))
+               return -EFAULT;
+
+       if (disable_panic) {
+               /* Disable panic signal for all active pipes */
+               DPU_DEBUG("Disabling danger:\n");
+               _dpu_plane_set_danger_state(kms, false);
+               kms->has_danger_ctrl = false;
+       } else {
+               /* Enable panic signal for all active pipes */
+               DPU_DEBUG("Enabling danger:\n");
+               kms->has_danger_ctrl = true;
+               _dpu_plane_set_danger_state(kms, true);
+       }
+
+       return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+       .open = simple_open,
+       .read = _dpu_plane_danger_read,
+       .write = _dpu_plane_danger_write,
+};
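+
+/*
+ * Usage sketch (illustrative; the exact debugfs path depends on the DRM
+ * minor and on the plane's pipe name):
+ *
+ *     # echo 1 > /sys/kernel/debug/dri/0/<pipe_name>/disable_danger
+ *     # cat /sys/kernel/debug/dri/0/<pipe_name>/disable_danger
+ */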
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+       struct dpu_kms *kms;
+       struct msm_drm_private *priv;
+       const struct dpu_sspp_sub_blks *sblk = NULL;
+       const struct dpu_sspp_cfg *cfg = NULL;
+
+       if (!plane || !plane->dev) {
+               DPU_ERROR("invalid arguments\n");
+               return -EINVAL;
+       }
+
+       priv = plane->dev->dev_private;
+       if (!priv || !priv->kms) {
+               DPU_ERROR("invalid KMS reference\n");
+               return -EINVAL;
+       }
+
+       kms = to_dpu_kms(priv->kms);
+       pdpu = to_dpu_plane(plane);
+
+       if (pdpu && pdpu->pipe_hw)
+               cfg = pdpu->pipe_hw->cap;
+       if (cfg)
+               sblk = cfg->sblk;
+
+       if (!sblk)
+               return 0;
+
+       /* create overall sub-directory for the pipe */
+       pdpu->debugfs_root =
+               debugfs_create_dir(pdpu->pipe_name,
+                               plane->dev->primary->debugfs_root);
+
+       if (!pdpu->debugfs_root)
+               return -ENOMEM;
+
+       /* don't error check these */
+       debugfs_create_x32("features", 0600,
+                       pdpu->debugfs_root, &pdpu->features);
+
+       /* add register dump support */
+       dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+                       sblk->src_blk.base + cfg->base,
+                       sblk->src_blk.len,
+                       kms);
+       dpu_debugfs_create_regset32("src_blk", 0400,
+                       pdpu->debugfs_root, &pdpu->debugfs_src);
+
+       if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+                       cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+               dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+                               sblk->scaler_blk.base + cfg->base,
+                               sblk->scaler_blk.len,
+                               kms);
+               dpu_debugfs_create_regset32("scaler_blk", 0400,
+                               pdpu->debugfs_root,
+                               &pdpu->debugfs_scaler);
+               debugfs_create_bool("default_scaling",
+                               0600,
+                               pdpu->debugfs_root,
+                               &pdpu->debugfs_default_scale);
+       }
+
+       if (cfg->features & BIT(DPU_SSPP_CSC) ||
+                       cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+               dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+                               sblk->csc_blk.base + cfg->base,
+                               sblk->csc_blk.len,
+                               kms);
+               dpu_debugfs_create_regset32("csc_blk", 0400,
+                               pdpu->debugfs_root, &pdpu->debugfs_csc);
+       }
+
+       debugfs_create_u32("xin_id",
+                       0400,
+                       pdpu->debugfs_root,
+                       (u32 *) &cfg->xin_id);
+       debugfs_create_u32("clk_ctrl",
+                       0400,
+                       pdpu->debugfs_root,
+                       (u32 *) &cfg->clk_ctrl);
+       debugfs_create_x32("creq_vblank",
+                       0600,
+                       pdpu->debugfs_root,
+                       (u32 *) &sblk->creq_vblank);
+       debugfs_create_x32("danger_vblank",
+                       0600,
+                       pdpu->debugfs_root,
+                       (u32 *) &sblk->danger_vblank);
+
+       debugfs_create_file("disable_danger",
+                       0600,
+                       pdpu->debugfs_root,
+                       kms, &dpu_plane_danger_enable);
+
+       return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu;
+
+       if (!plane)
+               return;
+       pdpu = to_dpu_plane(plane);
+
+       debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+       return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+       return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+       _dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+               .update_plane = drm_atomic_helper_update_plane,
+               .disable_plane = drm_atomic_helper_disable_plane,
+               .destroy = dpu_plane_destroy,
+               .reset = dpu_plane_reset,
+               .atomic_duplicate_state = dpu_plane_duplicate_state,
+               .atomic_destroy_state = dpu_plane_destroy_state,
+               .late_register = dpu_plane_late_register,
+               .early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+               .prepare_fb = dpu_plane_prepare_fb,
+               .cleanup_fb = dpu_plane_cleanup_fb,
+               .atomic_check = dpu_plane_atomic_check,
+               .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+       return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+       return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+               uint32_t pipe, bool primary_plane,
+               unsigned long possible_crtcs, u32 master_plane_id)
+{
+       struct drm_plane *plane = NULL, *master_plane = NULL;
+       const struct dpu_format_extended *format_list;
+       struct dpu_plane *pdpu;
+       struct msm_drm_private *priv;
+       struct dpu_kms *kms;
+       enum drm_plane_type type;
+       int zpos_max = DPU_ZPOS_MAX;
+       int ret = -EINVAL;
+
+       if (!dev) {
+               DPU_ERROR("[%u]device is NULL\n", pipe);
+               goto exit;
+       }
+
+       priv = dev->dev_private;
+       if (!priv) {
+               DPU_ERROR("[%u]private data is NULL\n", pipe);
+               goto exit;
+       }
+
+       if (!priv->kms) {
+               DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+               goto exit;
+       }
+       kms = to_dpu_kms(priv->kms);
+
+       if (!kms->catalog) {
+               DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+               goto exit;
+       }
+
+       /* create and zero local structure */
+       pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+       if (!pdpu) {
+               DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       /* cache local stuff for later */
+       plane = &pdpu->base;
+       pdpu->pipe = pipe;
+       pdpu->is_virtual = (master_plane_id != 0);
+       INIT_LIST_HEAD(&pdpu->mplane_list);
+       master_plane = drm_plane_find(dev, NULL, master_plane_id);
+       if (master_plane) {
+               struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+               list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+       }
+
+       /* initialize underlying h/w driver */
+       pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+                                                       master_plane_id != 0);
+       if (IS_ERR(pdpu->pipe_hw)) {
+               DPU_ERROR("[%u]SSPP init failed\n", pipe);
+               ret = PTR_ERR(pdpu->pipe_hw);
+               goto clean_plane;
+       } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+               DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+               goto clean_sspp;
+       }
+
+       /* cache features mask for later */
+       pdpu->features = pdpu->pipe_hw->cap->features;
+       pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+       if (!pdpu->pipe_sblk) {
+               DPU_ERROR("[%u]invalid sblk\n", pipe);
+               goto clean_sspp;
+       }
+
+       if (!master_plane_id)
+               format_list = pdpu->pipe_sblk->format_list;
+       else
+               format_list = pdpu->pipe_sblk->virt_format_list;
+
+       pdpu->nformats = dpu_populate_formats(format_list,
+                               pdpu->formats,
+                               0,
+                               ARRAY_SIZE(pdpu->formats));
+
+       if (!pdpu->nformats) {
+               DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+               goto clean_sspp;
+       }
+
+       if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+               type = DRM_PLANE_TYPE_CURSOR;
+       else if (primary_plane)
+               type = DRM_PLANE_TYPE_PRIMARY;
+       else
+               type = DRM_PLANE_TYPE_OVERLAY;
+       ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+                               pdpu->formats, pdpu->nformats,
+                               NULL, type, NULL);
+       if (ret)
+               goto clean_sspp;
+
+       pdpu->catalog = kms->catalog;
+
+       if (kms->catalog->mixer_count &&
+               kms->catalog->mixer[0].sblk->maxblendstages) {
+               zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+               if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+                       zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+       }
+
+       ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+       if (ret)
+               DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+       /* success! finalize initialization */
+       drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+       /* save user friendly pipe name for later */
+       snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+       mutex_init(&pdpu->lock);
+
+       DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+                                       pipe, plane->base.id, master_plane_id);
+       return plane;
+
+clean_sspp:
+       if (pdpu && pdpu->pipe_hw)
+               dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+       kfree(pdpu);
+exit:
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644 (file)
index 0000000..f6fe6dd
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base:      base drm plane state object
+ * @aspace:    pointer to address space for input/output buffers
+ * @input_fence:       dereferenced input fence pointer
+ * @stage:     assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending:   whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg:   CDP configuration
+ */
+struct dpu_plane_state {
+       struct drm_plane_state base;
+       struct msm_gem_address_space *aspace;
+       void *input_fence;
+       enum dpu_stage stage;
+       uint32_t multirect_index;
+       uint32_t multirect_mode;
+       bool pending;
+
+       /* scaler configuration */
+       struct dpu_hw_scaler3_cfg scaler3_cfg;
+       struct dpu_hw_pixel_ext pixel_ext;
+
+       struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane configured on rect 0
+ * @r1: drm plane configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+       const struct drm_plane_state *r0;
+       const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+       container_of(x, struct dpu_plane_state, base)
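+
+/*
+ * Illustrative sketch (not part of the API): driver code recovers the DPU
+ * state from a base drm_plane_state with the wrapper above, e.g.:
+ *
+ *     struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ *
+ *     if (pstate->pending)
+ *             ...
+ */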
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane:   Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ *          false - if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane:   Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+               u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error status to set on the plane
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev:   Pointer to DRM device
+ * @pipe:  dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ *                   a regular plane initialization. A non-zero primary plane
+ *                   id will be passed for a virtual pipe initialization.
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+               uint32_t pipe, bool primary_plane,
+               unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ *                                  against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane:   Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane:  Pointer to DRM plane object
+ * @color:  RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha:  8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+               uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ *     validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644 (file)
index 0000000..a68f124
--- /dev/null
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+       [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+       [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+       [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+       if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+               return data_bus_name[bus_id];
+
+       return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+               u32 event_type)
+{
+       struct dpu_power_event *event;
+
+       list_for_each_entry(event, &phandle->event_list, list) {
+               if (event->event_type & event_type)
+                       event->cb_fnc(event_type, event->usr);
+       }
+}
+
+struct dpu_power_client *dpu_power_client_create(
+       struct dpu_power_handle *phandle, char *client_name)
+{
+       struct dpu_power_client *client;
+       static u32 id;
+
+       if (!client_name || !phandle) {
+               pr_err("client name is null or invalid power data\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+       if (!client)
+               return ERR_PTR(-ENOMEM);
+
+       mutex_lock(&phandle->phandle_lock);
+       strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+       client->usecase_ndx = VOTE_INDEX_DISABLE;
+       client->id = id;
+       client->active = true;
+       pr_debug("client %s created:%pK id:%d\n", client_name,
+               client, id);
+       id++;
+       list_add(&client->list, &phandle->power_client_clist);
+       mutex_unlock(&phandle->phandle_lock);
+
+       return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+       struct dpu_power_client *client)
+{
+       if (!client  || !phandle) {
+               pr_err("reg bus vote: invalid client handle\n");
+       } else if (!client->active) {
+               pr_err("dpu power deinit already done\n");
+               kfree(client);
+       } else {
+               pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+                       client->name, client, client->id);
+               mutex_lock(&phandle->phandle_lock);
+               list_del_init(&client->list);
+               mutex_unlock(&phandle->phandle_lock);
+               kfree(client);
+       }
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+       struct dpu_power_handle *phandle)
+{
+       phandle->dev = &pdev->dev;
+
+       INIT_LIST_HEAD(&phandle->power_client_clist);
+       INIT_LIST_HEAD(&phandle->event_list);
+
+       mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+       struct dpu_power_handle *phandle)
+{
+       struct dpu_power_client *curr_client, *next_client;
+       struct dpu_power_event *curr_event, *next_event;
+
+       if (!phandle || !pdev) {
+               pr_err("invalid input param\n");
+               return;
+       }
+
+       mutex_lock(&phandle->phandle_lock);
+       list_for_each_entry_safe(curr_client, next_client,
+                       &phandle->power_client_clist, list) {
+               pr_err("client:%s-%d still registered with refcount:%d\n",
+                               curr_client->name, curr_client->id,
+                               curr_client->refcount);
+               curr_client->active = false;
+               list_del(&curr_client->list);
+       }
+
+       list_for_each_entry_safe(curr_event, next_event,
+                       &phandle->event_list, list) {
+               pr_err("event:%d, client:%s still registered\n",
+                               curr_event->event_type,
+                               curr_event->client_name);
+               curr_event->active = false;
+               list_del(&curr_event->list);
+       }
+       mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+       struct dpu_power_client *pclient, bool enable)
+{
+       bool changed = false;
+       u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+       struct dpu_power_client *client;
+
+       if (!phandle || !pclient) {
+               pr_err("invalid input argument\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&phandle->phandle_lock);
+       if (enable)
+               pclient->refcount++;
+       else if (pclient->refcount)
+               pclient->refcount--;
+
+       if (pclient->refcount)
+               pclient->usecase_ndx = VOTE_INDEX_LOW;
+       else
+               pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+       list_for_each_entry(client, &phandle->power_client_clist, list) {
+               if (client->usecase_ndx < VOTE_INDEX_MAX &&
+                   client->usecase_ndx > max_usecase_ndx)
+                       max_usecase_ndx = client->usecase_ndx;
+       }
+
+       if (phandle->current_usecase_ndx != max_usecase_ndx) {
+               changed = true;
+               prev_usecase_ndx = phandle->current_usecase_ndx;
+               phandle->current_usecase_ndx = max_usecase_ndx;
+       }
+
+       pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+               __builtin_return_address(0), changed, max_usecase_ndx,
+               pclient->name, pclient->id, enable, pclient->refcount);
+
+       if (!changed)
+               goto end;
+
+       if (enable) {
+               dpu_power_event_trigger_locked(phandle,
+                               DPU_POWER_EVENT_PRE_ENABLE);
+               dpu_power_event_trigger_locked(phandle,
+                               DPU_POWER_EVENT_POST_ENABLE);
+
+       } else {
+               dpu_power_event_trigger_locked(phandle,
+                               DPU_POWER_EVENT_PRE_DISABLE);
+               dpu_power_event_trigger_locked(phandle,
+                               DPU_POWER_EVENT_POST_DISABLE);
+       }
+
+end:
+       mutex_unlock(&phandle->phandle_lock);
+       return 0;
+}
+
+struct dpu_power_event *dpu_power_handle_register_event(
+               struct dpu_power_handle *phandle,
+               u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+               void *usr, char *client_name)
+{
+       struct dpu_power_event *event;
+
+       if (!phandle) {
+               pr_err("invalid power handle\n");
+               return ERR_PTR(-EINVAL);
+       } else if (!cb_fnc || !event_type) {
+               pr_err("no callback fnc or event type\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+       if (!event)
+               return ERR_PTR(-ENOMEM);
+
+       event->event_type = event_type;
+       event->cb_fnc = cb_fnc;
+       event->usr = usr;
+       strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+       event->active = true;
+
+       mutex_lock(&phandle->phandle_lock);
+       list_add(&event->list, &phandle->event_list);
+       mutex_unlock(&phandle->phandle_lock);
+
+       return event;
+}
+
+void dpu_power_handle_unregister_event(
+               struct dpu_power_handle *phandle,
+               struct dpu_power_event *event)
+{
+       if (!phandle || !event) {
+               pr_err("invalid phandle or event\n");
+       } else if (!event->active) {
+               pr_err("power handle deinit already done\n");
+               kfree(event);
+       } else {
+               mutex_lock(&phandle->phandle_lock);
+               list_del_init(&event->list);
+               mutex_unlock(&phandle->phandle_lock);
+               kfree(event);
+       }
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644 (file)
index 0000000..344f744
--- /dev/null
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA   0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA  0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA   1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA  0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE    0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE   0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE     0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE    0x8
+
+/**
+ * enum mdss_bus_vote_type - register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+       VOTE_INDEX_DISABLE,
+       VOTE_INDEX_LOW,
+       VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+       DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+       DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+       DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+       DPU_POWER_HANDLE_DBUS_ID_MNOC,
+       DPU_POWER_HANDLE_DBUS_ID_LLCC,
+       DPU_POWER_HANDLE_DBUS_ID_EBI,
+       DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name:      name of the client
+ * @usecase_ndx: current register bus vote type
+ * @refcount:  current refcount if multiple modules are using the
+ *              same client for enable/disable. Power module will
+ *              aggregate the refcount and vote accordingly for this
+ *              client.
+ * @id:                assigned during create; helps with debugging.
+ * @list:      list to attach power handle master list
+ * @ab:         arbitrated bandwidth for each bus client
+ * @ib:         instantaneous bandwidth for each bus client
+ * @active:    indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+       char name[MAX_CLIENT_NAME_LEN];
+       short usecase_ndx;
+       short refcount;
+       u32 id;
+       struct list_head list;
+       u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+       u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+       bool active;
+};
+
+/**
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event: refer to DPU_POWER_HANDLE_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+       char client_name[MAX_CLIENT_NAME_LEN];
+       void (*cb_fnc)(u32 event_type, void *usr);
+       void *usr;
+       u32 event_type;
+       struct list_head list;
+       bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+       struct list_head power_client_clist;
+       struct mutex phandle_lock;
+       struct device *dev;
+       u32 current_usecase_ndx;
+       struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev:   platform device to search the power resources
+ * @pdata:  power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+       struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev:   platform device for power resources
+ * @pdata:  power handle containing the resources
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+       struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata:  power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to the new client on success, or ERR_PTR on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+       char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle:  power handle containing the resources
+ * @client: client to destroy
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+       struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata:  power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+       struct dpu_power_client *pclient, bool enable);
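+
+/*
+ * Usage sketch (illustrative; "dpu_core" is an arbitrary client name):
+ * create a client once, then bracket hardware access with refcounted
+ * enable/disable votes:
+ *
+ *     client = dpu_power_client_create(phandle, "dpu_core");
+ *     if (IS_ERR(client))
+ *             return PTR_ERR(client);
+ *
+ *     dpu_power_resource_enable(phandle, client, true);
+ *     ...program hardware...
+ *     dpu_power_resource_enable(phandle, client, false);
+ */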
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle:  power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+               struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ *     Clients can register for multiple events in a single call.
+ *     Any block with access to phandle can register for the event
+ *     notification.
+ * @phandle:   power handle containing the resources
+ * @event_type:        event type to register; refer DPU_POWER_HANDLE_EVENT_*
+ * @cb_fnc:    pointer to desired callback function
+ * @usr:       user pointer to pass to callback on event trigger
+ *
+ * Return:     event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+               struct dpu_power_handle *phandle,
+               u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+               void *usr, char *client_name);
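+
+/*
+ * Usage sketch (illustrative; power_event_cb, restore_hw_state and the
+ * client name are hypothetical, not part of this patch):
+ *
+ *     static void power_event_cb(u32 event_type, void *usr)
+ *     {
+ *             if (event_type & DPU_POWER_EVENT_POST_ENABLE)
+ *                     restore_hw_state(usr);
+ *     }
+ *
+ *     event = dpu_power_handle_register_event(phandle,
+ *                     DPU_POWER_EVENT_POST_ENABLE |
+ *                     DPU_POWER_EVENT_PRE_DISABLE,
+ *                     power_event_cb, priv, "my_client");
+ */
+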
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle:   power handle containing the resources
+ * @event:     event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+               struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id:    data bus identifier
+ * Return:     Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644 (file)
index 0000000..13c0a36
--- /dev/null
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)    "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+       ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+                               (t).num_comp_enc == (r).num_enc && \
+                               (t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+       enum dpu_rm_topology_name top_name;
+       int num_lm;
+       int num_comp_enc;
+       int num_intf;
+       int num_ctl;
+       int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+       {   DPU_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
+       {   DPU_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
+       {   DPU_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
+       {   DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
+};
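+
+/*
+ * Example: a requested msm_display_topology of { .num_lm = 2, .num_enc = 0,
+ * .num_intf = 1 } matches DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE in the table
+ * above: two layer mixers 3D-merged onto a single interface.
+ */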
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl:  topology control preference from kernel client
+ * @topology:  selected topology for the display
+ * @hw_res:       Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+       uint64_t top_ctrl;
+       const struct dpu_rm_topology_def *topology;
+       struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ *     Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ *     By using a tag rather than lists of pointers to the HW blocks used,
+ *     we can avoid some list management, since we don't know how many
+ *     blocks of each type a given use case may require.
+ * @list:      List head for list of all reservations
+ * @seq:       Global RSVP sequence number for debugging, especially for
+ *             differentiating different allocations for the same encoder.
+ * @enc_id:    Reservations are tracked by Encoder DRM object ID.
+ *             CRTCs may be connected to multiple Encoders.
+ *             An encoder or connector id identifies the display path.
+ * @topology:  DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+       struct list_head list;
+       uint32_t seq;
+       uint32_t enc_id;
+       enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list:      List head for list of all hardware blocks tracking items
+ * @rsvp:      Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt:  Temporary pointer used during reservation to the incoming
+ *             request. Will be swapped into rsvp if proposal is accepted
+ * @type:      Type of hardware block this structure tracks
+ * @id:                Hardware ID number, within its own space, i.e. LM_X
+ * @catalog:   Pointer to the hardware catalog entry for this block
+ * @hw:                Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+       struct list_head list;
+       struct dpu_rm_rsvp *rsvp;
+       struct dpu_rm_rsvp *rsvp_nxt;
+       enum dpu_hw_blk_type type;
+       uint32_t id;
+       struct dpu_hw_blk *hw;
+};
+
+/**
+ * enum dpu_rm_dbg_rsvp_stage - steps in making a reservation, for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+       DPU_RM_STAGE_BEGIN,
+       DPU_RM_STAGE_AFTER_CLEAR,
+       DPU_RM_STAGE_AFTER_RSVPNEXT,
+       DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+               struct dpu_rm *rm,
+               enum dpu_rm_dbg_rsvp_stage stage)
+{
+       struct dpu_rm_rsvp *rsvp;
+       struct dpu_rm_hw_blk *blk;
+       enum dpu_hw_blk_type type;
+
+       DPU_DEBUG("%d\n", stage);
+
+       list_for_each_entry(rsvp, &rm->rsvps, list) {
+               DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+                             rsvp->enc_id, rsvp->topology);
+       }
+
+       for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+               list_for_each_entry(blk, &rm->hw_blks[type], list) {
+                       if (!blk->rsvp && !blk->rsvp_nxt)
+                               continue;
+
+                       DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+                               (blk->rsvp) ? blk->rsvp->seq : 0,
+                               (blk->rsvp) ? blk->rsvp->enc_id : 0,
+                               (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+                               (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+                               blk->type, blk->id);
+               }
+       }
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+       return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+       int i;
+
+       for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+               if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+                       return g_top_table[i].top_name;
+
+       return DPU_RM_TOPOLOGY_NONE;
+}
+
+void dpu_rm_init_hw_iter(
+               struct dpu_rm_hw_iter *iter,
+               uint32_t enc_id,
+               enum dpu_hw_blk_type type)
+{
+       memset(iter, 0, sizeof(*iter));
+       iter->enc_id = enc_id;
+       iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+       struct list_head *blk_list;
+
+       if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+               DPU_ERROR("invalid rm\n");
+               return false;
+       }
+
+       i->hw = NULL;
+       blk_list = &rm->hw_blks[i->type];
+
+       if (i->blk && (&i->blk->list == blk_list)) {
+               DPU_DEBUG("attempt to resume iteration past last block\n");
+               return false;
+       }
+
+       i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+       list_for_each_entry_continue(i->blk, blk_list, list) {
+               struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+               if (i->blk->type != i->type) {
+                       DPU_ERROR("found incorrect block type %d on %d list\n",
+                                       i->blk->type, i->type);
+                       return false;
+               }
+
+               if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+                       i->hw = i->blk->hw;
+                       DPU_DEBUG("found type %d id %d for enc %d\n",
+                                       i->type, i->blk->id, i->enc_id);
+                       return true;
+               }
+       }
+
+       DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+       return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+       bool ret;
+
+       mutex_lock(&rm->rm_lock);
+       ret = _dpu_rm_get_hw_locked(rm, i);
+       mutex_unlock(&rm->rm_lock);
+
+       return ret;
+}
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+       switch (type) {
+       case DPU_HW_BLK_LM:
+               dpu_hw_lm_destroy(hw);
+               break;
+       case DPU_HW_BLK_CTL:
+               dpu_hw_ctl_destroy(hw);
+               break;
+       case DPU_HW_BLK_CDM:
+               dpu_hw_cdm_destroy(hw);
+               break;
+       case DPU_HW_BLK_PINGPONG:
+               dpu_hw_pingpong_destroy(hw);
+               break;
+       case DPU_HW_BLK_INTF:
+               dpu_hw_intf_destroy(hw);
+               break;
+       case DPU_HW_BLK_SSPP:
+               /* SSPPs are not managed by the resource manager */
+       case DPU_HW_BLK_TOP:
+               /* Top is a singleton, not managed in hw_blks list */
+       case DPU_HW_BLK_MAX:
+       default:
+               DPU_ERROR("unsupported block type %d\n", type);
+               break;
+       }
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+       struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+       struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+       enum dpu_hw_blk_type type;
+
+       if (!rm) {
+               DPU_ERROR("invalid rm\n");
+               return -EINVAL;
+       }
+
+       list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+               list_del(&rsvp_cur->list);
+               kfree(rsvp_cur);
+       }
+
+       for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+               list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+                               list) {
+                       list_del(&hw_cur->list);
+                       _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+                       kfree(hw_cur);
+               }
+       }
+
+       dpu_hw_mdp_destroy(rm->hw_mdp);
+       rm->hw_mdp = NULL;
+
+       mutex_destroy(&rm->rm_lock);
+
+       return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+               struct dpu_rm *rm,
+               struct dpu_mdss_cfg *cat,
+               void __iomem *mmio,
+               enum dpu_hw_blk_type type,
+               uint32_t id,
+               void *hw_catalog_info)
+{
+       struct dpu_rm_hw_blk *blk;
+       struct dpu_hw_mdp *hw_mdp;
+       void *hw;
+
+       hw_mdp = rm->hw_mdp;
+
+       switch (type) {
+       case DPU_HW_BLK_LM:
+               hw = dpu_hw_lm_init(id, mmio, cat);
+               break;
+       case DPU_HW_BLK_CTL:
+               hw = dpu_hw_ctl_init(id, mmio, cat);
+               break;
+       case DPU_HW_BLK_CDM:
+               hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+               break;
+       case DPU_HW_BLK_PINGPONG:
+               hw = dpu_hw_pingpong_init(id, mmio, cat);
+               break;
+       case DPU_HW_BLK_INTF:
+               hw = dpu_hw_intf_init(id, mmio, cat);
+               break;
+       case DPU_HW_BLK_SSPP:
+               /* SSPPs are not managed by the resource manager */
+       case DPU_HW_BLK_TOP:
+               /* Top is a singleton, not managed in hw_blks list */
+       case DPU_HW_BLK_MAX:
+       default:
+               DPU_ERROR("unsupported block type %d\n", type);
+               return -EINVAL;
+       }
+
+       if (IS_ERR_OR_NULL(hw)) {
+               DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+                               type, PTR_ERR(hw));
+               return -EFAULT;
+       }
+
+       blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+       if (!blk) {
+               _dpu_rm_hw_destroy(type, hw);
+               return -ENOMEM;
+       }
+
+       blk->type = type;
+       blk->id = id;
+       blk->hw = hw;
+       list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+       return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+               struct dpu_mdss_cfg *cat,
+               void __iomem *mmio,
+               struct drm_device *dev)
+{
+       int rc, i;
+       enum dpu_hw_blk_type type;
+
+       if (!rm || !cat || !mmio || !dev) {
+               DPU_ERROR("invalid kms\n");
+               return -EINVAL;
+       }
+
+       /* Clear, setup lists */
+       memset(rm, 0, sizeof(*rm));
+
+       mutex_init(&rm->rm_lock);
+
+       INIT_LIST_HEAD(&rm->rsvps);
+       for (type = 0; type < DPU_HW_BLK_MAX; type++)
+               INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+       rm->dev = dev;
+
+       /* Some of the sub-blocks require an mdptop to be created */
+       rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+       if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+               rc = PTR_ERR(rm->hw_mdp);
+               rm->hw_mdp = NULL;
+               DPU_ERROR("failed: mdp hw not available\n");
+               goto fail;
+       }
+
+       /* Interrogate HW catalog and create tracking items for hw blocks */
+       for (i = 0; i < cat->mixer_count; i++) {
+               struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+               if (lm->pingpong == PINGPONG_MAX) {
+                       DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+                       continue;
+               }
+
+               rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+                               cat->mixer[i].id, &cat->mixer[i]);
+               if (rc) {
+                       DPU_ERROR("failed: lm hw not available\n");
+                       goto fail;
+               }
+
+               if (!rm->lm_max_width) {
+                       rm->lm_max_width = lm->sblk->maxwidth;
+               } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+                       /*
+                        * Don't expect to have hw where lm max widths differ.
+                        * If found, take the min.
+                        */
+                       DPU_ERROR("unsupported: lm maxwidth differs\n");
+                       if (rm->lm_max_width > lm->sblk->maxwidth)
+                               rm->lm_max_width = lm->sblk->maxwidth;
+               }
+       }
+
+       for (i = 0; i < cat->pingpong_count; i++) {
+               rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+                               cat->pingpong[i].id, &cat->pingpong[i]);
+               if (rc) {
+                       DPU_ERROR("failed: pp hw not available\n");
+                       goto fail;
+               }
+       }
+
+       for (i = 0; i < cat->intf_count; i++) {
+               if (cat->intf[i].type == INTF_NONE) {
+                       DPU_DEBUG("skip intf %d with type none\n", i);
+                       continue;
+               }
+
+               rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+                               cat->intf[i].id, &cat->intf[i]);
+               if (rc) {
+                       DPU_ERROR("failed: intf hw not available\n");
+                       goto fail;
+               }
+       }
+
+       for (i = 0; i < cat->ctl_count; i++) {
+               rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+                               cat->ctl[i].id, &cat->ctl[i]);
+               if (rc) {
+                       DPU_ERROR("failed: ctl hw not available\n");
+                       goto fail;
+               }
+       }
+
+       for (i = 0; i < cat->cdm_count; i++) {
+               rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+                               cat->cdm[i].id, &cat->cdm[i]);
+               if (rc) {
+                       DPU_ERROR("failed: cdm hw not available\n");
+                       goto fail;
+               }
+       }
+
+       return 0;
+
+fail:
+       dpu_rm_destroy(rm);
+
+       return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ *     proposed use case requirements, incl. hardwired dependent blocks like
+ *     pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer; the function checks whether the lm and all other
+ *      hardwired blocks connected to the lm (pp) are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ *      NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-NULL, this function checks if lm is a compatible peer of
+ *              primary_lm, as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               struct dpu_rm_requirements *reqs,
+               struct dpu_rm_hw_blk *lm,
+               struct dpu_rm_hw_blk **pp,
+               struct dpu_rm_hw_blk *primary_lm)
+{
+       const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+       struct dpu_rm_hw_iter iter;
+
+       *pp = NULL;
+
+       DPU_DEBUG("check lm %d pp %d\n",
+                          lm_cfg->id, lm_cfg->pingpong);
+
+       /* Check if this layer mixer is a peer of the proposed primary LM */
+       if (primary_lm) {
+               const struct dpu_lm_cfg *prim_lm_cfg =
+                               to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+               if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+                       DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+                                       prim_lm_cfg->id);
+                       return false;
+               }
+       }
+
+       /* Already reserved? */
+       if (RESERVED_BY_OTHER(lm, rsvp)) {
+               DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+               return false;
+       }
+
+       dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+       while (_dpu_rm_get_hw_locked(rm, &iter)) {
+               if (iter.blk->id == lm_cfg->pingpong) {
+                       *pp = iter.blk;
+                       break;
+               }
+       }
+
+       if (!*pp) {
+               DPU_ERROR("failed to get pp %d on lm %d\n",
+                               lm_cfg->pingpong, lm_cfg->id);
+               return false;
+       }
+
+       if (RESERVED_BY_OTHER(*pp, rsvp)) {
+               DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+                               (*pp)->id);
+               return false;
+       }
+
+       return true;
+}
+
+static int _dpu_rm_reserve_lms(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               struct dpu_rm_requirements *reqs)
+{
+       struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+       struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+       struct dpu_rm_hw_iter iter_i, iter_j;
+       int lm_count = 0;
+       int i, rc = 0;
+
+       if (!reqs->topology->num_lm) {
+               DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+               return -EINVAL;
+       }
+
+       /* Find a primary mixer */
+       dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+       while (lm_count != reqs->topology->num_lm &&
+                       _dpu_rm_get_hw_locked(rm, &iter_i)) {
+               memset(&lm, 0, sizeof(lm));
+               memset(&pp, 0, sizeof(pp));
+
+               lm_count = 0;
+               lm[lm_count] = iter_i.blk;
+
+               if (!_dpu_rm_check_lm_and_get_connected_blks(
+                               rm, rsvp, reqs, lm[lm_count],
+                               &pp[lm_count], NULL))
+                       continue;
+
+               ++lm_count;
+
+               /* Valid primary mixer found, find matching peers */
+               dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+               while (lm_count != reqs->topology->num_lm &&
+                               _dpu_rm_get_hw_locked(rm, &iter_j)) {
+                       if (iter_i.blk == iter_j.blk)
+                               continue;
+
+                       if (!_dpu_rm_check_lm_and_get_connected_blks(
+                                       rm, rsvp, reqs, iter_j.blk,
+                                       &pp[lm_count], iter_i.blk))
+                               continue;
+
+                       lm[lm_count] = iter_j.blk;
+                       ++lm_count;
+               }
+       }
+
+       if (lm_count != reqs->topology->num_lm) {
+               DPU_DEBUG("unable to find appropriate mixers\n");
+               return -ENAVAIL;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(lm); i++) {
+               if (!lm[i])
+                       break;
+
+               lm[i]->rsvp_nxt = rsvp;
+               pp[i]->rsvp_nxt = rsvp;
+
+               trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+                                        pp[i]->id);
+       }
+
+       return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               const struct dpu_rm_topology_def *top)
+{
+       struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+       struct dpu_rm_hw_iter iter;
+       int i = 0;
+
+       memset(&ctls, 0, sizeof(ctls));
+
+       dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+       while (_dpu_rm_get_hw_locked(rm, &iter)) {
+               const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+               unsigned long features = ctl->caps->features;
+               bool has_split_display;
+
+               if (RESERVED_BY_OTHER(iter.blk, rsvp))
+                       continue;
+
+               has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+               DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+               if (top->needs_split_display != has_split_display)
+                       continue;
+
+               ctls[i] = iter.blk;
+               DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+               if (++i == top->num_ctl)
+                       break;
+       }
+
+       if (i != top->num_ctl)
+               return -ENAVAIL;
+
+       for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+               ctls[i]->rsvp_nxt = rsvp;
+               trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+                                         rsvp->enc_id);
+       }
+
+       return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               uint32_t id,
+               enum dpu_hw_blk_type type)
+{
+       struct dpu_rm_hw_iter iter;
+
+       DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+       dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+       while (_dpu_rm_get_hw_locked(rm, &iter)) {
+               const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+               const struct dpu_cdm_cfg *caps = cdm->caps;
+               bool match = false;
+
+               if (RESERVED_BY_OTHER(iter.blk, rsvp))
+                       continue;
+
+               if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+                       match = test_bit(id, &caps->intf_connect);
+
+               DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+                             iter.blk->type, iter.blk->id, rsvp->enc_id,
+                             caps->intf_connect, match);
+
+               if (!match)
+                       continue;
+
+               trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+                                        rsvp->enc_id);
+               iter.blk->rsvp_nxt = rsvp;
+               break;
+       }
+
+       if (!iter.hw) {
+               DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+               return -ENAVAIL;
+       }
+
+       return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               uint32_t id,
+               enum dpu_hw_blk_type type,
+               bool needs_cdm)
+{
+       struct dpu_rm_hw_iter iter;
+       int ret = 0;
+
+       /* Find the block entry in the rm, and note the reservation */
+       dpu_rm_init_hw_iter(&iter, 0, type);
+       while (_dpu_rm_get_hw_locked(rm, &iter)) {
+               if (iter.blk->id != id)
+                       continue;
+
+               if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+                       DPU_ERROR("type %d id %d already reserved\n", type, id);
+                       return -ENAVAIL;
+               }
+
+               iter.blk->rsvp_nxt = rsvp;
+               trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+                                         rsvp->enc_id);
+               break;
+       }
+
+       /* Shouldn't happen since intfs are fixed at probe */
+       if (!iter.hw) {
+               DPU_ERROR("couldn't find type %d id %d\n", type, id);
+               return -EINVAL;
+       }
+
+       if (needs_cdm)
+               ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+       return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               struct dpu_encoder_hw_resources *hw_res)
+{
+       int i, ret = 0;
+       u32 id;
+
+       for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+               if (hw_res->intfs[i] == INTF_MODE_NONE)
+                       continue;
+               id = i + INTF_0;
+               ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+                               DPU_HW_BLK_INTF, hw_res->needs_cdm);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+               struct dpu_rm *rm,
+               struct drm_encoder *enc,
+               struct drm_crtc_state *crtc_state,
+               struct drm_connector_state *conn_state,
+               struct dpu_rm_rsvp *rsvp,
+               struct dpu_rm_requirements *reqs)
+{
+       int ret;
+       struct dpu_rm_topology_def topology;
+
+       /* Create reservation info, tag reserved blocks with it as we go */
+       rsvp->seq = ++rm->rsvp_next_seq;
+       rsvp->enc_id = enc->base.id;
+       rsvp->topology = reqs->topology->top_name;
+       list_add_tail(&rsvp->list, &rm->rsvps);
+
+       ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+       if (ret) {
+               DPU_ERROR("unable to find appropriate mixers\n");
+               return ret;
+       }
+
+       /*
+        * Do assignment preferring to give away low-resource CTLs first:
+        * - Check CTLs without split display capability first
+        * - Only then allow to grab from CTLs with split display capability
+        */
+       ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+       if (ret && !reqs->topology->needs_split_display) {
+               memcpy(&topology, reqs->topology, sizeof(topology));
+               topology.needs_split_display = true;
+               ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+       }
+       if (ret) {
+               DPU_ERROR("unable to find appropriate CTL\n");
+               return ret;
+       }
+
+       /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+       ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+       if (ret)
+               return ret;
+
+       return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+               struct dpu_rm *rm,
+               struct drm_encoder *enc,
+               struct drm_crtc_state *crtc_state,
+               struct drm_connector_state *conn_state,
+               struct dpu_rm_requirements *reqs,
+               struct msm_display_topology req_topology)
+{
+       int i;
+
+       memset(reqs, 0, sizeof(*reqs));
+
+       dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+       for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+               if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+                                       req_topology)) {
+                       reqs->topology = &g_top_table[i];
+                       break;
+               }
+       }
+
+       if (!reqs->topology) {
+               DPU_ERROR("invalid topology for the display\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Set the requirement based on caps if not set from user space.
+        * This ensures an LM tied to the DS blocks is selected; currently,
+        * DS blocks are tied to LM 0 and LM 1 (primary display).
+        */
+       if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+               conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+               reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+       DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+                     reqs->hw_res.display_num_of_h_tiles);
+       DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+                     reqs->topology->num_lm, reqs->topology->num_ctl,
+                     reqs->topology->top_name,
+                     reqs->topology->needs_split_display);
+
+       return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+               struct dpu_rm *rm,
+               struct drm_encoder *enc)
+{
+       struct dpu_rm_rsvp *i;
+
+       if (!rm || !enc) {
+               DPU_ERROR("invalid params\n");
+               return NULL;
+       }
+
+       if (list_empty(&rm->rsvps))
+               return NULL;
+
+       list_for_each_entry(i, &rm->rsvps, list)
+               if (i->enc_id == enc->base.id)
+                       return i;
+
+       return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+               struct drm_encoder *enc)
+{
+       struct drm_connector *conn = NULL;
+       struct list_head *connector_list =
+                       &enc->dev->mode_config.connector_list;
+
+       list_for_each_entry(conn, connector_list, head)
+               if (conn->encoder == enc)
+                       return conn;
+
+       return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release a reservation and the resources it holds
+ * @rm:        KMS handle
+ * @rsvp:      RSVP pointer to release resources for
+ * @conn:      connector the reservation was made for
+ */
+static void _dpu_rm_release_rsvp(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               struct drm_connector *conn)
+{
+       struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+       struct dpu_rm_hw_blk *blk;
+       enum dpu_hw_blk_type type;
+
+       if (!rsvp)
+               return;
+
+       DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+       list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+               if (rsvp == rsvp_c) {
+                       list_del(&rsvp_c->list);
+                       break;
+               }
+       }
+
+       for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+               list_for_each_entry(blk, &rm->hw_blks[type], list) {
+                       if (blk->rsvp == rsvp) {
+                               blk->rsvp = NULL;
+                               DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+                                               rsvp->seq, rsvp->enc_id,
+                                               blk->type, blk->id);
+                       }
+                       if (blk->rsvp_nxt == rsvp) {
+                               blk->rsvp_nxt = NULL;
+                               DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+                                               rsvp->seq, rsvp->enc_id,
+                                               blk->type, blk->id);
+                       }
+               }
+       }
+
+       kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+       struct dpu_rm_rsvp *rsvp;
+       struct drm_connector *conn;
+
+       if (!rm || !enc) {
+               DPU_ERROR("invalid params\n");
+               return;
+       }
+
+       mutex_lock(&rm->rm_lock);
+
+       rsvp = _dpu_rm_get_rsvp(rm, enc);
+       if (!rsvp) {
+               DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+               goto end;
+       }
+
+       conn = _dpu_rm_get_connector(enc);
+       if (!conn) {
+               DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+               goto end;
+       }
+
+       _dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+       mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+               struct dpu_rm *rm,
+               struct dpu_rm_rsvp *rsvp,
+               struct drm_connector_state *conn_state)
+{
+       struct dpu_rm_hw_blk *blk;
+       enum dpu_hw_blk_type type;
+       int ret = 0;
+
+       /* Swap next rsvp to be the active */
+       for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+               list_for_each_entry(blk, &rm->hw_blks[type], list) {
+                       if (blk->rsvp_nxt) {
+                               blk->rsvp = blk->rsvp_nxt;
+                               blk->rsvp_nxt = NULL;
+                       }
+               }
+       }
+
+       if (!ret)
+               DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+                             rsvp->topology);
+
+       return ret;
+}
+
+int dpu_rm_reserve(
+               struct dpu_rm *rm,
+               struct drm_encoder *enc,
+               struct drm_crtc_state *crtc_state,
+               struct drm_connector_state *conn_state,
+               struct msm_display_topology topology,
+               bool test_only)
+{
+       struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+       struct dpu_rm_requirements reqs;
+       int ret;
+
+       if (!rm || !enc || !crtc_state || !conn_state) {
+               DPU_ERROR("invalid arguments\n");
+               return -EINVAL;
+       }
+
+       /* Check if this is just a page-flip */
+       if (!drm_atomic_crtc_needs_modeset(crtc_state))
+               return 0;
+
+       DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+                     conn_state->connector->base.id, enc->base.id,
+                     crtc_state->crtc->base.id, test_only);
+
+       mutex_lock(&rm->rm_lock);
+
+       _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+       ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+                       conn_state, &reqs, topology);
+       if (ret) {
+               DPU_ERROR("failed to populate hw requirements\n");
+               goto end;
+       }
+
+       /*
+        * We only support one active reservation per-hw-block. But to implement
+        * transactional semantics for test-only, and to allow failure while
+        * modifying an existing reservation, over the course of this function
+        * we can have two reservations:
+        * Current: Existing reservation
+        * Next: Proposed reservation. The proposed reservation may fail, or may
+        *       be discarded if in test-only mode.
+        * If reservation is successful, and we're not in test-only, then we
+        * replace the current with the next.
+        */
+       rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+       if (!rsvp_nxt) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+       /*
+        * User can request that we clear out any reservation during the
+        * atomic_check phase by using this CLEAR bit
+        */
+       if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+               DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+                               rsvp_cur->seq, rsvp_cur->enc_id);
+               _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+               rsvp_cur = NULL;
+               _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+       }
+
+       /* Check the proposed reservation, store it in hw's "next" field */
+       ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+                       rsvp_nxt, &reqs);
+
+       _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+       if (ret) {
+               DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+               _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+       } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+               /*
+                * Normally, if test_only, test the reservation and then undo.
+                * However, if the user requests LOCK, keep the reservation
+                * made during the atomic_check phase.
+                */
+               DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+                               rsvp_nxt->seq, rsvp_nxt->enc_id);
+               _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+       } else {
+               if (test_only && RM_RQ_LOCK(&reqs))
+                       DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+                                       rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+               _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+               ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+       }
+
+       _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+       mutex_unlock(&rm->rm_lock);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644 (file)
index 0000000..ffd1841
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE:                 No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE:           1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE:             2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE:     2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+       DPU_RM_TOPOLOGY_NONE = 0,
+       DPU_RM_TOPOLOGY_SINGLEPIPE,
+       DPU_RM_TOPOLOGY_DUALPIPE,
+       DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+       DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - user-requested control bits for a reservation
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ *                              test, reserve the resources for this display.
+ *                              Normal behavior would not impact the reservation
+ *                              list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ *                               release any reservation held by this display.
+ *                               Normal behavior would not impact the
+ *                               reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS: Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+       DPU_RM_TOPCTL_RESERVE_LOCK,
+       DPU_RM_TOPCTL_RESERVE_CLEAR,
+       DPU_RM_TOPCTL_DS,
+};
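+
+/*
+ * These values are bit positions in a reservation's top_ctrl bitmask, so a
+ * requirement is requested with BIT(), e.g. (illustrative sketch only):
+ *
+ *     top_ctrl |= BIT(DPU_RM_TOPCTL_RESERVE_LOCK);
+ */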
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ *     list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+       struct drm_device *dev;
+       struct list_head rsvps;
+       struct list_head hw_blks[DPU_HW_BLK_MAX];
+       struct dpu_hw_mdp *hw_mdp;
+       uint32_t lm_max_width;
+       uint32_t rsvp_next_seq;
+       struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - forward declaration of the resource manager's
+ *     internal block structure, so the hw iterator below can be defined
+ *     without a void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+       void *hw;
+       struct dpu_rm_hw_blk *blk;
+       uint32_t enc_id;
+       enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ *     for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+               struct dpu_mdss_cfg *cat,
+               void __iomem *mmio,
+               struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
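+
+/*
+ * Illustrative lifecycle sketch: dpu_rm_init() and dpu_rm_destroy() bracket
+ * the life of the manager ("dpu_kms" fields below are hypothetical names):
+ *
+ *     ret = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, dev);
+ *     if (ret)
+ *             return ret;
+ *     ...
+ *     dpu_rm_destroy(&dpu_kms->rm);
+ */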
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ *     the connections and user requirements, specified through related
+ *     topology control properties, and reserve hardware blocks for that
+ *     display chain.
+ *     HW blocks can then be accessed through dpu_rm_get_* functions.
+ *     HW Reservations should be released via dpu_rm_release.
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+               struct drm_encoder *drm_enc,
+               struct drm_crtc_state *crtc_state,
+               struct drm_connector_state *conn_state,
+               struct msm_display_topology topology,
+               bool test_only);
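+
+/*
+ * Illustrative usage sketch only: a display driver would typically probe
+ * availability from atomic_check with test_only = true, make the real
+ * reservation during the modeset with test_only = false, and release it
+ * again on teardown:
+ *
+ *     ret = dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, true);
+ *     if (ret)
+ *             return ret;     (proposed state cannot be served)
+ *     ...
+ *     dpu_rm_release(rm, enc);
+ */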
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ *     HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ *     This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ *     using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+               struct dpu_rm_hw_iter *iter,
+               uint32_t enc_id,
+               enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ *     Meant to do a single pass through the hardware list to iteratively
+ *     retrieve hardware blocks of a given type for a given encoder.
+ *     Initialize an iterator object.
+ *     Set hw block type of interest. Set encoder id of interest, 0 for any.
+ *     Function returns first hw of type for that encoder.
+ *     Subsequent calls will return the next reserved hw of that type in-order.
+ *     Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
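+
+/*
+ * Illustrative iteration sketch following the protocol described above
+ * ("my_enc_id" is a hypothetical encoder DRM id):
+ *
+ *     struct dpu_rm_hw_iter iter;
+ *
+ *     dpu_rm_init_hw_iter(&iter, my_enc_id, DPU_HW_BLK_LM);
+ *     while (dpu_rm_get_hw(rm, &iter)) {
+ *             struct dpu_hw_mixer *lm = to_dpu_hw_mixer(iter.hw);
+ *             ...
+ *     }
+ */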
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ *                            definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644 (file)
index 0000000..ae0ca50
--- /dev/null
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+       TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+               u32 lut, u32 lut_usage),
+       TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+       TP_STRUCT__entry(
+                       __field(u32, pnum)
+                       __field(u32, fmt)
+                       __field(bool, rt)
+                       __field(u32, fl)
+                       __field(u64, lut)
+                       __field(u32, lut_usage)
+       ),
+       TP_fast_assign(
+                       __entry->pnum = pnum;
+                       __entry->fmt = fmt;
+                       __entry->rt = rt;
+                       __entry->fl = fl;
+                       __entry->lut = lut;
+                       __entry->lut_usage = lut_usage;
+       ),
+       TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+                       __entry->pnum, __entry->fmt,
+                       __entry->rt, __entry->fl,
+                       __entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+       TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+               u32 safe_lut),
+       TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+       TP_STRUCT__entry(
+                       __field(u32, pnum)
+                       __field(u32, fmt)
+                       __field(u32, mode)
+                       __field(u32, danger_lut)
+                       __field(u32, safe_lut)
+       ),
+       TP_fast_assign(
+                       __entry->pnum = pnum;
+                       __entry->fmt = fmt;
+                       __entry->mode = mode;
+                       __entry->danger_lut = danger_lut;
+                       __entry->safe_lut = safe_lut;
+       ),
+       TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+                       __entry->pnum, __entry->fmt,
+                       __entry->mode, __entry->danger_lut,
+                       __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+       TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+       TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+       TP_STRUCT__entry(
+                       __field(u32, pnum)
+                       __field(u32, xin_id)
+                       __field(u32, rd_lim)
+                       __field(u32, vbif_idx)
+       ),
+       TP_fast_assign(
+                       __entry->pnum = pnum;
+                       __entry->xin_id = xin_id;
+                       __entry->rd_lim = rd_lim;
+                       __entry->vbif_idx = vbif_idx;
+       ),
+       TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+                       __entry->pnum, __entry->xin_id, __entry->rd_lim,
+                       __entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+       TP_PROTO(int client, unsigned long long ab_quota,
+       unsigned long long ib_quota),
+       TP_ARGS(client, ab_quota, ib_quota),
+       TP_STRUCT__entry(
+                       __field(int, client)
+                       __field(u64, ab_quota)
+                       __field(u64, ib_quota)
+       ),
+       TP_fast_assign(
+                       __entry->client = client;
+                       __entry->ab_quota = ab_quota;
+                       __entry->ib_quota = ib_quota;
+       ),
+       TP_printk("Request client:%d ab=%llu ib=%llu",
+                       __entry->client,
+                       __entry->ab_quota,
+                       __entry->ib_quota)
+)
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+       TP_PROTO(u32 crtc_id),
+       TP_ARGS(crtc_id),
+       TP_STRUCT__entry(
+                       __field(u32, crtc_id)
+       ),
+       TP_fast_assign(
+                       __entry->crtc_id = crtc_id;
+       ),
+       TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+       TP_PROTO(int pid, const char *name, bool trace_begin),
+       TP_ARGS(pid, name, trace_begin),
+       TP_STRUCT__entry(
+                       __field(int, pid)
+                       __string(trace_name, name)
+                       __field(bool, trace_begin)
+       ),
+       TP_fast_assign(
+                       __entry->pid = pid;
+                       __assign_str(trace_name, name);
+                       __entry->trace_begin = trace_begin;
+       ),
+       TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+               __entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(dpu_trace_counter,
+       TP_PROTO(int pid, char *name, int value),
+       TP_ARGS(pid, name, value),
+       TP_STRUCT__entry(
+                       __field(int, pid)
+                       __string(counter_name, name)
+                       __field(int, value)
+       ),
+       TP_fast_assign(
+                       __entry->pid = current->tgid;
+                       __assign_str(counter_name, name);
+                       __entry->value = value;
+       ),
+       TP_printk("%d|%s|%d", __entry->pid,
+                       __get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+       TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+                       u64 bw_ctl_ebi, u32 core_clk_rate,
+                       bool stop_req, u32 update_bus, u32 update_clk),
+       TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+               stop_req, update_bus, update_clk),
+       TP_STRUCT__entry(
+                       __field(u32, crtc)
+                       __field(u64, bw_ctl_mnoc)
+                       __field(u64, bw_ctl_llcc)
+                       __field(u64, bw_ctl_ebi)
+                       __field(u32, core_clk_rate)
+                       __field(bool, stop_req)
+                       __field(u32, update_bus)
+                       __field(u32, update_clk)
+       ),
+       TP_fast_assign(
+                       __entry->crtc = crtc;
+                       __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+                       __entry->bw_ctl_llcc = bw_ctl_llcc;
+                       __entry->bw_ctl_ebi = bw_ctl_ebi;
+                       __entry->core_clk_rate = core_clk_rate;
+                       __entry->stop_req = stop_req;
+                       __entry->update_bus = update_bus;
+                       __entry->update_clk = update_clk;
+       ),
+       TP_printk(
+               "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+                       __entry->crtc,
+                       __entry->bw_ctl_mnoc,
+                       __entry->bw_ctl_llcc,
+                       __entry->bw_ctl_ebi,
+                       __entry->core_clk_rate,
+                       __entry->stop_req,
+                       __entry->update_bus,
+                       __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+       TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+                int irq_idx),
+       TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        enum dpu_intr_idx,      intr_idx        )
+               __field(        int,                    hw_idx          )
+               __field(        int,                    irq_idx         )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->intr_idx = intr_idx;
+               __entry->hw_idx = hw_idx;
+               __entry->irq_idx = irq_idx;
+       ),
+       TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+                 __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+                 __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+       TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+                int irq_idx),
+       TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+       TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+                int irq_idx),
+       TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+       TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+                int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+       TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        enum dpu_intr_idx,      intr_idx        )
+               __field(        int,                    hw_idx          )
+               __field(        int,                    irq_idx         )
+               __field(        enum dpu_pingpong,      pp_idx          )
+               __field(        int,                    atomic_cnt      )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->intr_idx = intr_idx;
+               __entry->hw_idx = hw_idx;
+               __entry->irq_idx = irq_idx;
+               __entry->pp_idx = pp_idx;
+               __entry->atomic_cnt = atomic_cnt;
+       ),
+       TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+                 __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+                 __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+       ),
+       TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+       TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+       TP_ARGS(drm_id, hdisplay, vdisplay),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        int,                    hdisplay        )
+               __field(        int,                    vdisplay        )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->hdisplay = hdisplay;
+               __entry->vdisplay = vdisplay;
+       ),
+       TP_printk("id=%u, mode=%dx%d",
+                 __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+       TP_PROTO(uint32_t drm_id, int val),
+       TP_ARGS(drm_id, val),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id  )
+               __field(        int,            val     )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->val = val;
+       ),
+       TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+       TP_PROTO(uint32_t drm_id, int count),
+       TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+       TP_PROTO(uint32_t drm_id, int ctl_idx),
+       TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+       TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+       TP_ARGS(drm_id, flags, private_flags),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        unsigned int,           flags           )
+               __field(        int,                    private_flags   )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->flags = flags;
+               __entry->private_flags = private_flags;
+       ),
+       TP_printk("id=%u, flags=%u, private_flags=%d",
+                 __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+       TP_PROTO(uint32_t drm_id, bool enable),
+       TP_ARGS(drm_id, enable),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        bool,                   enable          )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->enable = enable;
+       ),
+       TP_printk("id=%u, enable=%s",
+                 __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+       TP_PROTO(uint32_t drm_id, bool enable),
+       TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+       TP_PROTO(uint32_t drm_id, bool enable),
+       TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+       TP_PROTO(uint32_t drm_id, bool enable),
+       TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+       TP_PROTO(uint32_t drm_id, bool enable),
+       TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+       TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+                int rc_state, const char *stage),
+       TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id                  )
+               __field(        u32,            sw_event                )
+               __field(        bool,           idle_pc_supported       )
+               __field(        int,            rc_state                )
+               __string(       stage_str,      stage                   )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->sw_event = sw_event;
+               __entry->idle_pc_supported = idle_pc_supported;
+               __entry->rc_state = rc_state;
+               __assign_str(stage_str, stage);
+       ),
+       TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d",
+                 __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+                 __entry->idle_pc_supported ? "true" : "false",
+                 __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+       TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+       TP_ARGS(drm_id, event, intf_idx),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id          )
+               __field(        u32,            event           )
+               __field(        enum dpu_intf,  intf_idx        )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->event = event;
+               __entry->intf_idx = intf_idx;
+       ),
+       TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+                 __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+       TP_PROTO(uint32_t drm_id, unsigned int idx,
+                unsigned long frame_busy_mask),
+       TP_ARGS(drm_id, idx, frame_busy_mask),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        unsigned int,           idx             )
+               __field(        unsigned long,          frame_busy_mask )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->idx = idx;
+               __entry->frame_busy_mask = frame_busy_mask;
+       ),
+       TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+                 __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+       TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+                int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+       TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+               pending_flush_ret),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id                  )
+               __field(        enum dpu_intf,  intf_idx                )
+               __field(        int,            pending_kickoff_cnt     )
+               __field(        int,            ctl_idx                 )
+               __field(        u32,            pending_flush_ret       )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->intf_idx = intf_idx;
+               __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+               __entry->ctl_idx = ctl_idx;
+               __entry->pending_flush_ret = pending_flush_ret;
+       ),
+       TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+                 "pending_flush_ret=%u", __entry->drm_id,
+                 __entry->intf_idx, __entry->pending_kickoff_cnt,
+                 __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+       TP_PROTO(uint32_t drm_id, ktime_t time),
+       TP_ARGS(drm_id, time),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id  )
+               __field(        ktime_t,        time    )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->time = time;
+       ),
+       TP_printk("id=%u, time=%lld", __entry->drm_id,
+                 ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+       TP_PROTO(uint32_t drm_id, ktime_t time),
+       TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+       TP_PROTO(uint32_t drm_id, ktime_t time),
+       TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+       TP_PROTO(uint32_t drm_id, u32 event),
+       TP_ARGS(drm_id, event),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id  )
+               __field(        u32,            event   )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->event = event;
+       ),
+       TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+       TP_PROTO(uint32_t drm_id, u32 event),
+       TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+       TP_PROTO(uint32_t drm_id, u32 event),
+       TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+       TP_PROTO(uint32_t drm_id, u32 event),
+       TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+       TP_PROTO(uint32_t drm_id, u32 event),
+       TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+       TP_PROTO(uint32_t drm_id, u32 event),
+       TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+       TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+                s64 expected_time, int atomic_cnt),
+       TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id          )
+               __field(        int32_t,        hw_id           )
+               __field(        int,            rc              )
+               __field(        s64,            time            )
+               __field(        s64,            expected_time   )
+               __field(        int,            atomic_cnt      )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->hw_id = hw_id;
+               __entry->rc = rc;
+               __entry->time = time;
+               __entry->expected_time = expected_time;
+               __entry->atomic_cnt = atomic_cnt;
+       ),
+       TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+                 __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+                 __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+       TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+                int refcnt),
+       TP_ARGS(drm_id, pp, enable, refcnt),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id  )
+               __field(        enum dpu_pingpong,      pp      )
+               __field(        bool,                   enable  )
+               __field(        int,                    refcnt  )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->pp = pp;
+               __entry->enable = enable;
+               __entry->refcnt = refcnt;
+       ),
+       TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+                 __entry->pp, __entry->enable ? "true" : "false",
+                 __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+       TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+                u32 event),
+       TP_ARGS(drm_id, pp, new_count, event),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        enum dpu_pingpong,      pp              )
+               __field(        int,                    new_count       )
+               __field(        u32,                    event           )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->pp = pp;
+               __entry->new_count = new_count;
+               __entry->event = event;
+       ),
+       TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+                 __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+       TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+                int kickoff_count, u32 event),
+       TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        enum dpu_pingpong,      pp              )
+               __field(        int,                    timeout_count   )
+               __field(        int,                    kickoff_count   )
+               __field(        u32,                    event           )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->pp = pp;
+               __entry->timeout_count = timeout_count;
+               __entry->kickoff_count = kickoff_count;
+               __entry->event = event;
+       ),
+       TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+                 __entry->drm_id, __entry->pp, __entry->timeout_count,
+                 __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+       TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+       TP_ARGS(drm_id, intf_idx),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id                  )
+               __field(        enum dpu_intf,  intf_idx                )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->intf_idx = intf_idx;
+       ),
+       TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+       TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+                int refcnt),
+       TP_ARGS(drm_id, intf_idx, enable, refcnt),
+       TP_STRUCT__entry(
+               __field(        uint32_t,       drm_id          )
+               __field(        enum dpu_intf,  intf_idx        )
+               __field(        bool,           enable          )
+               __field(        int,            refcnt          )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->intf_idx = intf_idx;
+               __entry->enable = enable;
+               __entry->refcnt = refcnt;
+       ),
+       TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+                 __entry->intf_idx, __entry->enable ? "true" : "false",
+                 __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+       TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+                struct drm_plane_state *state, struct dpu_plane_state *pstate,
+                uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+                uint64_t modifier),
+       TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+               pixel_format, modifier),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               crtc_id         )
+               __field(        uint32_t,               plane_id        )
+               __field(        struct drm_plane_state*,state           )
+               __field(        struct dpu_plane_state*,pstate          )
+               __field(        uint32_t,               stage_idx       )
+               __field(        enum dpu_sspp,          sspp            )
+               __field(        uint32_t,               pixel_format    )
+               __field(        uint64_t,               modifier        )
+       ),
+       TP_fast_assign(
+               __entry->crtc_id = crtc_id;
+               __entry->plane_id = plane_id;
+               __entry->state = state;
+               __entry->pstate = pstate;
+               __entry->stage_idx = stage_idx;
+               __entry->sspp = sspp;
+               __entry->pixel_format = pixel_format;
+               __entry->modifier = modifier;
+       ),
+       TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+                 "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+                 "multirect_index:%d multirect_mode:%u pix_format:%u "
+                 "modifier:%llu",
+                 __entry->crtc_id, __entry->plane_id,
+                 __entry->state->fb ? __entry->state->fb->base.id : -1,
+                 __entry->state->src_w >> 16,  __entry->state->src_h >> 16,
+                 __entry->state->src_x >> 16,  __entry->state->src_y >> 16,
+                 __entry->state->crtc_w,  __entry->state->crtc_h,
+                 __entry->state->crtc_x,  __entry->state->crtc_y,
+                 __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+                 __entry->pstate->multirect_index,
+                 __entry->pstate->multirect_mode, __entry->pixel_format,
+                 __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+       TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+       TP_ARGS(drm_id, mixer, bounds),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id  )
+               __field(        int,                    mixer   )
+               __field(        struct drm_rect *,      bounds  )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->mixer = mixer;
+               __entry->bounds = bounds;
+       ),
+       TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+                 __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+       TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+                struct dpu_crtc *crtc),
+       TP_ARGS(drm_id, enc_id, enable, crtc),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id  )
+               __field(        uint32_t,               enc_id  )
+               __field(        bool,                   enable  )
+               __field(        struct dpu_crtc *,      crtc    )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->enc_id = enc_id;
+               __entry->enable = enable;
+               __entry->crtc = crtc;
+       ),
+       TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+                 "vblank_req:%s}",
+                 __entry->drm_id, __entry->enc_id,
+                 __entry->enable ? "true" : "false",
+                 __entry->crtc->enabled ? "true" : "false",
+                 __entry->crtc->suspend ? "true" : "false",
+                 __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+       TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+       TP_ARGS(drm_id, enable, crtc),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id  )
+               __field(        bool,                   enable  )
+               __field(        struct dpu_crtc *,      crtc    )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->enable = enable;
+               __entry->crtc = crtc;
+       ),
+       TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+                 __entry->drm_id, __entry->enable ? "true" : "false",
+                 __entry->crtc->enabled ? "true" : "false",
+                 __entry->crtc->suspend ? "true" : "false",
+                 __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+       TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+       TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+       TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+       TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+       TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+       TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+       TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+       TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+       TP_PROTO(uint32_t drm_id, int frame_pending),
+       TP_ARGS(drm_id, frame_pending),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        int,                    frame_pending   )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->frame_pending = frame_pending;
+       ),
+       TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+                 __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+       TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+                enum dpu_sspp_multirect_index multirect_index),
+       TP_ARGS(index, layout, multirect_index),
+       TP_STRUCT__entry(
+               __field(        enum dpu_sspp,                  index   )
+               __field(        struct dpu_hw_fmt_layout*,      layout  )
+               __field(        enum dpu_sspp_multirect_index,  multirect_index)
+       ),
+       TP_fast_assign(
+               __entry->index = index;
+               __entry->layout = layout;
+               __entry->multirect_index = multirect_index;
+       ),
+       TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+                 "multirect_index:%d", __entry->index, __entry->layout->width,
+                 __entry->layout->height, __entry->layout->plane_addr[0],
+                 __entry->layout->plane_size[0],
+                 __entry->layout->plane_addr[1],
+                 __entry->layout->plane_size[1],
+                 __entry->layout->plane_addr[2],
+                 __entry->layout->plane_size[2],
+                 __entry->layout->plane_addr[3],
+                 __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+       TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+       TP_ARGS(drm_id, is_virtual, multirect_mode),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               drm_id          )
+               __field(        bool,                   is_virtual      )
+               __field(        uint32_t,               multirect_mode  )
+       ),
+       TP_fast_assign(
+               __entry->drm_id = drm_id;
+               __entry->is_virtual = is_virtual;
+               __entry->multirect_mode = multirect_mode;
+       ),
+       TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+                 __entry->is_virtual ? "true" : "false",
+                 __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+       TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+       TP_ARGS(id, type, enc_id),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               id      )
+               __field(        enum dpu_hw_blk_type,   type    )
+               __field(        uint32_t,               enc_id  )
+       ),
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->type = type;
+               __entry->enc_id = enc_id;
+       ),
+       TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+                 __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+       TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+       TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+       TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+       TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+       TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+       TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+       TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+                uint32_t pp_id),
+       TP_ARGS(id, type, enc_id, pp_id),
+       TP_STRUCT__entry(
+               __field(        uint32_t,               id      )
+               __field(        enum dpu_hw_blk_type,   type    )
+               __field(        uint32_t,               enc_id  )
+               __field(        uint32_t,               pp_id   )
+       ),
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->type = type;
+               __entry->enc_id = enc_id;
+               __entry->pp_id = pp_id;
+       ),
+       TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+                 __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+       TP_PROTO(enum dpu_vbif index, u32 xin_id),
+       TP_ARGS(index, xin_id),
+       TP_STRUCT__entry(
+               __field(        enum dpu_vbif,  index   )
+               __field(        u32,            xin_id  )
+       ),
+       TP_fast_assign(
+               __entry->index = index;
+               __entry->xin_id = xin_id;
+       ),
+       TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+       TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+       TP_ARGS(pp, cfg),
+       TP_STRUCT__entry(
+               __field(        enum dpu_pingpong,      pp      )
+               __field(        u32,                    cfg     )
+       ),
+       TP_fast_assign(
+               __entry->pp = pp;
+               __entry->cfg = cfg;
+       ),
+       TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+       TP_PROTO(int irq_idx, int enable_count),
+       TP_ARGS(irq_idx, enable_count),
+       TP_STRUCT__entry(
+               __field(        int,    irq_idx         )
+               __field(        int,    enable_count    )
+       ),
+       TP_fast_assign(
+               __entry->irq_idx = irq_idx;
+               __entry->enable_count = enable_count;
+       ),
+       TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+                 __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+       TP_PROTO(int irq_idx, int enable_count),
+       TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+       TP_PROTO(int irq_idx, int enable_count),
+       TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+       TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+       TP_ARGS(irq_idx, callback),
+       TP_STRUCT__entry(
+               __field(        int,                            irq_idx )
+               __field(        struct dpu_irq_callback *,      callback)
+       ),
+       TP_fast_assign(
+               __entry->irq_idx = irq_idx;
+               __entry->callback = callback;
+       ),
+       TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+                 __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+       TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+       TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+       TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+       TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+       TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+       TP_ARGS(dev, stop_req, clk_rate),
+       TP_STRUCT__entry(
+               __field(        struct drm_device *,    dev             )
+               __field(        bool,                   stop_req        )
+               __field(        u64,                    clk_rate        )
+       ),
+       TP_fast_assign(
+               __entry->dev = dev;
+               __entry->stop_req = stop_req;
+               __entry->clk_rate = clk_rate;
+       ),
+       TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+                 __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+       trace_dpu_trace_counter(current->tgid, name, value)
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
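For context, a minimal usage sketch of the DPU_ATRACE helpers defined at the end of this header; the function and counter names below are illustrative assumptions, not part of this patch:

	/* Hedged sketch: bracket a hot path with the atrace markers above. */
	static void dpu_example_kickoff(void)		/* hypothetical caller */
	{
		DPU_ATRACE_FUNC();			/* begin marker named "dpu_example_kickoff" */
		DPU_ATRACE_INT("kickoff_pending", 1);	/* hypothetical counter name */
		/* ... program the hardware ... */
		DPU_ATRACE_INT("kickoff_pending", 0);
		DPU_ATRACE_END("dpu_example_kickoff");	/* name must match the begin marker */
	}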
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644 (file)
index 0000000..2955282
--- /dev/null
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:      Pointer to hardware vbif driver
+ * @xin_id:    Client interface identifier
+ * @return:    0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+       ktime_t timeout;
+       bool status;
+       int rc;
+
+       if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+               DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+               return -EINVAL;
+       }
+
+       timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+       for (;;) {
+               status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+               if (status)
+                       break;
+               if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+                       status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+                       break;
+               }
+               usleep_range(501, 1000);
+       }
+
+       if (!status) {
+               rc = -ETIMEDOUT;
+               DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+                               vbif->idx - VBIF_0, xin_id);
+       } else {
+               rc = 0;
+               DPU_DEBUG("VBIF %d client %d is halted\n",
+                               vbif->idx - VBIF_0, xin_id);
+       }
+
+       return rc;
+}
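+
+/*
+ * Illustrative timing (assuming a hypothetical xin_halt_timeout of 5000 us):
+ * the loop above re-reads the halt status every 501-1000 us and samples it
+ * one last time after the ktime deadline passes, i.e. roughly 5-10 polls
+ * before giving up with -ETIMEDOUT.
+ */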
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:      Pointer to hardware vbif driver
+ * @ot_lim:    Pointer to OT limit to be modified
+ * @params:    Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+               u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+       u64 pps;
+       const struct dpu_vbif_dynamic_ot_tbl *tbl;
+       u32 i;
+
+       if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+               return;
+
+       /* Dynamic OT setting done only for WFD */
+       if (!params->is_wfd)
+               return;
+
+       pps = params->frame_rate;
+       pps *= params->width;
+       pps *= params->height;
+
+       tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+                       &vbif->cap->dynamic_ot_wr_tbl;
+
+       for (i = 0; i < tbl->count; i++) {
+               if (pps <= tbl->cfg[i].pps) {
+                       *ot_lim = tbl->cfg[i].ot_limit;
+                       break;
+               }
+       }
+
+       DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+                       vbif->idx - VBIF_0, params->xin_id,
+                       params->width, params->height, params->frame_rate,
+                       pps, *ot_lim);
+}
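+
+/*
+ * Worked example (illustrative numbers, not from any catalog): a 1080p60
+ * WFD write client gives pps = 60 * 1920 * 1080 = 124,416,000. With a
+ * hypothetical dynamic_ot_wr_tbl of { {124416000, 2}, {248832000, 4} },
+ * the lookup above stops at the first entry whose pps threshold covers
+ * the computed rate and selects an OT limit of 2.
+ */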
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:      Pointer to hardware vbif driver
+ * @params:    Pointer to usecase parameters
+ * @return:    OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+       struct dpu_vbif_set_ot_params *params)
+{
+       u32 ot_lim = 0;
+       u32 val;
+
+       if (!vbif || !vbif->cap) {
+               DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+               return -EINVAL;
+       }
+
+       if (vbif->cap->default_ot_wr_limit && !params->rd)
+               ot_lim = vbif->cap->default_ot_wr_limit;
+       else if (vbif->cap->default_ot_rd_limit && params->rd)
+               ot_lim = vbif->cap->default_ot_rd_limit;
+
+       /*
+        * If default ot is not set from dt/catalog,
+        * then do not configure it.
+        */
+       if (ot_lim == 0)
+               goto exit;
+
+       /* Modify the limits if the target and the use case requires it */
+       _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+       if (vbif->ops.get_limit_conf) {
+               val = vbif->ops.get_limit_conf(vbif,
+                               params->xin_id, params->rd);
+               if (val == ot_lim)
+                       ot_lim = 0;
+       }
+
+exit:
+       DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+                       vbif->idx - VBIF_0, params->xin_id, ot_lim);
+       return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms:   DPU handler
+ * @params:    Pointer to usecase parameters
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+               struct dpu_vbif_set_ot_params *params)
+{
+       struct dpu_hw_vbif *vbif = NULL;
+       struct dpu_hw_mdp *mdp;
+       bool forced_on = false;
+       u32 ot_lim;
+       int ret, i;
+
+       if (!dpu_kms) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       }
+       mdp = dpu_kms->hw_mdp;
+
+       for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+               if (dpu_kms->hw_vbif[i] &&
+                               dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+                       vbif = dpu_kms->hw_vbif[i];
+       }
+
+       if (!vbif || !mdp) {
+               DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+                               vbif != 0, mdp != 0);
+               return;
+       }
+
+       if (!mdp->ops.setup_clk_force_ctrl ||
+                       !vbif->ops.set_limit_conf ||
+                       !vbif->ops.set_halt_ctrl)
+               return;
+
+       /* set write_gather_en for all write clients */
+       if (vbif->ops.set_write_gather_en && !params->rd)
+               vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+       ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+       if (ot_lim == 0)
+               goto exit;
+
+       trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+               params->vbif_idx);
+
+       forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+       vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+       vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+       ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+       if (ret)
+               trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+       vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+       if (forced_on)
+               mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+       return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+               struct dpu_vbif_set_qos_params *params)
+{
+       struct dpu_hw_vbif *vbif = NULL;
+       struct dpu_hw_mdp *mdp;
+       bool forced_on = false;
+       const struct dpu_vbif_qos_tbl *qos_tbl;
+       int i;
+
+       if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+               DPU_ERROR("invalid arguments\n");
+               return;
+       }
+       mdp = dpu_kms->hw_mdp;
+
+       for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+               if (dpu_kms->hw_vbif[i] &&
+                               dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+                       vbif = dpu_kms->hw_vbif[i];
+                       break;
+               }
+       }
+
+       if (!vbif || !vbif->cap) {
+               DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+               return;
+       }
+
+       if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+               DPU_DEBUG("qos remap not supported\n");
+               return;
+       }
+
+       qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+                       &vbif->cap->qos_nrt_tbl;
+
+       if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+               DPU_DEBUG("qos tbl not defined\n");
+               return;
+       }
+
+       forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+       for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+               DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+                               params->vbif_idx, params->xin_id, i,
+                               qos_tbl->priority_lvl[i]);
+               vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+                               qos_tbl->priority_lvl[i]);
+       }
+
+       if (forced_on)
+               mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
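+
+/*
+ * Worked example (hypothetical table, not from any catalog): with a
+ * qos_rt_tbl of npriority_lvl = 4 and priority_lvl[] = { 3, 3, 4, 4 },
+ * the loop above remaps the client's QoS levels 0..3 to priorities
+ * 3, 3, 4 and 4 respectively while the clock is forced on.
+ */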
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+       struct dpu_hw_vbif *vbif;
+       u32 i, pnd, src;
+
+       if (!dpu_kms) {
+               DPU_ERROR("invalid argument\n");
+               return;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+               vbif = dpu_kms->hw_vbif[i];
+               if (vbif && vbif->ops.clear_errors) {
+                       vbif->ops.clear_errors(vbif, &pnd, &src);
+                       if (pnd || src) {
+                               DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+                                             vbif->idx - VBIF_0, pnd, src);
+                       }
+               }
+       }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+       struct dpu_hw_vbif *vbif;
+       int i, j;
+
+       if (!dpu_kms) {
+               DPU_ERROR("invalid argument\n");
+               return;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+               vbif = dpu_kms->hw_vbif[i];
+               if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+                       for (j = 0; j < vbif->cap->memtype_count; j++)
+                               vbif->ops.set_mem_type(
+                                               vbif, j, vbif->cap->memtype[j]);
+               }
+       }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+       debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+       dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+       char vbif_name[32];
+       struct dentry *debugfs_vbif;
+       int i, j;
+
+       dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+       if (!dpu_kms->debugfs_vbif) {
+               DPU_ERROR("failed to create vbif debugfs\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+               struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+               snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+               debugfs_vbif = debugfs_create_dir(vbif_name,
+                               dpu_kms->debugfs_vbif);
+
+               debugfs_create_u32("features", 0600, debugfs_vbif,
+                       (u32 *)&vbif->features);
+
+               debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+                       (u32 *)&vbif->xin_halt_timeout);
+
+               debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+                       (u32 *)&vbif->default_ot_rd_limit);
+
+               debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+                       (u32 *)&vbif->default_ot_wr_limit);
+
+               for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+                       struct dpu_vbif_dynamic_ot_cfg *cfg =
+                                       &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+                       snprintf(vbif_name, sizeof(vbif_name),
+                                       "dynamic_ot_rd_%d_pps", j);
+                       debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+                                       (u64 *)&cfg->pps);
+                       snprintf(vbif_name, sizeof(vbif_name),
+                                       "dynamic_ot_rd_%d_ot_limit", j);
+                       debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+                                       (u32 *)&cfg->ot_limit);
+               }
+
+               for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+                       struct dpu_vbif_dynamic_ot_cfg *cfg =
+                                       &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+                       snprintf(vbif_name, sizeof(vbif_name),
+                                       "dynamic_ot_wr_%d_pps", j);
+                       debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+                                       (u64 *)&cfg->pps);
+                       snprintf(vbif_name, sizeof(vbif_name),
+                                       "dynamic_ot_wr_%d_ot_limit", j);
+                       debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+                                       (u32 *)&cfg->ot_limit);
+               }
+       }
+
+       return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644 (file)
index 0000000..f17af52
--- /dev/null
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
+struct dpu_vbif_set_ot_params {
+       u32 xin_id;
+       u32 num;
+       u32 width;
+       u32 height;
+       u32 frame_rate;
+       bool rd;
+       bool is_wfd;
+       u32 vbif_idx;
+       u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+       u32 xin_id;
+       u32 vbif_idx;
+       u32 clk_ctrl;
+       bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+       u32 vbif_idx;
+       u32 xin_id;
+       u32 clk_ctrl;
+       u32 num;
+       bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms:   DPU handler
+ * @params:    Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+               struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms:   DPU handler
+ * @params:    Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+               struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms:   DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms:   DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+               struct dentry *debugfs_root)
+{
+       return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
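A minimal caller sketch for the OT interface above; all values are illustrative assumptions, not taken from this patch, and dpu_kms is a hypothetical, already-initialized handle:

	struct dpu_vbif_set_ot_params ot_params = {
		.xin_id     = 0,	/* hypothetical client interface id */
		.num        = 0,	/* pipe id, debug only */
		.width      = 1920,
		.height     = 1080,
		.frame_rate = 60,
		.rd         = true,	/* read client */
		.is_wfd     = false,	/* dynamic OT applies to WFD only */
		.vbif_idx   = VBIF_0,
		.clk_ctrl   = 0,	/* hypothetical clock control id */
	};

	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);

Note that dpu_vbif_set_ot_limit() sleeps while waiting for the client to halt, so it must not be called from atomic context.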
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644 (file)
index 0000000..4f12e5c
--- /dev/null
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+       ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+       (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b) ? (__a) : (__b))
+#endif
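+
+/*
+ * Worked examples for the helpers above: MSM_MEDIA_ALIGN(1080, 32) takes
+ * the power-of-two masking path and yields 1088; MSM_MEDIA_ALIGN(100, 24)
+ * takes the divide/multiply path (24 is not a power of two) and yields
+ * 120; MSM_MEDIA_ROUNDUP(1080, 2) yields 540.
+ */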
+
+enum color_fmts {
+       /* Venus NV12:
+        * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+        * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+        * colour difference samples.
+        *
+        * <-------- Y/UV_Stride -------->
+        * <------- Width ------->
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              V
+        * U V U V U V U V U V U V . . . .  ^
+        * U V U V U V U V U V U V . . . .  |
+        * U V U V U V U V U V U V . . . .  |
+        * U V U V U V U V U V U V . . . .  UV_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+        *
+        * Y_Stride : Width aligned to 128
+        * UV_Stride : Width aligned to 128
+        * Y_Scanlines: Height aligned to 32
+        * UV_Scanlines: Height/2 aligned to 16
+        * Extradata: Arbitrary (software-imposed) padding
+        * Total size = align(Y_Stride * Y_Scanlines
+        *          + UV_Stride * UV_Scanlines
+        *          + max(Extradata, Y_Stride * 8), 4096)
+        */
+       COLOR_FMT_NV12,
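+
+       /* Worked example for COLOR_FMT_NV12 at 1920x1080, assuming zero
+        * Extradata: Y_Stride = UV_Stride = 1920, Y_Scanlines = 1088,
+        * UV_Scanlines = 544, so
+        * Total size = align(1920*1088 + 1920*544 + max(0, 1920*8), 4096)
+        *            = align(3148800, 4096) = 3149824 bytes.
+        */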
+
+       /* Venus NV21:
+        * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+        * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+        * colour difference samples.
+        *
+        * <-------- Y/UV_Stride -------->
+        * <------- Width ------->
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              V
+        * V U V U V U V U V U V U . . . .  ^
+        * V U V U V U V U V U V U . . . .  |
+        * V U V U V U V U V U V U . . . .  |
+        * V U V U V U V U V U V U . . . .  UV_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  --> Padding & Buffer size alignment
+        *
+        * Y_Stride : Width aligned to 128
+        * UV_Stride : Width aligned to 128
+        * Y_Scanlines: Height aligned to 32
+        * UV_Scanlines: Height/2 aligned to 16
+        * Extradata: Arbitrary (software-imposed) padding
+        * Total size = align(Y_Stride * Y_Scanlines
+        *          + UV_Stride * UV_Scanlines
+        *          + max(Extradata, Y_Stride * 8), 4096)
+        */
+       COLOR_FMT_NV21,
+       /* Venus NV12_MVTB:
+        * Two YUV 4:2:0 images/views one after the other
+        * in a top-bottom layout, same as NV12
+        * with a plane of 8 bit Y samples followed
+        * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+        * colour difference samples.
+        *
+        *
+        * <-------- Y/UV_Stride -------->
+        * <------- Width ------->
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+        * . . . . . . . . . . . . . . . .              |             View_1
+        * . . . . . . . . . . . . . . . .              |               |
+        * . . . . . . . . . . . . . . . .              |               |
+        * . . . . . . . . . . . . . . . .              V               |
+        * U V U V U V U V U V U V . . . .  ^                           |
+        * U V U V U V U V U V U V . . . .  |                           |
+        * U V U V U V U V U V U V . . . .  |                           |
+        * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+        * . . . . . . . . . . . . . . . .  |                           |
+        * . . . . . . . . . . . . . . . .  V                           V
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^               ^
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines      |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |               |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |               |
+        * . . . . . . . . . . . . . . . .              |             View_2
+        * . . . . . . . . . . . . . . . .              |               |
+        * . . . . . . . . . . . . . . . .              |               |
+        * . . . . . . . . . . . . . . . .              V               |
+        * U V U V U V U V U V U V . . . .  ^                           |
+        * U V U V U V U V U V U V . . . .  |                           |
+        * U V U V U V U V U V U V . . . .  |                           |
+        * U V U V U V U V U V U V . . . .  UV_Scanlines                |
+        * . . . . . . . . . . . . . . . .  |                           |
+        * . . . . . . . . . . . . . . . .  V                           V
+        * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+        *
+        * Y_Stride : Width aligned to 128
+        * UV_Stride : Width aligned to 128
+        * Y_Scanlines: Height aligned to 32
+        * UV_Scanlines: Height/2 aligned to 16
+        * View_1 begin at: 0 (zero)
+        * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+        * Extradata: Arbitrary (software-imposed) padding
+        * Total size = align((2*(Y_Stride * Y_Scanlines)
+        *          + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+        */
+       COLOR_FMT_NV12_MVTB,
+       /*
+        * The buffer can be of 2 types:
+        * (1) Venus NV12 UBWC Progressive
+        * (2) Venus NV12 UBWC Interlaced
+        *
+        * (1) Venus NV12 UBWC Progressive Buffer Format:
+        * Compressed Macro-tile format for NV12.
+        * Contains 4 planes in the following order -
+        * (A) Y_Meta_Plane
+        * (B) Y_UBWC_Plane
+        * (C) UV_Meta_Plane
+        * (D) UV_UBWC_Plane
+        *
+        * Y_Meta_Plane consists of meta information to decode compressed
+        * tile data in Y_UBWC_Plane.
+        * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+        * UBWC decoder block will use the Y_Meta_Plane data together with
+        * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+        *
+        * UV_Meta_Plane consists of meta information to decode compressed
+        * tile data in UV_UBWC_Plane.
+        * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+        * UBWC decoder block will use UV_Meta_Plane data together with
+        * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+        * subsampled color difference samples.
+        *
+        * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+        * and randomly accessible. There is no dependency between tiles.
+        *
+        * <----- Y_Meta_Stride ---->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      Height      |
+        * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <--Compressed tile Y Stride--->
+        * <------- Width ------->
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        * <----- UV_Meta_Stride ---->
+        * M M M M M M M M M M M M . .      ^
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      M_UV_Scanlines
+        * . . . . . . . . . . . . . .      |
+        * . . . . . . . . . . . . . .      V
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * <--Compressed tile UV Stride--->
+        * U* V* U* V* U* V* U* V* . . . .  ^
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        *
+        * Y_Stride = align(Width, 128)
+        * UV_Stride = align(Width, 128)
+        * Y_Scanlines = align(Height, 32)
+        * UV_Scanlines = align(Height/2, 16)
+        * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+        * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+        * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+        * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+        * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+        * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+        * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+        * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+        *           Y_Meta_Plane_size + UV_Meta_Plane_size
+        *           + max(Extradata, Y_Stride * 48), 4096)
+        *
+        *
+        * (2) Venus NV12 UBWC Interlaced Buffer Format:
+        * Compressed Macro-tile format for NV12 interlaced.
+        * Contains 8 planes in the following order -
+        * (A) Y_Meta_Top_Field_Plane
+        * (B) Y_UBWC_Top_Field_Plane
+        * (C) UV_Meta_Top_Field_Plane
+        * (D) UV_UBWC_Top_Field_Plane
+        * (E) Y_Meta_Bottom_Field_Plane
+        * (F) Y_UBWC_Bottom_Field_Plane
+        * (G) UV_Meta_Bottom_Field_Plane
+        * (H) UV_UBWC_Bottom_Field_Plane
+        * Y_Meta_Top_Field_Plane consists of meta information to decode
+        * compressed tile data for Y_UBWC_Top_Field_Plane.
+        * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+        * format for top field of an interlaced frame.
+        * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+        * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+        * 8 bit Y samples for top field of an interlaced frame.
+        *
+        * UV_Meta_Top_Field_Plane consists of meta information to decode
+        * compressed tile data in UV_UBWC_Top_Field_Plane.
+        * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+        * format for top field of an interlaced frame.
+        * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+        * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+        * 8 bit subsampled color difference samples for top field of an
+        * interlaced frame.
+        *
+        * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+        * independently decodable and randomly accessible. There is no
+        * dependency between tiles.
+        *
+        * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+        * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+        * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+        * format for bottom field of an interlaced frame.
+        * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+        * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+        * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+        *
+        * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+        * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+        * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+        * macro-tile format for bottom field of an interlaced frame.
+        * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+        * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+        * uncompressed 8 bit subsampled color difference samples for bottom
+        * field of an interlaced frame.
+        *
+        * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+        * independently decodable and randomly accessible. There is no
+        * dependency between tiles.
+        *
+        * <-----Y_TF_Meta_Stride---->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . . Half_height      |
+        * M M M M M M M M M M M M . .      |         Meta_Y_TF_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <-Compressed tile Y_TF Stride->
+        * <------- Width ------->
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_TF_Scanlines
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        * <----UV_TF_Meta_Stride---->
+        * M M M M M M M M M M M M . .      ^
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      M_UV_TF_Scanlines
+        * . . . . . . . . . . . . . .      |
+        * . . . . . . . . . . . . . .      V
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * <-Compressed tile UV_TF Stride->
+        * U* V* U* V* U* V* U* V* . . . .  ^
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  UV_TF_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        * <-----Y_BF_Meta_Stride---->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . . Half_height      |
+        * M M M M M M M M M M M M . .      |         Meta_Y_BF_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <-Compressed tile Y_BF Stride->
+        * <------- Width ------->
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height  |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_BF_Scanlines
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        * <----UV_BF_Meta_Stride---->
+        * M M M M M M M M M M M M . .      ^
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      M_UV_BF_Scanlines
+        * . . . . . . . . . . . . . .      |
+        * . . . . . . . . . . . . . .      V
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * <-Compressed tile UV_BF Stride->
+        * U* V* U* V* U* V* U* V* . . . .  ^
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  UV_BF_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        *
+        * Half_height = (Height+1)>>1
+        * Y_TF_Stride = align(Width, 128)
+        * UV_TF_Stride = align(Width, 128)
+        * Y_TF_Scanlines = align(Half_height, 32)
+        * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+        * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+        * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+        * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+        * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+        * Y_TF_Meta_Plane_size =
+        *     align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+        * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+        * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+        * UV_TF_Meta_Plane_size =
+        *     align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+        * Y_BF_Stride = align(Width, 128)
+        * UV_BF_Stride = align(Width, 128)
+        * Y_BF_Scanlines = align(Half_height, 32)
+        * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+        * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+        * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+        * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+        * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+        * Y_BF_Meta_Plane_size =
+        *     align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+        * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+        * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+        * UV_BF_Meta_Plane_size =
+        *     align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+        *           Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+        *           Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+        *           Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size +
+        *           max(Extradata, Y_TF_Stride * 48), 4096)
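+        *
+        * Worked example, assuming a 1920x1080 interlaced frame and the
+        * 32x8 luma tile used by the helper functions below:
+        * Half_height = 540
+        * Y_TF_Stride = align(1920, 128) = 1920
+        * Y_TF_Scanlines = align(540, 32) = 544
+        * Y_UBWC_TF_Plane_size = align(1920 * 544, 4096) = 1044480
+        * Y_TF_Meta_Stride = align(roundup(1920, 32), 64) = 64
+        * Y_TF_Meta_Scanlines = align(roundup(540, 8), 16) = 80
+        * Y_TF_Meta_Plane_size = align(64 * 80, 4096) = 8192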
+        */
+       COLOR_FMT_NV12_UBWC,
+       /* Venus NV12 10-bit UBWC:
+        * Compressed Macro-tile format for NV12.
+        * Contains 4 planes in the following order -
+        * (A) Y_Meta_Plane
+        * (B) Y_UBWC_Plane
+        * (C) UV_Meta_Plane
+        * (D) UV_UBWC_Plane
+        *
+        * Y_Meta_Plane consists of meta information to decode compressed
+        * tile data in Y_UBWC_Plane.
+        * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+        * UBWC decoder block will use the Y_Meta_Plane data together with
+        * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+        *
+        * UV_Meta_Plane consists of meta information to decode compressed
+        * tile data in UV_UBWC_Plane.
+        * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+        * UBWC decoder block will use UV_Meta_Plane data together with
+        * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+        * subsampled color difference samples.
+        *
+        * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+        * and randomly accessible. There is no dependency between tiles.
+        *
+        * <----- Y_Meta_Stride ----->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      Height      |
+        * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <--Compressed tile Y Stride--->
+        * <------- Width ------->
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        * <----- UV_Meta_Stride ---->
+        * M M M M M M M M M M M M . .      ^
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      M_UV_Scanlines
+        * . . . . . . . . . . . . . .      |
+        * . . . . . . . . . . . . . .      V
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * <--Compressed tile UV Stride--->
+        * U* V* U* V* U* V* U* V* . . . .  ^
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        *
+        *
+        * Y_Stride = align(align(Width, 192) * 4/3, 256)
+        * UV_Stride = align(align(Width, 192) * 4/3, 256)
+        * Y_Scanlines = align(Height, 16)
+        * UV_Scanlines = align(Height/2, 16)
+        * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+        * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+        * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+        * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+        * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+        * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+        * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+        * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+        *           Y_Meta_Plane_size + UV_Meta_Plane_size
+        *           + max(Extradata, Y_Stride * 48), 4096)
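+        *
+        * Worked example, assuming Width = 1280:
+        * Y_Stride = align(align(1280, 192) * 4/3, 256)
+        *          = align(1344 * 4/3, 256) = align(1792, 256) = 1792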
+        */
+       COLOR_FMT_NV12_BPP10_UBWC,
+       /* Venus RGBA8888 format:
+        * Contains 1 plane in the following order -
+        * (A) RGBA plane
+        *
+        * <-------- RGB_Stride -------->
+        * <------- Width ------->
+        * R R R R R R R R R R R R . . . .  ^           ^
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  Height      |
+        * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              V
+        *
+        * RGB_Stride = align(Width * 4, 128)
+        * RGB_Scanlines = align(Height, 32)
+        * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(RGB_Plane_size + Extradata, 4096)
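+        *
+        * Worked example, assuming a 1920x1080 frame:
+        * RGB_Stride = align(1920 * 4, 128) = 7680
+        * RGB_Scanlines = align(1080, 32) = 1088
+        * RGB_Plane_size = align(7680 * 1088, 4096) = 8355840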
+        */
+       COLOR_FMT_RGBA8888,
+       /* Venus RGBA8888 UBWC format:
+        * Contains 2 planes in the following order -
+        * (A) Meta plane
+        * (B) RGBA plane
+        *
+        * <--- RGB_Meta_Stride ---->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      Height      |
+        * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <-------- RGB_Stride -------->
+        * <------- Width ------->
+        * R R R R R R R R R R R R . . . .  ^           ^
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  Height      |
+        * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        *
+        * RGB_Stride = align(Width * 4, 128)
+        * RGB_Scanlines = align(Height, 32)
+        * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+        * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+        * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+        * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+        *              RGB_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+        *              Extradata, 4096)
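+        *
+        * Worked example, assuming a 1920x1080 frame and the 16x4 RGB tile
+        * used by the helper functions below:
+        * RGB_Meta_Stride = align(roundup(1920, 16), 64) = 128
+        * RGB_Meta_Scanlines = align(roundup(1080, 4), 16) = 272
+        * RGB_Meta_Plane_size = align(128 * 272, 4096) = 36864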
+        */
+       COLOR_FMT_RGBA8888_UBWC,
+       /* Venus RGBA1010102 UBWC format:
+        * Contains 2 planes in the following order -
+        * (A) Meta plane
+        * (B) RGBA plane
+        *
+        * <--- RGB_Meta_Stride ---->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      Height      |
+        * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <-------- RGB_Stride -------->
+        * <------- Width ------->
+        * R R R R R R R R R R R R . . . .  ^           ^
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  Height      |
+        * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        *
+        * RGB_Stride = align(Width * 4, 256)
+        * RGB_Scanlines = align(Height, 16)
+        * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+        * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+        * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+        * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+        *              RGB_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+        *              Extradata, 4096)
+        */
+       COLOR_FMT_RGBA1010102_UBWC,
+       /* Venus RGB565 UBWC format:
+        * Contains 2 planes in the following order -
+        * (A) Meta plane
+        * (B) RGB plane
+        *
+        * <--- RGB_Meta_Stride ---->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      Height      |
+        * M M M M M M M M M M M M . .      |       Meta_RGB_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <-------- RGB_Stride -------->
+        * <------- Width ------->
+        * R R R R R R R R R R R R . . . .  ^           ^
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  Height      |
+        * R R R R R R R R R R R R . . . .  |       RGB_Scanlines
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  |           |
+        * R R R R R R R R R R R R . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .    -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        *
+        * RGB_Stride = align(Width * 2, 128)
+        * RGB_Scanlines = align(Height, 16)
+        * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+        * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+        * RGB_Meta_Scanlines = align(roundup(Height, RGB_TileHeight), 16)
+        * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+        *              RGB_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+        *              Extradata, 4096)
+        */
+       COLOR_FMT_RGB565_UBWC,
+       /* P010 UBWC:
+        * Compressed Macro-tile format for P010.
+        * Contains 4 planes in the following order -
+        * (A) Y_Meta_Plane
+        * (B) Y_UBWC_Plane
+        * (C) UV_Meta_Plane
+        * (D) UV_UBWC_Plane
+        *
+        * Y_Meta_Plane consists of meta information to decode compressed
+        * tile data in Y_UBWC_Plane.
+        * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+        * UBWC decoder block will use the Y_Meta_Plane data together with
+        * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+        *
+        * UV_Meta_Plane consists of meta information to decode compressed
+        * tile data in UV_UBWC_Plane.
+        * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+        * UBWC decoder block will use UV_Meta_Plane data together with
+        * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+        * subsampled color difference samples.
+        *
+        * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+        * and randomly accessible. There is no dependency between tiles.
+        *
+        * <----- Y_Meta_Stride ----->
+        * <-------- Width ------>
+        * M M M M M M M M M M M M . .      ^           ^
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      Height      |
+        * M M M M M M M M M M M M . .      |         Meta_Y_Scanlines
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      |           |
+        * M M M M M M M M M M M M . .      V           |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .                  |
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . .                  V
+        * <--Compressed tile Y Stride--->
+        * <------- Width ------->
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  ^           ^
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  Height      |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |        Macro_tile_Y_Scanlines
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  |           |
+        * Y* Y* Y* Y* Y* Y* Y* Y* . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        * . . . . . . . . . . . . . . . .              V
+        * <----- UV_Meta_Stride ---->
+        * M M M M M M M M M M M M . .      ^
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      |
+        * M M M M M M M M M M M M . .      M_UV_Scanlines
+        * . . . . . . . . . . . . . .      |
+        * . . . . . . . . . . . . . .      V
+        * . . . . . . . . . . . . . .      -------> Buffer size aligned to 4k
+        * <--Compressed tile UV Stride--->
+        * U* V* U* V* U* V* U* V* . . . .  ^
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  |
+        * U* V* U* V* U* V* U* V* . . . .  UV_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  -------> Buffer size aligned to 4k
+        *
+        *
+        * Y_Stride = align(Width * 2, 256)
+        * UV_Stride = align(Width * 2, 256)
+        * Y_Scanlines = align(Height, 16)
+        * UV_Scanlines = align(Height/2, 16)
+        * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+        * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+        * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+        * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+        * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+        * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+        * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+        * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+        * Extradata = 8k
+        *
+        * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+        *           Y_Meta_Plane_size + UV_Meta_Plane_size
+        *           + max(Extradata, Y_Stride * 48), 4096)
+        */
+       COLOR_FMT_P010_UBWC,
+       /* Venus P010:
+        * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+        * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+        * colour difference samples.
+        *
+        * <-------- Y/UV_Stride -------->
+        * <------- Width ------->
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  ^           ^
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  Height      |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |          Y_Scanlines
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  |           |
+        * Y Y Y Y Y Y Y Y Y Y Y Y . . . .  V           |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              |
+        * . . . . . . . . . . . . . . . .              V
+        * U V U V U V U V U V U V . . . .  ^
+        * U V U V U V U V U V U V . . . .  |
+        * U V U V U V U V U V U V . . . .  |
+        * U V U V U V U V U V U V . . . .  UV_Scanlines
+        * . . . . . . . . . . . . . . . .  |
+        * . . . . . . . . . . . . . . . .  V
+        * . . . . . . . . . . . . . . . .  --> Buffer size alignment
+        *
+        * Y_Stride : Width * 2 aligned to 128
+        * UV_Stride : Width * 2 aligned to 128
+        * Y_Scanlines: Height aligned to 32
+        * UV_Scanlines: Height/2 aligned to 16
+        * Extradata: Arbitrary (software-imposed) padding
+        * Total size = align(Y_Stride * Y_Scanlines
+        *          + UV_Stride * UV_Scanlines
+        *          + max(Extradata, Y_Stride * 8), 4096)
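+        *
+        * Worked example, assuming a 1280x720 frame:
+        * Y_Stride = UV_Stride = align(1280 * 2, 128) = 2560
+        * Y_Scanlines = align(720, 32) = 736
+        * UV_Scanlines = align(720 / 2, 16) = 368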
+        */
+       COLOR_FMT_P010,
+};
+
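+/*
+ * The self-referential defines below expand to themselves; they exist so
+ * that users of this header can probe for the newer formats with #ifdef.
+ */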
+#define COLOR_FMT_RGBA1010102_UBWC     COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC          COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC            COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010         COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+       (void)height;
+       (void)width;
+
+       /*
+        * In the future this could be calculated from the width/height,
+        * but for now 16K is hardcoded since it satisfies all current
+        * use cases.
+        */
+       return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+       unsigned int alignment, stride = 0;
+
+       if (!width)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV21:
+       case COLOR_FMT_NV12:
+       case COLOR_FMT_NV12_MVTB:
+       case COLOR_FMT_NV12_UBWC:
+               alignment = 128;
+               stride = MSM_MEDIA_ALIGN(width, alignment);
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+               alignment = 256;
+               stride = MSM_MEDIA_ALIGN(width, 192);
+               stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+               break;
+       case COLOR_FMT_P010_UBWC:
+               alignment = 256;
+               stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+               break;
+       case COLOR_FMT_P010:
+               alignment = 128;
+               stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+               break;
+       default:
+               break;
+       }
+invalid_input:
+       return stride;
+}
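+
+/*
+ * Illustrative results, assuming width = 1280:
+ * VENUS_Y_STRIDE(COLOR_FMT_NV12, 1280) = align(1280, 128) = 1280
+ * VENUS_Y_STRIDE(COLOR_FMT_P010, 1280) = align(1280 * 2, 128) = 2560
+ */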
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+       unsigned int alignment, stride = 0;
+
+       if (!width)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV21:
+       case COLOR_FMT_NV12:
+       case COLOR_FMT_NV12_MVTB:
+       case COLOR_FMT_NV12_UBWC:
+               alignment = 128;
+               stride = MSM_MEDIA_ALIGN(width, alignment);
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+               alignment = 256;
+               stride = MSM_MEDIA_ALIGN(width, 192);
+               stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+               break;
+       case COLOR_FMT_P010_UBWC:
+               alignment = 256;
+               stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+               break;
+       case COLOR_FMT_P010:
+               alignment = 128;
+               stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+               break;
+       default:
+               break;
+       }
+invalid_input:
+       return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+       unsigned int alignment, sclines = 0;
+
+       if (!height)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV21:
+       case COLOR_FMT_NV12:
+       case COLOR_FMT_NV12_MVTB:
+       case COLOR_FMT_NV12_UBWC:
+       case COLOR_FMT_P010:
+               alignment = 32;
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+       case COLOR_FMT_P010_UBWC:
+               alignment = 16;
+               break;
+       default:
+               return 0;
+       }
+       sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+       return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+       unsigned int alignment, sclines = 0;
+
+       if (!height)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV21:
+       case COLOR_FMT_NV12:
+       case COLOR_FMT_NV12_MVTB:
+       case COLOR_FMT_NV12_BPP10_UBWC:
+       case COLOR_FMT_P010_UBWC:
+       case COLOR_FMT_P010:
+               alignment = 16;
+               break;
+       case COLOR_FMT_NV12_UBWC:
+               alignment = 32;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+       return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+       int y_tile_width = 0, y_meta_stride = 0;
+
+       if (!width)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV12_UBWC:
+       case COLOR_FMT_P010_UBWC:
+               y_tile_width = 32;
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+               y_tile_width = 48;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+       y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+       return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+       int y_tile_height = 0, y_meta_scanlines = 0;
+
+       if (!height)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV12_UBWC:
+               y_tile_height = 8;
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+       case COLOR_FMT_P010_UBWC:
+               y_tile_height = 4;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+       y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+       return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+       int uv_tile_width = 0, uv_meta_stride = 0;
+
+       if (!width)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV12_UBWC:
+       case COLOR_FMT_P010_UBWC:
+               uv_tile_width = 16;
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+               uv_tile_width = 24;
+               break;
+       default:
+               goto invalid_input;
+       }
+
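+       /*
+        * Chroma is 2x2 subsampled, so the tile count is taken over half
+        * the luma width before the stride is aligned to 64.
+        */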
+       uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+       uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+       return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+       int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+       if (!height)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV12_UBWC:
+               uv_tile_height = 8;
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+       case COLOR_FMT_P010_UBWC:
+               uv_tile_height = 4;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+       uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+       return uv_meta_scanlines;
+}
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+       unsigned int alignment = 0, stride = 0, bpp = 4;
+
+       if (!width)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_RGBA8888:
+               alignment = 128;
+               break;
+       case COLOR_FMT_RGB565_UBWC:
+               alignment = 256;
+               bpp = 2;
+               break;
+       case COLOR_FMT_RGBA8888_UBWC:
+       case COLOR_FMT_RGBA1010102_UBWC:
+               alignment = 256;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+       return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+       unsigned int alignment = 0, scanlines = 0;
+
+       if (!height)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_RGBA8888:
+               alignment = 32;
+               break;
+       case COLOR_FMT_RGBA8888_UBWC:
+       case COLOR_FMT_RGBA1010102_UBWC:
+       case COLOR_FMT_RGB565_UBWC:
+               alignment = 16;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+       return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+       int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+       if (!width)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_RGBA8888_UBWC:
+       case COLOR_FMT_RGBA1010102_UBWC:
+       case COLOR_FMT_RGB565_UBWC:
+               rgb_tile_width = 16;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+       rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+       return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+       int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+       if (!height)
+               goto invalid_input;
+
+       switch (color_fmt) {
+       case COLOR_FMT_RGBA8888_UBWC:
+       case COLOR_FMT_RGBA1010102_UBWC:
+       case COLOR_FMT_RGB565_UBWC:
+               rgb_tile_height = 4;
+               break;
+       default:
+               goto invalid_input;
+       }
+
+       rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+       rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+       return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
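+ *
+ * Worked example, assuming a 1920x1080 COLOR_FMT_NV12 buffer:
+ * y_stride = 1920, y_sclines = 1088, uv_stride = 1920, uv_sclines = 544;
+ * size = align(1920 * 1088 + (1920 * 544 + 4096)
+ *        + max(16384, 8 * 1920), 4096) = 3153920 bytes.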
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+       int color_fmt, int width, int height)
+{
+       const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+       unsigned int uv_alignment = 0, size = 0;
+       unsigned int y_plane, uv_plane, y_stride,
+               uv_stride, y_sclines, uv_sclines;
+       unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+       unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+       unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+       unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+       unsigned int rgb_stride = 0, rgb_scanlines = 0;
+       unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+       unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+       if (!width || !height)
+               goto invalid_input;
+
+       y_stride = VENUS_Y_STRIDE(color_fmt, width);
+       uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+       y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+       uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+       rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+       rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+       switch (color_fmt) {
+       case COLOR_FMT_NV21:
+       case COLOR_FMT_NV12:
+       case COLOR_FMT_P010:
+               uv_alignment = 4096;
+               y_plane = y_stride * y_sclines;
+               uv_plane = uv_stride * uv_sclines + uv_alignment;
+               size = y_plane + uv_plane +
+                               MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       case COLOR_FMT_NV12_MVTB:
+               uv_alignment = 4096;
+               y_plane = y_stride * y_sclines;
+               uv_plane = uv_stride * uv_sclines + uv_alignment;
+               size = y_plane + uv_plane;
+               size = 2 * size + extra_size;
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       case COLOR_FMT_NV12_UBWC:
+               y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+               y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+               uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+               uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+               y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+               y_meta_scanlines =
+                       VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+               y_meta_plane = MSM_MEDIA_ALIGN(
+                       y_meta_stride * y_meta_scanlines, 4096);
+               uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+               uv_meta_scanlines =
+                       VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+               uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+                       uv_meta_scanlines, 4096);
+
+               size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+                       uv_meta_plane)*2 +
+                       MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       case COLOR_FMT_NV12_BPP10_UBWC:
+               y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+               uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+               y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+               y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+               y_meta_plane = MSM_MEDIA_ALIGN(
+                               y_meta_stride * y_meta_scanlines, 4096);
+               uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+               uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+               uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+                                       uv_meta_scanlines, 4096);
+
+               size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+                       uv_meta_plane +
+                       MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       case COLOR_FMT_P010_UBWC:
+               y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+               uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+               y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+               y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+               y_meta_plane = MSM_MEDIA_ALIGN(
+                               y_meta_stride * y_meta_scanlines, 4096);
+               uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+               uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+               uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+                                       uv_meta_scanlines, 4096);
+
+               size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+                       uv_meta_plane;
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       case COLOR_FMT_RGBA8888:
+               rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+               size = rgb_plane;
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       case COLOR_FMT_RGBA8888_UBWC:
+       case COLOR_FMT_RGBA1010102_UBWC:
+       case COLOR_FMT_RGB565_UBWC:
+               rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+                                                       4096);
+               rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+               rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+                                       height);
+               rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+                                       rgb_meta_scanlines, 4096);
+               size = rgb_ubwc_plane + rgb_meta_plane;
+               size = MSM_MEDIA_ALIGN(size, 4096);
+               break;
+       default:
+               break;
+       }
+invalid_input:
+       return size;
+}
+
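+/*
+ * For COLOR_FMT_NV12_MVTB this returns the byte offset of the second view,
+ * which starts right after the first view's Y and UV planes; all other
+ * formats yield an offset of 0.
+ */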
+static inline unsigned int VENUS_VIEW2_OFFSET(
+       int color_fmt, int width, int height)
+{
+       unsigned int offset = 0;
+       unsigned int y_plane, uv_plane, y_stride,
+               uv_stride, y_sclines, uv_sclines;
+       if (!width || !height)
+               goto invalid_input;
+
+       y_stride = VENUS_Y_STRIDE(color_fmt, width);
+       uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+       y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+       uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+       switch (color_fmt) {
+       case COLOR_FMT_NV12_MVTB:
+               y_plane = y_stride * y_sclines;
+               uv_plane = uv_stride * uv_sclines;
+               offset = y_plane + uv_plane;
+               break;
+       default:
+               break;
+       }
+invalid_input:
+       return offset;
+}
+
+#endif
index b001699297c486ab075a76bedd3c27d0c95572ee..457c29dba4a1a096012fad1fd7b21e3b47cdb22e 100644 (file)
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
                int idx = idxs[pipe_id];
                if (idx > 0) {
                        const struct mdp_format *format =
-                                       to_mdp_format(msm_framebuffer_format(plane->fb));
+                                       to_mdp_format(msm_framebuffer_format(plane->state->fb));
                        alpha[idx-1] = format->alpha_enable;
                }
        }
@@ -665,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
        drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
                                  NULL);
        drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
-       plane->crtc = crtc;
 
        return crtc;
 }
index 4b646bf9c2146329fa8bde9763537d0186f82b07..44d1cda56974d7f6c106329ae0fac034fe4d38c6 100644 (file)
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
 
+       drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
        /* see 119ecb7fd */
        for_each_new_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_put(crtc);
index 4a645926edb7d1654978861b17cedc9c269ad0b3..2bfb39082f54dd7f9e9c51e038a46e2955ed03c8 100644 (file)
@@ -341,7 +341,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
        mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
 
        panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
-       if (panel) {
+       if (!IS_ERR(panel)) {
                drm_panel_disable(panel);
                drm_panel_unprepare(panel);
        }
@@ -410,7 +410,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
                dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
        panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
-       if (panel) {
+       if (!IS_ERR(panel)) {
                drm_panel_prepare(panel);
                drm_panel_enable(panel);
        }
index e3b1c86b7aaeaeecf39d0fff91cad1f6c1f325b8..5368e621999ce6eddf0a5826c11356b0bc197421 100644 (file)
@@ -34,9 +34,12 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
        struct mdp4_lvds_connector *mdp4_lvds_connector =
                        to_mdp4_lvds_connector(connector);
 
-       if (!mdp4_lvds_connector->panel)
+       if (!mdp4_lvds_connector->panel) {
                mdp4_lvds_connector->panel =
                        of_drm_find_panel(mdp4_lvds_connector->panel_node);
+               if (IS_ERR(mdp4_lvds_connector->panel))
+                       mdp4_lvds_connector->panel = NULL;
+       }
 
        return mdp4_lvds_connector->panel ?
                        connector_status_connected :
@@ -129,7 +132,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return connector;
 }
index 20e956e14c2153fd31a3bfa170416e49e191f7a4..79ff653d8081e63d4aff0c57aa04037c24b7f58b 100644 (file)
@@ -68,7 +68,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
 {
        struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 
-       drm_plane_helper_disable(plane);
+       drm_plane_helper_disable(plane, NULL);
        drm_plane_cleanup(plane);
 
        kfree(mdp4_plane);
@@ -167,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
                        msm_framebuffer_iova(fb, kms->aspace, 2));
        mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
                        msm_framebuffer_iova(fb, kms->aspace, 3));
-
-       plane->fb = fb;
 }
 
 static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
index 10271359789e2b150c2268ca1500e7379e10b2aa..b1da9ce54379099f3fc33e4f48a49cd007b74fa1 100644 (file)
@@ -65,7 +65,7 @@ struct mdp5_crtc {
                struct drm_gem_object *scanout_bo;
                uint64_t iova;
                uint32_t width, height;
-               uint32_t x, y;
+               int x, y;
        } cursor;
 };
 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -760,20 +760,31 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
         * Cursor Region Of Interest (ROI) is a plane read from cursor
         * buffer to render. The ROI region is determined by the visibility of
         * the cursor point. In the default Cursor image the cursor point will
-        * be at the top left of the cursor image, unless it is specified
-        * otherwise using hotspot feature.
+        * be at the top left of the cursor image.
         *
+        * Without rotation:
         * If the cursor point reaches the right (xres - x < cursor.width) or
         * bottom (yres - y < cursor.height) boundary of the screen, then ROI
         * width and ROI height need to be evaluated to crop the cursor image
         * accordingly.
         * (xres-x) will be new cursor width when x > (xres - cursor.width)
         * (yres-y) will be new cursor height when y > (yres - cursor.height)
+        *
+        * With rotation:
+        * We get negative x and/or y coordinates.
+        * (cursor.width - abs(x)) will be new cursor width when x < 0
+        * (cursor.height - abs(y)) will be new cursor height when y < 0
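+        * E.g. a 64x64 cursor at x = -16 gives roi_w = 64 - 16 = 48.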
         */
-       *roi_w = min(mdp5_crtc->cursor.width, xres -
+       if (mdp5_crtc->cursor.x >= 0)
+               *roi_w = min(mdp5_crtc->cursor.width, xres -
                        mdp5_crtc->cursor.x);
-       *roi_h = min(mdp5_crtc->cursor.height, yres -
+       else
+               *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+       if (mdp5_crtc->cursor.y >= 0)
+               *roi_h = min(mdp5_crtc->cursor.height, yres -
                        mdp5_crtc->cursor.y);
+       else
+               *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
 }
 
 static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
@@ -783,7 +794,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t blendcfg, stride;
-       uint32_t x, y, width, height;
+       uint32_t x, y, src_x, src_y, width, height;
        uint32_t roi_w, roi_h;
        int lm;
 
@@ -800,6 +811,26 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
 
        get_roi(crtc, &roi_w, &roi_h);
 
+       /* If the cursor buffer overlaps the upper or left screen
+        * border due to rotation, the pixel offset of the ROI
+        * inside the cursor buffer is the positive overlap
+        * distance.
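+        * E.g. cursor.x = -16 maps to src_x = 16 with x forced to 0.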
+        */
+       if (mdp5_crtc->cursor.x < 0) {
+               src_x = abs(mdp5_crtc->cursor.x);
+               x = 0;
+       } else {
+               src_x = 0;
+       }
+       if (mdp5_crtc->cursor.y < 0) {
+               src_y = abs(mdp5_crtc->cursor.y);
+               y = 0;
+       } else {
+               src_y = 0;
+       }
+       DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+               crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                        MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -812,6 +843,9 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
                        MDP5_LM_CURSOR_START_XY_Y_START(y) |
                        MDP5_LM_CURSOR_START_XY_X_START(x));
+       mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+                       MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+                       MDP5_LM_CURSOR_XY_SRC_X(src_x));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
                        mdp5_crtc->cursor.iova);
 
@@ -932,8 +966,9 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        if (unlikely(!crtc->state->enable))
                return 0;
 
-       mdp5_crtc->cursor.x = x = max(x, 0);
-       mdp5_crtc->cursor.y = y = max(y, 0);
+       /* accept negative x/y coordinates up to maximum cursor overlap */
+       mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+       mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
 
        get_roi(crtc, &roi_w, &roi_h);
 
@@ -1207,7 +1242,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                        "unref cursor", unref_cursor_worker);
 
        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
-       plane->crtc = crtc;
 
        return crtc;
 }
index 9af94e35f678dd9f2a445989fdb8c98278ad60ac..fcd44d1d10682b6bf063ecf60848c635283689eb 100644 (file)
@@ -319,7 +319,17 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
 
        mdp5_cstate->ctl = ctl;
        mdp5_cstate->pipeline.intf = intf;
-       mdp5_cstate->defer_start = true;
+
+       /*
+        * This is a bit awkward, but we want to flush the CTL and hit the
+        * START bit at most once for an atomic update.  In the non-full-
+        * modeset case, this is done from crtc->atomic_flush(), but that
+        * is too early in the case of full modeset, in which case we
+        * defer to encoder->enable().  But we need to *know* whether
+        * encoder->enable() will be called to do this:
+        */
+       if (drm_atomic_crtc_needs_modeset(crtc_state))
+               mdp5_cstate->defer_start = true;
 
        return 0;
 }
index 6e12e275debae437877f566cefbe4d909e1b7f97..bddd625ab91bd44a56824c3e30a8ea2edefe46ff 100644 (file)
@@ -170,6 +170,8 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
        struct device *dev = &mdp5_kms->pdev->dev;
        struct mdp5_global_state *global_state;
 
+       drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
        global_state = mdp5_get_existing_global_state(mdp5_kms);
 
        if (mdp5_kms->smp)
index f2a0db7a8a031d7ee308139e71aca8b47663663b..1cc4e57f0226f89c40c5b78baa3045138c89470d 100644 (file)
 #include "msm_drv.h"
 #include "mdp5_kms.h"
 
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
-       struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+       struct msm_mdss base;
 
        void __iomem *mmio, *vbif;
 
@@ -41,22 +39,22 @@ struct msm_mdss {
        } irqcontroller;
 };
 
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
 {
-       msm_writel(data, mdss->mmio + reg);
+       msm_writel(data, mdp5_mdss->mmio + reg);
 }
 
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
 {
-       return msm_readl(mdss->mmio + reg);
+       return msm_readl(mdp5_mdss->mmio + reg);
 }
 
 static irqreturn_t mdss_irq(int irq, void *arg)
 {
-       struct msm_mdss *mdss = arg;
+       struct mdp5_mdss *mdp5_mdss = arg;
        u32 intr;
 
-       intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+       intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
 
        VERB("intr=%08x", intr);
 
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
                irq_hw_number_t hwirq = fls(intr) - 1;
 
                generic_handle_irq(irq_find_mapping(
-                               mdss->irqcontroller.domain, hwirq));
+                               mdp5_mdss->irqcontroller.domain, hwirq));
                intr &= ~(1 << hwirq);
        }
 
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
 
 static void mdss_hw_mask_irq(struct irq_data *irqd)
 {
-       struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+       struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
 
        smp_mb__before_atomic();
-       clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+       clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
        smp_mb__after_atomic();
 }
 
 static void mdss_hw_unmask_irq(struct irq_data *irqd)
 {
-       struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+       struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
 
        smp_mb__before_atomic();
-       set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+       set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
        smp_mb__after_atomic();
 }
 
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
 static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
                                 irq_hw_number_t hwirq)
 {
-       struct msm_mdss *mdss = d->host_data;
+       struct mdp5_mdss *mdp5_mdss = d->host_data;
 
        if (!(VALID_IRQS & (1 << hwirq)))
                return -EPERM;
 
        irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
-       irq_set_chip_data(irq, mdss);
+       irq_set_chip_data(irq, mdp5_mdss);
 
        return 0;
 }
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
 };
 
 
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
 {
-       struct device *dev = mdss->dev->dev;
+       struct device *dev = mdp5_mdss->base.dev->dev;
        struct irq_domain *d;
 
        d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
-                                 mdss);
+                                 mdp5_mdss);
        if (!d) {
                dev_err(dev, "mdss irq domain add failed\n");
                return -ENXIO;
        }
 
-       mdss->irqcontroller.enabled_mask = 0;
-       mdss->irqcontroller.domain = d;
+       mdp5_mdss->irqcontroller.enabled_mask = 0;
+       mdp5_mdss->irqcontroller.domain = d;
 
        return 0;
 }
 
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
 {
+       struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
        DBG("");
 
-       clk_prepare_enable(mdss->ahb_clk);
-       if (mdss->axi_clk)
-               clk_prepare_enable(mdss->axi_clk);
-       if (mdss->vsync_clk)
-               clk_prepare_enable(mdss->vsync_clk);
+       clk_prepare_enable(mdp5_mdss->ahb_clk);
+       if (mdp5_mdss->axi_clk)
+               clk_prepare_enable(mdp5_mdss->axi_clk);
+       if (mdp5_mdss->vsync_clk)
+               clk_prepare_enable(mdp5_mdss->vsync_clk);
 
        return 0;
 }
 
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
 {
+       struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
        DBG("");
 
-       if (mdss->vsync_clk)
-               clk_disable_unprepare(mdss->vsync_clk);
-       if (mdss->axi_clk)
-               clk_disable_unprepare(mdss->axi_clk);
-       clk_disable_unprepare(mdss->ahb_clk);
+       if (mdp5_mdss->vsync_clk)
+               clk_disable_unprepare(mdp5_mdss->vsync_clk);
+       if (mdp5_mdss->axi_clk)
+               clk_disable_unprepare(mdp5_mdss->axi_clk);
+       clk_disable_unprepare(mdp5_mdss->ahb_clk);
 
        return 0;
 }
 
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
 {
-       struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+       struct platform_device *pdev =
+                       to_platform_device(mdp5_mdss->base.dev->dev);
 
-       mdss->ahb_clk = msm_clk_get(pdev, "iface");
-       if (IS_ERR(mdss->ahb_clk))
-               mdss->ahb_clk = NULL;
+       mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+       if (IS_ERR(mdp5_mdss->ahb_clk))
+               mdp5_mdss->ahb_clk = NULL;
 
-       mdss->axi_clk = msm_clk_get(pdev, "bus");
-       if (IS_ERR(mdss->axi_clk))
-               mdss->axi_clk = NULL;
+       mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+       if (IS_ERR(mdp5_mdss->axi_clk))
+               mdp5_mdss->axi_clk = NULL;
 
-       mdss->vsync_clk = msm_clk_get(pdev, "vsync");
-       if (IS_ERR(mdss->vsync_clk))
-               mdss->vsync_clk = NULL;
+       mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+       if (IS_ERR(mdp5_mdss->vsync_clk))
+               mdp5_mdss->vsync_clk = NULL;
 
        return 0;
 }
 
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       struct msm_mdss *mdss = priv->mdss;
+       struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
 
-       if (!mdss)
+       if (!mdp5_mdss)
                return;
 
-       irq_domain_remove(mdss->irqcontroller.domain);
-       mdss->irqcontroller.domain = NULL;
+       irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+       mdp5_mdss->irqcontroller.domain = NULL;
 
-       regulator_disable(mdss->vdd);
+       regulator_disable(mdp5_mdss->vdd);
 
        pm_runtime_disable(dev->dev);
 }
 
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+       .enable = mdp5_mdss_enable,
+       .disable = mdp5_mdss_disable,
+       .destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev->dev);
        struct msm_drm_private *priv = dev->dev_private;
-       struct msm_mdss *mdss;
+       struct mdp5_mdss *mdp5_mdss;
        int ret;
 
        DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
        if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
                return 0;
 
-       mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
-       if (!mdss) {
+       mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+       if (!mdp5_mdss) {
                ret = -ENOMEM;
                goto fail;
        }
 
-       mdss->dev = dev;
+       mdp5_mdss->base.dev = dev;
 
-       mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
-       if (IS_ERR(mdss->mmio)) {
-               ret = PTR_ERR(mdss->mmio);
+       mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+       if (IS_ERR(mdp5_mdss->mmio)) {
+               ret = PTR_ERR(mdp5_mdss->mmio);
                goto fail;
        }
 
-       mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
-       if (IS_ERR(mdss->vbif)) {
-               ret = PTR_ERR(mdss->vbif);
+       mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+       if (IS_ERR(mdp5_mdss->vbif)) {
+               ret = PTR_ERR(mdp5_mdss->vbif);
                goto fail;
        }
 
-       ret = msm_mdss_get_clocks(mdss);
+       ret = msm_mdss_get_clocks(mdp5_mdss);
        if (ret) {
                dev_err(dev->dev, "failed to get clocks: %d\n", ret);
                goto fail;
        }
 
        /* Regulator to enable GDSCs in downstream kernels */
-       mdss->vdd = devm_regulator_get(dev->dev, "vdd");
-       if (IS_ERR(mdss->vdd)) {
-               ret = PTR_ERR(mdss->vdd);
+       mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+       if (IS_ERR(mdp5_mdss->vdd)) {
+               ret = PTR_ERR(mdp5_mdss->vdd);
                goto fail;
        }
 
-       ret = regulator_enable(mdss->vdd);
+       ret = regulator_enable(mdp5_mdss->vdd);
        if (ret) {
                dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
                        ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
        }
 
        ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
-                              mdss_irq, 0, "mdss_isr", mdss);
+                              mdss_irq, 0, "mdss_isr", mdp5_mdss);
        if (ret) {
                dev_err(dev->dev, "failed to init irq: %d\n", ret);
                goto fail_irq;
        }
 
-       ret = mdss_irq_domain_init(mdss);
+       ret = mdss_irq_domain_init(mdp5_mdss);
        if (ret) {
                dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
                goto fail_irq;
        }
 
-       priv->mdss = mdss;
+       mdp5_mdss->base.funcs = &mdss_funcs;
+       priv->mdss = &mdp5_mdss->base;
 
        pm_runtime_enable(dev->dev);
 
        return 0;
 fail_irq:
-       regulator_disable(mdss->vdd);
+       regulator_disable(mdp5_mdss->vdd);
 fail:
        return ret;
 }
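
The mdp5_mdss conversion above embeds the generic struct msm_mdss as a base
inside the MDP5-specific wrapper and recovers the wrapper with to_mdp5_mdss()
wherever a callback receives the base pointer. A minimal sketch of this
embedding pattern, using hypothetical names rather than the driver's real
structs:

#include <linux/kernel.h>	/* container_of() */

/* Illustrative only: mirrors the base-plus-wrapper layout above. */
struct example_base {
	const struct msm_mdss_funcs *funcs;	/* filled in by the variant */
};

struct example_mdp5 {
	struct example_base base;	/* embedded, never a pointer */
	unsigned long enabled_mask;	/* variant-private state */
};

/* Recover the wrapper from the embedded base, as to_mdp5_mdss() does. */
#define to_example_mdp5(x) container_of(x, struct example_mdp5, base)
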
index e09bc53a0e6543eb1093dc46c22617fd53647e0b..7d306c5acd09644ff62b7e7d62f6590390a90344 100644 (file)
@@ -46,7 +46,7 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 
-       drm_plane_helper_disable(plane);
+       drm_plane_helper_disable(plane, NULL);
        drm_plane_cleanup(plane);
 
        kfree(mdp5_plane);
@@ -512,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
        if (plane_enabled(new_state)) {
                struct mdp5_ctl *ctl;
                struct mdp5_pipeline *pipeline =
-                                       mdp5_crtc_get_pipeline(plane->crtc);
+                                       mdp5_crtc_get_pipeline(new_state->crtc);
                int ret;
 
                ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -1029,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
                                     src_img_w, src_img_h,
                                     src_x + src_w, src_y, src_w, src_h);
 
-       plane->fb = fb;
-
        return ret;
 }
 
index b744bcc7d8ad0e3f4548277ea5c587d9832e09d2..ff8164cc6738d8d16499862e620a999330a7aaf7 100644 (file)
@@ -208,6 +208,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
+       if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+               goto fail;
+
        msm_dsi->encoder = encoder;
 
        msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
index 70d9a9a47acd53bbed826f05ee30a49d50a92fc6..08f3fc6771b7829de66c1af953cbb3abb16ccee2 100644 (file)
@@ -100,6 +100,7 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
 void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
 void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
 
 /* msm dsi */
 static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -149,6 +150,7 @@ static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
 #endif
 
 /* dsi host */
+struct msm_dsi_host;
 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
                                        const struct mipi_dsi_msg *msg);
 void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
@@ -162,7 +164,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
 int msm_dsi_host_enable(struct mipi_dsi_host *host);
 int msm_dsi_host_disable(struct mipi_dsi_host *host);
 int msm_dsi_host_power_on(struct mipi_dsi_host *host,
-                       struct msm_dsi_phy_shared_timings *phy_shared_timings);
+                       struct msm_dsi_phy_shared_timings *phy_shared_timings,
+                       bool is_dual_dsi);
 int msm_dsi_host_power_off(struct mipi_dsi_host *host);
 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
                                        struct drm_display_mode *mode);
@@ -175,13 +178,29 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
                        struct msm_dsi_pll *src_pll);
 void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
-       struct msm_dsi_phy_clk_request *clk_req);
+       struct msm_dsi_phy_clk_request *clk_req,
+       bool is_dual_dsi);
 void msm_dsi_host_destroy(struct mipi_dsi_host *host);
 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
                                        struct drm_device *dev);
 int msm_dsi_host_init(struct msm_dsi *msm_dsi);
 int msm_dsi_runtime_suspend(struct device *dev);
 int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
 
 /* dsi phy */
 struct msm_dsi_phy;
index 0327bb54b01b43404591718c748c3cf1b52a28c7..dcdfb1bb54f98a164a8db5d785cd01e8b884c5bd 100644 (file)
@@ -136,20 +136,58 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
        .num_dsi = 2,
 };
 
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+       .link_clk_enable = dsi_link_clk_enable_v2,
+       .link_clk_disable = dsi_link_clk_disable_v2,
+       .clk_init_ver = dsi_clk_init_v2,
+       .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+       .tx_buf_get = dsi_tx_buf_get_v2,
+       .tx_buf_put = NULL,
+       .dma_base_get = dsi_dma_base_get_v2,
+       .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+       .link_clk_enable = dsi_link_clk_enable_6g,
+       .link_clk_disable = dsi_link_clk_disable_6g,
+       .clk_init_ver = NULL,
+       .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+       .tx_buf_get = dsi_tx_buf_get_6g,
+       .tx_buf_put = dsi_tx_buf_put_6g,
+       .dma_base_get = dsi_dma_base_get_6g,
+       .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+       .link_clk_enable = dsi_link_clk_enable_6g,
+       .link_clk_disable = dsi_link_clk_disable_6g,
+       .clk_init_ver = dsi_clk_init_6g_v2,
+       .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+       .tx_buf_get = dsi_tx_buf_get_6g,
+       .tx_buf_put = dsi_tx_buf_put_6g,
+       .dma_base_get = dsi_dma_base_get_6g,
+       .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
 static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
-       {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
+       {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+               &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
-                                               &msm8974_apq8084_dsi_cfg},
+               &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
-                                               &msm8974_apq8084_dsi_cfg},
+               &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
-                                               &msm8974_apq8084_dsi_cfg},
+               &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
-                                               &msm8974_apq8084_dsi_cfg},
-       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
-       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
-       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
-       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
+               &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+               &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+               &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+               &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+               &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
 };
 
 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
index 9cfdcf1c95d5ecfb47f4dc1f515f1bd99a08984a..16c5079111109ca71f3192b2480cdb2c0003bcaa 100644 (file)
@@ -40,10 +40,22 @@ struct msm_dsi_config {
        const int num_dsi;
 };
 
+struct msm_dsi_host_cfg_ops {
+       int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+       void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+       int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+       int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+       void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+       void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+       int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+       int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
 struct msm_dsi_cfg_handler {
        u32 major;
        u32 minor;
        const struct msm_dsi_config *cfg;
+       const struct msm_dsi_host_cfg_ops *ops;
 };
 
 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
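
With the ops table hanging off the cfg handler, the host code stops branching
on MSM_DSI_VER_MAJOR_* and instead calls through these pointers, treating NULL
entries (clk_init_ver, tx_buf_put) as optional hooks. A hedged sketch of the
guard-and-call pattern, with a hypothetical helper name:

/* Sketch only: guard optional hooks, call mandatory ones directly. */
static int example_init_extra_clks(struct msm_dsi_host *msm_host,
				   const struct msm_dsi_cfg_handler *cfg_hnd)
{
	/* v2 and 6G v2.2.1+ install clk_init_ver; plain 6G leaves it NULL */
	if (cfg_hnd->ops->clk_init_ver)
		return cfg_hnd->ops->clk_init_ver(msm_host);

	return 0;
}
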
index 2f1a2780658a416bd4e5235f34cf110965bce569..96fb5f63531482fcf688cb4d69b56f9eec2d719c 100644 (file)
@@ -118,6 +118,7 @@ struct msm_dsi_host {
        struct clk *byte_intf_clk;
 
        u32 byte_clk_rate;
+       u32 pixel_clk_rate;
        u32 esc_clk_rate;
 
        /* DSI v2 specific clocks */
@@ -332,6 +333,54 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
        return 0;
 }
 
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+       struct platform_device *pdev = msm_host->pdev;
+       int ret = 0;
+
+       msm_host->src_clk = msm_clk_get(pdev, "src");
+
+       if (IS_ERR(msm_host->src_clk)) {
+               ret = PTR_ERR(msm_host->src_clk);
+               pr_err("%s: can't find src clock. ret=%d\n",
+                       __func__, ret);
+               msm_host->src_clk = NULL;
+               return ret;
+       }
+
+       msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+       if (!msm_host->esc_clk_src) {
+               ret = -ENODEV;
+               pr_err("%s: can't get esc clock parent. ret=%d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+       if (!msm_host->dsi_clk_src) {
+               ret = -ENODEV;
+               pr_err("%s: can't get src clock parent. ret=%d\n",
+                       __func__, ret);
+       }
+
+       return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+       struct platform_device *pdev = msm_host->pdev;
+       int ret = 0;
+
+       msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+       if (IS_ERR(msm_host->byte_intf_clk)) {
+               ret = PTR_ERR(msm_host->byte_intf_clk);
+               pr_err("%s: can't find byte_intf clock. ret=%d\n",
+                       __func__, ret);
+       }
+
+       return ret;
+}
+
 static int dsi_clk_init(struct msm_dsi_host *msm_host)
 {
        struct platform_device *pdev = msm_host->pdev;
@@ -379,19 +428,6 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
                goto exit;
        }
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
-           cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
-               msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
-               if (IS_ERR(msm_host->byte_intf_clk)) {
-                       ret = PTR_ERR(msm_host->byte_intf_clk);
-                       pr_err("%s: can't find byte_intf clock. ret=%d\n",
-                               __func__, ret);
-                       goto exit;
-               }
-       } else {
-               msm_host->byte_intf_clk = NULL;
-       }
-
        msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
        if (!msm_host->byte_clk_src) {
                ret = -ENODEV;
@@ -406,31 +442,8 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
                goto exit;
        }
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
-               msm_host->src_clk = msm_clk_get(pdev, "src");
-               if (IS_ERR(msm_host->src_clk)) {
-                       ret = PTR_ERR(msm_host->src_clk);
-                       pr_err("%s: can't find src clock. ret=%d\n",
-                               __func__, ret);
-                       msm_host->src_clk = NULL;
-                       goto exit;
-               }
-
-               msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
-               if (!msm_host->esc_clk_src) {
-                       ret = -ENODEV;
-                       pr_err("%s: can't get esc clock parent. ret=%d\n",
-                               __func__, ret);
-                       goto exit;
-               }
-
-               msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
-               if (!msm_host->dsi_clk_src) {
-                       ret = -ENODEV;
-                       pr_err("%s: can't get src clock parent. ret=%d\n",
-                               __func__, ret);
-               }
-       }
+       if (cfg_hnd->ops->clk_init_ver)
+               ret = cfg_hnd->ops->clk_init_ver(msm_host);
 exit:
        return ret;
 }
@@ -498,7 +511,7 @@ int msm_dsi_runtime_resume(struct device *dev)
        return dsi_bus_clk_enable(msm_host);
 }
 
-static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
 {
        int ret;
 
@@ -511,7 +524,7 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
                goto error;
        }
 
-       ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+       ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
        if (ret) {
                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
                goto error;
@@ -566,7 +579,7 @@ error:
        return ret;
 }
 
-static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
 {
        int ret;
 
@@ -592,7 +605,7 @@ static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
                goto error;
        }
 
-       ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+       ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
        if (ret) {
                pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
                goto error;
@@ -634,98 +647,121 @@ error:
        return ret;
 }
 
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
 {
-       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+       clk_disable_unprepare(msm_host->esc_clk);
+       clk_disable_unprepare(msm_host->pixel_clk);
+       if (msm_host->byte_intf_clk)
+               clk_disable_unprepare(msm_host->byte_intf_clk);
+       clk_disable_unprepare(msm_host->byte_clk);
+}
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
-               return dsi_link_clk_enable_6g(msm_host);
-       else
-               return dsi_link_clk_enable_v2(msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+       clk_disable_unprepare(msm_host->pixel_clk);
+       clk_disable_unprepare(msm_host->src_clk);
+       clk_disable_unprepare(msm_host->esc_clk);
+       clk_disable_unprepare(msm_host->byte_clk);
 }
 
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 {
-       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+       struct drm_display_mode *mode = msm_host->mode;
+       u32 pclk_rate;
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-               clk_disable_unprepare(msm_host->esc_clk);
-               clk_disable_unprepare(msm_host->pixel_clk);
-               if (msm_host->byte_intf_clk)
-                       clk_disable_unprepare(msm_host->byte_intf_clk);
-               clk_disable_unprepare(msm_host->byte_clk);
-       } else {
-               clk_disable_unprepare(msm_host->pixel_clk);
-               clk_disable_unprepare(msm_host->src_clk);
-               clk_disable_unprepare(msm_host->esc_clk);
-               clk_disable_unprepare(msm_host->byte_clk);
-       }
+       pclk_rate = mode->clock * 1000;
+
+       /*
+        * For dual DSI mode, the current DRM mode has the complete width of the
+        * panel. Since the complete panel is driven by two DSI controllers,
+        * the clock rates have to be split between the two DSI controllers.
+        * Adjust the byte and pixel clock rates for each DSI host accordingly.
+        */
+       if (is_dual_dsi)
+               pclk_rate /= 2;
+
+       return pclk_rate;
 }
 
-static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 {
-       struct drm_display_mode *mode = msm_host->mode;
-       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        u8 lanes = msm_host->lanes;
        u32 bpp = dsi_get_bpp(msm_host->format);
-       u32 pclk_rate;
+       u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+       u64 pclk_bpp = (u64)pclk_rate * bpp;
 
-       if (!mode) {
-               pr_err("%s: mode not set\n", __func__);
-               return -EINVAL;
-       }
-
-       pclk_rate = mode->clock * 1000;
-       if (lanes > 0) {
-               msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
-       } else {
+       if (lanes == 0) {
                pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
-               msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+               lanes = 1;
        }
 
-       DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+       do_div(pclk_bpp, (8 * lanes));
 
-       msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+       msm_host->pixel_clk_rate = pclk_rate;
+       msm_host->byte_clk_rate = pclk_bpp;
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
-               unsigned int esc_mhz, esc_div;
-               unsigned long byte_mhz;
+       DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+                               msm_host->byte_clk_rate);
 
-               msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+}
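
Concretely, dsi_calc_pclk() widens the pixel-clock/bpp product to 64 bits
before dividing, and do_div() then divides the u64 in place. A worked example
with assumed numbers (1080p panel, RGB888, 4 lanes, dual DSI):

static void example_calc_pclk(void)
{
	u32 pclk_rate = 148500 * 1000;	/* full-panel pixel clock, Hz */
	u64 pclk_bpp;

	pclk_rate /= 2;			/* dual DSI: 74250000 per host */
	pclk_bpp = (u64)pclk_rate * 24;	/* widen before multiplying so
					 * larger modes cannot overflow */
	do_div(pclk_bpp, 8 * 4);	/* bits -> bytes, across 4 lanes */
	/* pixel_clk_rate = 74250000 Hz, byte_clk_rate = 55687500 Hz */
}
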
 
-               /*
-                * esc clock is byte clock followed by a 4 bit divider,
-                * we need to find an escape clock frequency within the
-                * mipi DSI spec range within the maximum divider limit
-                * We iterate here between an escape clock frequencey
-                * between 20 Mhz to 5 Mhz and pick up the first one
-                * that can be supported by our divider
-                */
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+       if (!msm_host->mode) {
+               pr_err("%s: mode not set\n", __func__);
+               return -EINVAL;
+       }
+
+       dsi_calc_pclk(msm_host, is_dual_dsi);
+       msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+       return 0;
+}
 
-               byte_mhz = msm_host->byte_clk_rate / 1000000;
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+       u32 bpp = dsi_get_bpp(msm_host->format);
+       u64 pclk_bpp;
+       unsigned int esc_mhz, esc_div;
+       unsigned long byte_mhz;
 
-               for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
-                       esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+       dsi_calc_pclk(msm_host, is_dual_dsi);
 
-                       /*
-                        * TODO: Ideally, we shouldn't know what sort of divider
-                        * is available in mmss_cc, we're just assuming that
-                        * it'll always be a 4 bit divider. Need to come up with
-                        * a better way here.
-                        */
-                       if (esc_div >= 1 && esc_div <= 16)
-                               break;
-               }
+       pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+       do_div(pclk_bpp, 8);
+       msm_host->src_clk_rate = pclk_bpp;
 
-               if (esc_mhz < 5)
-                       return -EINVAL;
+       /*
+        * The esc clock is the byte clock followed by a 4 bit divider;
+        * we need to find an escape clock frequency within the MIPI
+        * DSI spec range and within the maximum divider limit. We
+        * iterate over escape clock frequencies from 20 MHz down to
+        * 5 MHz and pick the first one our divider can support.
+        */
+
+       byte_mhz = msm_host->byte_clk_rate / 1000000;
 
-               msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+       for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+               esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
 
-               DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
-                       msm_host->src_clk_rate);
+               /*
+                * TODO: Ideally, we shouldn't know what sort of divider
+                * is available in mmss_cc, we're just assuming that
+                * it'll always be a 4 bit divider. Need to come up with
+                * a better way here.
+                */
+               if (esc_div >= 1 && esc_div <= 16)
+                       break;
        }
 
+       if (esc_mhz < 5)
+               return -EINVAL;
+
+       msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+       DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+               msm_host->src_clk_rate);
+
        return 0;
 }
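
Tracing the divider search with the byte clock from the example above
(55687500 Hz), the very first candidate already fits. A short sketch with
those assumed numbers:

static unsigned long example_esc_rate(void)
{
	unsigned long byte_clk_rate = 55687500;	/* from the example above */
	unsigned long byte_mhz = byte_clk_rate / 1000000;	/* 55 */
	unsigned int esc_mhz, esc_div = 1;

	for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
		esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz); /* 20 MHz: ceil(55/20) = 3 */
		if (esc_div >= 1 && esc_div <= 16)	/* must fit the 4-bit divider */
			break;
	}

	return byte_clk_rate / esc_div;	/* 18562500 Hz, inside the 5-20 MHz window */
}
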
 
@@ -885,7 +921,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
        dsi_write(msm_host, REG_DSI_CTRL, data);
 }
 
-static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
 {
        struct drm_display_mode *mode = msm_host->mode;
        u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -897,10 +933,26 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
        u32 ha_end = ha_start + mode->hdisplay;
        u32 va_start = v_total - mode->vsync_start;
        u32 va_end = va_start + mode->vdisplay;
+       u32 hdisplay = mode->hdisplay;
        u32 wc;
 
        DBG("");
 
+       /*
+        * For dual DSI mode, the current DRM mode has
+        * the complete width of the panel. Since the complete
+        * panel is driven by two DSI controllers, the horizontal
+        * timings have to be split between the two DSI controllers.
+        * Adjust the DSI host timing values accordingly.
+        */
+       if (is_dual_dsi) {
+               h_total /= 2;
+               hs_end /= 2;
+               ha_start /= 2;
+               ha_end /= 2;
+               hdisplay /= 2;
+       }
+
        if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
                dsi_write(msm_host, REG_DSI_ACTIVE_H,
                        DSI_ACTIVE_H_START(ha_start) |
@@ -921,7 +973,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
                        DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
        } else {                /* command mode */
                /* image data and 1 byte write_memory_start cmd */
-               wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+               wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
 
                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
                        DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
@@ -931,7 +983,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
                                        MIPI_DSI_DCS_LONG_WRITE));
 
                dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
-                       DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+                       DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
                        DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
        }
 }
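
In other words, for a left-right split each controller programs half of every
horizontal parameter while the vertical timings stay untouched. With an
assumed 1080p mode (hdisplay 1920, htotal 2200), each host ends up with:

static void example_split_timings(bool is_dual_dsi)
{
	u32 hdisplay = 1920, h_total = 2200;	/* full-panel values */

	if (is_dual_dsi) {		/* left-right split across two hosts */
		hdisplay /= 2;		/* 960 active pixels per controller */
		h_total /= 2;		/* 1100 total pixels per controller */
	}
}
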
@@ -1015,50 +1067,37 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
        }
 }
 
-/* dsi_cmd */
-static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
 {
        struct drm_device *dev = msm_host->dev;
        struct msm_drm_private *priv = dev->dev_private;
-       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
-       int ret;
        uint64_t iova;
+       u8 *data;
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-               msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
-               if (IS_ERR(msm_host->tx_gem_obj)) {
-                       ret = PTR_ERR(msm_host->tx_gem_obj);
-                       pr_err("%s: failed to allocate gem, %d\n",
-                               __func__, ret);
-                       msm_host->tx_gem_obj = NULL;
-                       return ret;
-               }
+       data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+                                       priv->kms->aspace,
+                                       &msm_host->tx_gem_obj, &iova);
 
-               ret = msm_gem_get_iova(msm_host->tx_gem_obj,
-                               priv->kms->aspace, &iova);
-               if (ret) {
-                       pr_err("%s: failed to get iova, %d\n", __func__, ret);
-                       return ret;
-               }
+       if (IS_ERR(data)) {
+               msm_host->tx_gem_obj = NULL;
+               return PTR_ERR(data);
+       }
 
-               if (iova & 0x07) {
-                       pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
-                       return -EINVAL;
-               }
+       msm_host->tx_size = msm_host->tx_gem_obj->size;
 
-               msm_host->tx_size = msm_host->tx_gem_obj->size;
-       } else {
-               msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+       return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+       struct drm_device *dev = msm_host->dev;
+
+       msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
                                        &msm_host->tx_buf_paddr, GFP_KERNEL);
-               if (!msm_host->tx_buf) {
-                       ret = -ENOMEM;
-                       pr_err("%s: failed to allocate tx buf, %d\n",
-                               __func__, ret);
-                       return ret;
-               }
+       if (!msm_host->tx_buf)
+               return -ENOMEM;
 
-               msm_host->tx_size = size;
-       }
+       msm_host->tx_size = size;
 
        return 0;
 }
@@ -1089,6 +1128,21 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
                        msm_host->tx_buf_paddr);
 }
 
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+       return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+       return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+       msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
 /*
  * prepare cmd buffer to be txed
  */
@@ -1113,15 +1167,11 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
                return -EINVAL;
        }
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-               data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
-               if (IS_ERR(data)) {
-                       ret = PTR_ERR(data);
-                       pr_err("%s: get vaddr failed, %d\n", __func__, ret);
-                       return ret;
-               }
-       } else {
-               data = msm_host->tx_buf;
+       data = cfg_hnd->ops->tx_buf_get(msm_host);
+       if (IS_ERR(data)) {
+               ret = PTR_ERR(data);
+               pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+               return ret;
        }
 
        /* MSM specific command format in memory */
@@ -1142,8 +1192,8 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
        if (packet.size < len)
                memset(data + packet.size, 0xff, len - packet.size);
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
-               msm_gem_put_vaddr(msm_host->tx_gem_obj);
+       if (cfg_hnd->ops->tx_buf_put)
+               cfg_hnd->ops->tx_buf_put(msm_host);
 
        return len;
 }
@@ -1190,24 +1240,38 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
        return msg->rx_len;
 }
 
-static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
 {
-       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        struct drm_device *dev = msm_host->dev;
        struct msm_drm_private *priv = dev->dev_private;
+
+       if (!dma_base)
+               return -EINVAL;
+
+       return msm_gem_get_iova(msm_host->tx_gem_obj,
+                               priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+       if (!dma_base)
+               return -EINVAL;
+
+       *dma_base = msm_host->tx_buf_paddr;
+       return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        int ret;
        uint64_t dma_base;
        bool triggered;
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-               ret = msm_gem_get_iova(msm_host->tx_gem_obj,
-                               priv->kms->aspace, &dma_base);
-               if (ret) {
-                       pr_err("%s: failed to get iova: %d\n", __func__, ret);
-                       return ret;
-               }
-       } else {
-               dma_base = msm_host->tx_buf_paddr;
+       ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+       if (ret) {
+               pr_err("%s: failed to get iova: %d\n", __func__, ret);
+               return ret;
        }
 
        reinit_completion(&msm_host->dma_comp);
@@ -1845,6 +1909,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
                                        struct drm_device *dev)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        struct platform_device *pdev = msm_host->pdev;
        int ret;
 
@@ -1865,7 +1930,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
        }
 
        msm_host->dev = dev;
-       ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+       ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
        if (ret) {
                pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
                return ret;
@@ -1898,7 +1963,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
                 * output
                 */
                if (check_defer && msm_host->device_node) {
-                       if (!of_drm_find_panel(msm_host->device_node))
+                       if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
                                if (!of_drm_find_bridge(msm_host->device_node))
                                        return -EPROBE_DEFER;
                }
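
This hunk tracks an of_drm_find_panel() API change: lookup failure is now
reported as an ERR_PTR rather than NULL, so the absence test becomes IS_ERR().
A hedged sketch of the resulting probe-defer logic:

static int example_check_output(struct device_node *node)
{
	struct drm_panel *panel = of_drm_find_panel(node);

	if (IS_ERR(panel)) {
		/* no panel bound yet: fall back to a bridge, otherwise
		 * ask the driver core to retry the probe later */
		if (!of_drm_find_bridge(node))
			return -EPROBE_DEFER;
	}

	return 0;
}
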
@@ -1923,6 +1988,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
                                const struct mipi_dsi_msg *msg)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 
        /* TODO: make sure dsi_cmd_mdp is idle.
         * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
@@ -1935,7 +2001,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
         * mdp clock need to be enabled to receive dsi interrupt
         */
        pm_runtime_get_sync(&msm_host->pdev->dev);
-       dsi_link_clk_enable(msm_host);
+       cfg_hnd->ops->link_clk_enable(msm_host);
 
        /* TODO: vote for bus bandwidth */
 
@@ -1956,6 +2022,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
                                const struct mipi_dsi_msg *msg)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 
        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
        dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
@@ -1965,7 +2032,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
 
        /* TODO: unvote for bus bandwidth */
 
-       dsi_link_clk_disable(msm_host);
+       cfg_hnd->ops->link_clk_disable(msm_host);
        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
 }
 
@@ -2129,7 +2196,6 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
        struct msm_dsi_pll *src_pll)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        struct clk *byte_clk_provider, *pixel_clk_provider;
        int ret;
 
@@ -2155,14 +2221,16 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
                goto exit;
        }
 
-       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+       if (msm_host->dsi_clk_src) {
                ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
                if (ret) {
                        pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
                                __func__, ret);
                        goto exit;
                }
+       }
 
+       if (msm_host->esc_clk_src) {
                ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
                if (ret) {
                        pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
@@ -2189,12 +2257,14 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
 }
 
 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
-       struct msm_dsi_phy_clk_request *clk_req)
+                       struct msm_dsi_phy_clk_request *clk_req,
+                       bool is_dual_dsi)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        int ret;
 
-       ret = dsi_calc_clk_rate(msm_host);
+       ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
        if (ret) {
                pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
                return;
@@ -2256,9 +2326,11 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
 }
 
 int msm_dsi_host_power_on(struct mipi_dsi_host *host,
-                       struct msm_dsi_phy_shared_timings *phy_shared_timings)
+                       struct msm_dsi_phy_shared_timings *phy_shared_timings,
+                       bool is_dual_dsi)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
        int ret = 0;
 
        mutex_lock(&msm_host->dev_mutex);
@@ -2277,7 +2349,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
        }
 
        pm_runtime_get_sync(&msm_host->pdev->dev);
-       ret = dsi_link_clk_enable(msm_host);
+       ret = cfg_hnd->ops->link_clk_enable(msm_host);
        if (ret) {
                pr_err("%s: failed to enable link clocks. ret=%d\n",
                       __func__, ret);
@@ -2291,7 +2363,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
                goto fail_disable_clk;
        }
 
-       dsi_timing_setup(msm_host);
+       dsi_timing_setup(msm_host, is_dual_dsi);
        dsi_sw_reset(msm_host);
        dsi_ctrl_config(msm_host, true, phy_shared_timings);
 
@@ -2304,7 +2376,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
        return 0;
 
 fail_disable_clk:
-       dsi_link_clk_disable(msm_host);
+       cfg_hnd->ops->link_clk_disable(msm_host);
        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
 fail_disable_reg:
        dsi_host_regulator_disable(msm_host);
@@ -2316,6 +2388,7 @@ unlock_ret:
 int msm_dsi_host_power_off(struct mipi_dsi_host *host)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+       const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 
        mutex_lock(&msm_host->dev_mutex);
        if (!msm_host->power_on) {
@@ -2330,7 +2403,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
 
        pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
 
-       dsi_link_clk_disable(msm_host);
+       cfg_hnd->ops->link_clk_disable(msm_host);
        pm_runtime_put_autosuspend(&msm_host->pdev->dev);
 
        dsi_host_regulator_disable(msm_host);
index 4cb1cb68878b0dec65313d092aa8c28848e3f436..5224010d90e4ac0cb78a66b1e785586ff514087f 100644 (file)
@@ -134,8 +134,9 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
 {
        struct msm_dsi_phy_clk_request clk_req;
        int ret;
+       bool is_dual_dsi = IS_DUAL_DSI();
 
-       msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+       msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
 
        ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
        msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
@@ -305,102 +306,25 @@ static void dsi_mgr_connector_destroy(struct drm_connector *connector)
        kfree(dsi_connector);
 }
 
-static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
-{
-       struct drm_display_mode *mode, *m;
-
-       /* Only support left-right mode */
-       list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
-               mode->clock >>= 1;
-               mode->hdisplay >>= 1;
-               mode->hsync_start >>= 1;
-               mode->hsync_end >>= 1;
-               mode->htotal >>= 1;
-               drm_mode_set_name(mode);
-       }
-}
-
-static int dsi_dual_connector_tile_init(
-                       struct drm_connector *connector, int id)
-{
-       struct drm_display_mode *mode;
-       /* Fake topology id */
-       char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
-
-       if (connector->tile_group) {
-               DBG("Tile property has been initialized");
-               return 0;
-       }
-
-       /* Use the first mode only for now */
-       mode = list_first_entry(&connector->probed_modes,
-                               struct drm_display_mode,
-                               head);
-       if (!mode)
-               return -EINVAL;
-
-       connector->tile_group = drm_mode_get_tile_group(
-                                       connector->dev, topo_id);
-       if (!connector->tile_group)
-               connector->tile_group = drm_mode_create_tile_group(
-                                       connector->dev, topo_id);
-       if (!connector->tile_group) {
-               pr_err("%s: failed to create tile group\n", __func__);
-               return -ENOMEM;
-       }
-
-       connector->has_tile = true;
-       connector->tile_is_single_monitor = true;
-
-       /* mode has been fixed */
-       connector->tile_h_size = mode->hdisplay;
-       connector->tile_v_size = mode->vdisplay;
-
-       /* Only support left-right mode */
-       connector->num_h_tile = 2;
-       connector->num_v_tile = 1;
-
-       connector->tile_v_loc = 0;
-       connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
-
-       return 0;
-}
-
 static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
 {
        int id = dsi_mgr_connector_get_id(connector);
        struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
        struct drm_panel *panel = msm_dsi->panel;
-       int ret, num;
+       int num;
 
        if (!panel)
                return 0;
 
-       /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
-        * panel should not attach to any connector.
-        * Only temporarily attach panel to the current connector here,
-        * to let panel set mode to this connector.
+       /*
+        * In dual DSI mode, we have one connector that can be
+        * attached to the drm_panel.
         */
        drm_panel_attach(panel, connector);
        num = drm_panel_get_modes(panel);
-       drm_panel_detach(panel);
        if (!num)
                return 0;
 
-       if (IS_DUAL_DSI()) {
-               /* report half resolution to user */
-               dsi_dual_connector_fix_modes(connector);
-               ret = dsi_dual_connector_tile_init(connector, id);
-               if (ret)
-                       return ret;
-               ret = drm_mode_connector_set_tile_property(connector);
-               if (ret) {
-                       pr_err("%s: set tile property failed, %d\n",
-                                       __func__, ret);
-                       return ret;
-               }
-       }
-
        return num;
 }
 
@@ -454,11 +378,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
        if (ret)
                goto phy_en_fail;
 
-       /* Do nothing with the host if it is DSI 1 in case of dual DSI */
-       if (is_dual_dsi && (DSI_1 == id))
+       /* Do nothing with the host if it is slave-DSI in case of dual DSI */
+       if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
                return;
 
-       ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
+       ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
        if (ret) {
                pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
                goto host_on_fail;
@@ -466,7 +390,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
 
        if (is_dual_dsi && msm_dsi1) {
                ret = msm_dsi_host_power_on(msm_dsi1->host,
-                                           &phy_shared_timings[DSI_1]);
+                               &phy_shared_timings[DSI_1], is_dual_dsi);
                if (ret) {
                        pr_err("%s: power on host1 failed, %d\n",
                                                        __func__, ret);
@@ -556,11 +480,11 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
                return;
 
        /*
-        * Do nothing with the host if it is DSI 1 in case of dual DSI.
+        * Do nothing with the host if it is slave-DSI in case of dual DSI.
         * It is safe to call dsi_mgr_phy_disable() here because a single PHY
         * won't be disabled until both PHYs request disable.
         */
-       if (is_dual_dsi && (DSI_1 == id))
+       if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
                goto disable_phy;
 
        if (panel) {
@@ -621,7 +545,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
                        mode->vsync_end, mode->vtotal,
                        mode->type, mode->flags);
 
-       if (is_dual_dsi && (DSI_1 == id))
+       if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
                return;
 
        msm_dsi_host_set_display_mode(host, adjusted_mode);
@@ -684,11 +608,28 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
+       drm_connector_attach_encoder(connector, msm_dsi->encoder);
 
        return connector;
 }
 
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+       bool is_dual_dsi = IS_DUAL_DSI();
+
+       /*
+        * For dual DSI, we have only one drm panel. For this
+        * use case, we register only one bridge/connector.
+        * Skip bridge/connector initialisation if this is the
+        * slave DSI in a dual DSI configuration.
+        */
+       if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+               DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+               return false;
+       }
+       return true;
+}
+
 /* initialize bridge */
 struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
 {
@@ -751,12 +692,8 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
        connector_list = &dev->mode_config.connector_list;
 
        list_for_each_entry(connector, connector_list, head) {
-               int i;
-
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-                       if (connector->encoder_ids[i] == encoder->base.id)
-                               return connector;
-               }
+               if (drm_connector_has_possible_encoder(connector, encoder))
+                       return connector;
        }
 
        return ERR_PTR(-ENODEV);
@@ -836,6 +773,7 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
        struct msm_drm_private *priv;
        struct msm_kms *kms;
        struct drm_encoder *encoder;
+       bool cmd_mode;
 
        /*
         * drm_device pointer is assigned to msm_dsi only in the modeset_init
@@ -850,10 +788,11 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
        priv = dev->dev_private;
        kms = priv->kms;
        encoder = msm_dsi_get_encoder(msm_dsi);
+       cmd_mode = !(device_flags & MIPI_DSI_MODE_VIDEO);
 
        if (encoder && kms->funcs->set_encoder_mode)
-               if (!(device_flags & MIPI_DSI_MODE_VIDEO))
-                       kms->funcs->set_encoder_mode(kms, encoder, true);
+               kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
 }
 
 int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
index c4c37a7df637fe326d7a14ec99ac1c57b2e8078c..4c03f0b7343ed655c60111be4d09249bde463b28 100644 (file)
@@ -798,6 +798,8 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
                return ERR_PTR(-ENOMEM);
        }
 
+       spin_lock_init(&pll_10nm->postdiv_lock);
+
        pll = &pll_10nm->base;
        pll->min_rate = 1000000000UL;
        pll->max_rate = 3500000000UL;
index 6f3fc6b0f0a31e5567ad714cdc1654d654b596ab..058ff92a020765ec5ee3bab752533295ff9f5aae 100644 (file)
@@ -56,7 +56,7 @@ static int edp_connector_get_modes(struct drm_connector *connector)
        if (ret)
                return ret;
 
-       drm_mode_connector_update_edid_property(connector, drm_edid);
+       drm_connector_update_edid_property(connector, drm_edid);
        if (drm_edid)
                ret = drm_add_edid_modes(connector, drm_edid);
 
@@ -134,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
 
-       drm_mode_connector_attach_encoder(connector, edp->encoder);
+       drm_connector_attach_encoder(connector, edp->encoder);
 
        return connector;
 }
index c0848dfedd50648de59be95cdd389f81bde29edf..e9c9a0af508e8c41bc12e91fc13d2f23b5041f33 100644 (file)
@@ -392,7 +392,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
        hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
 
        hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
 
        if (edid) {
                ret = drm_add_edid_modes(connector, edid);
@@ -477,7 +477,7 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
                return ERR_PTR(ret);
        }
 
-       drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+       drm_connector_attach_encoder(connector, hdmi->encoder);
 
        return connector;
 }
index f0635c3da7f48ad1bdc15c2305fe4d0b1cb48ff1..c1f1779c980f615ce8b339d0f34f351990d8ff5d 100644 (file)
@@ -71,12 +71,15 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
+       if (kms->funcs->commit) {
+               DRM_DEBUG_ATOMIC("triggering commit\n");
+               kms->funcs->commit(kms, state);
+       }
+
        msm_atomic_wait_for_commit_done(dev, state);
 
        kms->funcs->complete_commit(kms, state);
 
-       drm_atomic_helper_wait_for_vblanks(dev, state);
-
        drm_atomic_helper_commit_hw_done(state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
index 1ff3fda245d18e0260a052024973a95c7de448d2..f0da0d3c8a80f7cf9ab5082095aed6df3e3c9529 100644 (file)
  */
 
 #ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
 #include "msm_debugfs.h"
 
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+struct msm_gpu_show_priv {
+       struct msm_gpu_state *state;
+       struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+       struct drm_printer p = drm_seq_file_printer(m);
+       struct msm_gpu_show_priv *show_priv = m->private;
+       struct msm_drm_private *priv = show_priv->dev->dev_private;
+       struct msm_gpu *gpu = priv->gpu;
+       int ret;
+
+       ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       drm_printf(&p, "%s Status:\n", gpu->name);
+       gpu->funcs->show(gpu, show_priv->state, &p);
+
+       mutex_unlock(&show_priv->dev->struct_mutex);
+
+       return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
 {
+       struct seq_file *m = file->private_data;
+       struct msm_gpu_show_priv *show_priv = m->private;
+       struct msm_drm_private *priv = show_priv->dev->dev_private;
+       struct msm_gpu *gpu = priv->gpu;
+       int ret;
+
+       ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       gpu->funcs->gpu_state_put(show_priv->state);
+       mutex_unlock(&show_priv->dev->struct_mutex);
+
+       kfree(show_priv);
+
+       return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
+{
+       struct drm_device *dev = inode->i_private;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;
+       struct msm_gpu_show_priv *show_priv;
+       int ret;
 
-       if (gpu) {
-               seq_printf(m, "%s Status:\n", gpu->name);
-               pm_runtime_get_sync(&gpu->pdev->dev);
-               gpu->funcs->show(gpu, m);
-               pm_runtime_put_sync(&gpu->pdev->dev);
+       if (!gpu)
+               return -ENODEV;
+
+       show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+       if (!show_priv)
+               return -ENOMEM;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       pm_runtime_get_sync(&gpu->pdev->dev);
+       show_priv->state = gpu->funcs->gpu_state_get(gpu);
+       pm_runtime_put_sync(&gpu->pdev->dev);
+
+       mutex_unlock(&dev->struct_mutex);
+
+       if (IS_ERR(show_priv->state)) {
+               ret = PTR_ERR(show_priv->state);
+               kfree(show_priv);
+               return ret;
        }
 
-       return 0;
+       show_priv->dev = dev;
+
+       return single_open(file, msm_gpu_show, show_priv);
 }
 
+static const struct file_operations msm_gpu_fops = {
+       .owner = THIS_MODULE,
+       .open = msm_gpu_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = msm_gpu_release,
+};
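
Moving the gpu node off drm_info_list onto dedicated fops lets the driver
snapshot GPU state once at open() via gpu_state_get() and drop it at release()
via gpu_state_put(), instead of re-reading the hardware on every show(). A
minimal sketch of that open/show/release shape, with hypothetical
capture_state()/release_state() helpers standing in for those calls:

static int example_open(struct inode *inode, struct file *file)
{
	void *snapshot = capture_state(inode->i_private);	/* hypothetical */

	if (IS_ERR(snapshot))
		return PTR_ERR(snapshot);

	return single_open(file, example_show, snapshot);	/* m->private */
}

static int example_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	release_state(m->private);	/* drop the snapshot taken at open */
	return single_release(inode, file);
}
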
+
 static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -105,7 +180,6 @@ static int show_locked(struct seq_file *m, void *arg)
 }
 
 static struct drm_info_list msm_debugfs_list[] = {
-               {"gpu", show_locked, 0, msm_gpu_show},
                {"gem", show_locked, 0, msm_gem_show},
                { "mm", show_locked, 0, msm_mm_show },
                { "fb", show_locked, 0, msm_fb_show },
@@ -158,6 +232,9 @@ int msm_debugfs_init(struct drm_minor *minor)
                return ret;
        }
 
+       debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+               dev, &msm_gpu_fops);
+
        if (priv->kms->funcs->debugfs_init) {
                ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
                if (ret)
index 021a0b6f9a597b0b22795c1fa18f97e5e14eb2c0..46876bc8b7077e9ef8ed7f7d116ae2f8a99f78fc 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -15,6 +16,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
 #include <drm/drm_of.h>
 
 #include "msm_drv.h"
@@ -149,7 +152,7 @@ struct vblank_event {
        bool enable;
 };
 
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
 {
        struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
                                                struct msm_vblank_ctrl, work);
@@ -197,7 +200,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
        list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
        spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-       queue_work(priv->wq, &vbl_ctrl->work);
+       kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+                       &vbl_ctrl->work);
 
        return 0;
 }
@@ -208,19 +212,36 @@ static int msm_drm_uninit(struct device *dev)
        struct drm_device *ddev = platform_get_drvdata(pdev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_kms *kms = priv->kms;
+       struct msm_mdss *mdss = priv->mdss;
        struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
        struct vblank_event *vbl_ev, *tmp;
+       int i;
 
        /* We must cancel and cleanup any pending vblank enable/disable
         * work before drm_irq_uninstall() to avoid work re-enabling an
         * irq after uninstall has disabled it.
         */
-       cancel_work_sync(&vbl_ctrl->work);
+       kthread_flush_work(&vbl_ctrl->work);
        list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
                list_del(&vbl_ev->node);
                kfree(vbl_ev);
        }
 
+       /* clean up display commit/event worker threads */
+       for (i = 0; i < priv->num_crtcs; i++) {
+               if (priv->disp_thread[i].thread) {
+                       kthread_flush_worker(&priv->disp_thread[i].worker);
+                       kthread_stop(priv->disp_thread[i].thread);
+                       priv->disp_thread[i].thread = NULL;
+               }
+
+               if (priv->event_thread[i].thread) {
+                       kthread_flush_worker(&priv->event_thread[i].worker);
+                       kthread_stop(priv->event_thread[i].thread);
+                       priv->event_thread[i].thread = NULL;
+               }
+       }
+
        msm_gem_shrinker_cleanup(ddev);
 
        drm_kms_helper_poll_fini(ddev);
@@ -243,9 +264,6 @@ static int msm_drm_uninit(struct device *dev)
        flush_workqueue(priv->wq);
        destroy_workqueue(priv->wq);
 
-       flush_workqueue(priv->atomic_wq);
-       destroy_workqueue(priv->atomic_wq);
-
        if (kms && kms->funcs)
                kms->funcs->destroy(kms);
 
@@ -258,7 +276,8 @@ static int msm_drm_uninit(struct device *dev)
 
        component_unbind_all(dev, ddev);
 
-       msm_mdss_destroy(ddev);
+       if (mdss && mdss->funcs)
+               mdss->funcs->destroy(ddev);
 
        ddev->dev_private = NULL;
        drm_dev_unref(ddev);
@@ -268,6 +287,10 @@ static int msm_drm_uninit(struct device *dev)
        return 0;
 }
 
+#define KMS_MDP4 4
+#define KMS_MDP5 5
+#define KMS_DPU  3
+
 static int get_mdp_ver(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -357,7 +380,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        struct drm_device *ddev;
        struct msm_drm_private *priv;
        struct msm_kms *kms;
-       int ret;
+       struct msm_mdss *mdss;
+       int ret, i;
+       struct sched_param param;
 
        ddev = drm_dev_alloc(drv, dev);
        if (IS_ERR(ddev)) {
@@ -369,53 +394,61 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
-               drm_dev_unref(ddev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_unref_drm_dev;
        }
 
        ddev->dev_private = priv;
        priv->dev = ddev;
 
-       ret = msm_mdss_init(ddev);
-       if (ret) {
-               kfree(priv);
-               drm_dev_unref(ddev);
-               return ret;
+       switch (get_mdp_ver(pdev)) {
+       case KMS_MDP5:
+               ret = mdp5_mdss_init(ddev);
+               break;
+       case KMS_DPU:
+               ret = dpu_mdss_init(ddev);
+               break;
+       default:
+               ret = 0;
+               break;
        }
+       if (ret)
+               goto err_free_priv;
+
+       mdss = priv->mdss;
 
        priv->wq = alloc_ordered_workqueue("msm", 0);
-       priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
 
        INIT_LIST_HEAD(&priv->inactive_list);
        INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-       INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+       kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
        spin_lock_init(&priv->vblank_ctrl.lock);
 
        drm_mode_config_init(ddev);
 
        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
-       if (ret) {
-               msm_mdss_destroy(ddev);
-               kfree(priv);
-               drm_dev_unref(ddev);
-               return ret;
-       }
+       if (ret)
+               goto err_destroy_mdss;
 
        ret = msm_init_vram(ddev);
        if (ret)
-               goto fail;
+               goto err_msm_uninit;
 
        msm_gem_shrinker_init(ddev);
 
        switch (get_mdp_ver(pdev)) {
-       case 4:
+       case KMS_MDP4:
                kms = mdp4_kms_init(ddev);
                priv->kms = kms;
                break;
-       case 5:
+       case KMS_MDP5:
                kms = mdp5_kms_init(ddev);
                break;
+       case KMS_DPU:
+               kms = dpu_kms_init(ddev);
+               priv->kms = kms;
+               break;
        default:
                kms = ERR_PTR(-ENODEV);
                break;
@@ -430,24 +463,100 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                 */
                dev_err(dev, "failed to load kms\n");
                ret = PTR_ERR(kms);
-               goto fail;
+               goto err_msm_uninit;
        }
 
+       /* Enable normalization of plane zpos */
+       ddev->mode_config.normalize_zpos = true;
+
        if (kms) {
                ret = kms->funcs->hw_init(kms);
                if (ret) {
                        dev_err(dev, "kms hw init failed: %d\n", ret);
-                       goto fail;
+                       goto err_msm_uninit;
                }
        }
 
        ddev->mode_config.funcs = &mode_config_funcs;
        ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
+       /*
+        * This priority was found during empirical testing to provide
+        * appropriate realtime scheduling for display updates while still
+        * interacting with other realtime and normal priority tasks.
+        */
+       param.sched_priority = 16;
+       for (i = 0; i < priv->num_crtcs; i++) {
+
+               /* initialize display thread */
+               priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+               kthread_init_worker(&priv->disp_thread[i].worker);
+               priv->disp_thread[i].dev = ddev;
+               priv->disp_thread[i].thread =
+                       kthread_run(kthread_worker_fn,
+                               &priv->disp_thread[i].worker,
+                               "crtc_commit:%d", priv->disp_thread[i].crtc_id);
+               if (IS_ERR(priv->disp_thread[i].thread)) {
+                       dev_err(dev, "failed to create crtc_commit kthread\n");
+                       priv->disp_thread[i].thread = NULL;
+               } else {
+                       ret = sched_setscheduler(priv->disp_thread[i].thread,
+                                                SCHED_FIFO, &param);
+                       if (ret)
+                               pr_warn("display thread priority update failed: %d\n",
+                                       ret);
+               }
+
+               /* initialize event thread */
+               priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+               kthread_init_worker(&priv->event_thread[i].worker);
+               priv->event_thread[i].dev = ddev;
+               priv->event_thread[i].thread =
+                       kthread_run(kthread_worker_fn,
+                               &priv->event_thread[i].worker,
+                               "crtc_event:%d", priv->event_thread[i].crtc_id);
+               if (IS_ERR(priv->event_thread[i].thread)) {
+                       dev_err(dev, "failed to create crtc_event kthread\n");
+                       priv->event_thread[i].thread = NULL;
+               } else {
+                       /*
+                        * Run the event thread at the same priority as the
+                        * disp_thread since it handles frame_done events. A
+                        * lower priority event thread and a higher priority
+                        * disp_thread can push frame_pending counters beyond
+                        * 2, causing commit failures at the crtc level.
+                        */
+                       ret = sched_setscheduler(priv->event_thread[i].thread,
+                                                SCHED_FIFO, &param);
+                       if (ret)
+                               pr_warn("display event thread priority update failed: %d\n",
+                                       ret);
+               }
+
+               if (!priv->disp_thread[i].thread ||
+                               !priv->event_thread[i].thread) {
+                       /* clean up previously created threads if any */
+                       for ( ; i >= 0; i--) {
+                               if (priv->disp_thread[i].thread) {
+                                       kthread_stop(
+                                               priv->disp_thread[i].thread);
+                                       priv->disp_thread[i].thread = NULL;
+                               }
+
+                               if (priv->event_thread[i].thread) {
+                                       kthread_stop(
+                                               priv->event_thread[i].thread);
+                                       priv->event_thread[i].thread = NULL;
+                               }
+                       }
+                       goto err_msm_uninit;
+               }
+       }
+
        ret = drm_vblank_init(ddev, priv->num_crtcs);
        if (ret < 0) {
                dev_err(dev, "failed to initialize vblank\n");
-               goto fail;
+               goto err_msm_uninit;
        }
 
        if (kms) {
@@ -456,13 +565,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                pm_runtime_put_sync(dev);
                if (ret < 0) {
                        dev_err(dev, "failed to install IRQ handler\n");
-                       goto fail;
+                       goto err_msm_uninit;
                }
        }
 
        ret = drm_dev_register(ddev, 0);
        if (ret)
-               goto fail;
+               goto err_msm_uninit;
 
        drm_mode_config_reset(ddev);
 
@@ -473,15 +582,23 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
        ret = msm_debugfs_late_init(ddev);
        if (ret)
-               goto fail;
+               goto err_msm_uninit;
 
        drm_kms_helper_poll_init(ddev);
 
        return 0;
 
-fail:
+err_msm_uninit:
        msm_drm_uninit(dev);
        return ret;
+err_destroy_mdss:
+       if (mdss && mdss->funcs)
+               mdss->funcs->destroy(ddev);
+err_free_priv:
+       kfree(priv);
+err_unref_drm_dev:
+       drm_dev_unref(ddev);
+       return ret;
 }
 
 /*
@@ -894,16 +1011,35 @@ static struct drm_driver msm_driver = {
 static int msm_pm_suspend(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
+       struct msm_drm_private *priv = ddev->dev_private;
+       struct msm_kms *kms = priv->kms;
+
+       /* TODO: Use atomic helper suspend/resume */
+       if (kms && kms->funcs && kms->funcs->pm_suspend)
+               return kms->funcs->pm_suspend(dev);
 
        drm_kms_helper_poll_disable(ddev);
 
+       priv->pm_state = drm_atomic_helper_suspend(ddev);
+       if (IS_ERR(priv->pm_state)) {
+               drm_kms_helper_poll_enable(ddev);
+               return PTR_ERR(priv->pm_state);
+       }
+
        return 0;
 }
 
 static int msm_pm_resume(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
+       struct msm_drm_private *priv = ddev->dev_private;
+       struct msm_kms *kms = priv->kms;
+
+       /* TODO: Use atomic helper suspend/resume */
+       if (kms && kms->funcs && kms->funcs->pm_resume)
+               return kms->funcs->pm_resume(dev);
 
+       drm_atomic_helper_resume(ddev, priv->pm_state);
        drm_kms_helper_poll_enable(ddev);
 
        return 0;
@@ -915,11 +1051,12 @@ static int msm_runtime_suspend(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
+       struct msm_mdss *mdss = priv->mdss;
 
        DBG("");
 
-       if (priv->mdss)
-               return msm_mdss_disable(priv->mdss);
+       if (mdss && mdss->funcs)
+               return mdss->funcs->disable(mdss);
 
        return 0;
 }
@@ -928,11 +1065,12 @@ static int msm_runtime_resume(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
+       struct msm_mdss *mdss = priv->mdss;
 
        DBG("");
 
-       if (priv->mdss)
-               return msm_mdss_enable(priv->mdss);
+       if (mdss && mdss->funcs)
+               return mdss->funcs->enable(mdss);
 
        return 0;
 }
@@ -1031,12 +1169,13 @@ static int add_display_components(struct device *dev,
        int ret;
 
        /*
-        * MDP5 based devices don't have a flat hierarchy. There is a top level
-        * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
-        * children devices, find the MDP5 node, and then add the interfaces
-        * to our components list.
+        * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+        * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+        * Populate the children devices, find the MDP5/DPU node, and then add
+        * the interfaces to our components list.
         */
-       if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+       if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+           of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
                if (ret) {
                        dev_err(dev, "failed to populate children devices\n");
@@ -1146,8 +1285,9 @@ static int msm_pdev_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id dt_match[] = {
-       { .compatible = "qcom,mdp4", .data = (void *)4 },       /* MDP4 */
-       { .compatible = "qcom,mdss", .data = (void *)5 },       /* MDP5 MDSS */
+       { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
+       { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+       { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
        {}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
@@ -1169,6 +1309,7 @@ static int __init msm_drm_register(void)
 
        DBG("init");
        msm_mdp_register();
+       msm_dpu_register();
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
@@ -1185,6 +1326,7 @@ static void __exit msm_drm_unregister(void)
        msm_edp_unregister();
        msm_dsi_unregister();
        msm_mdp_unregister();
+       msm_dpu_unregister();
 }
 
 module_init(msm_drm_register);
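The msm_drv.c changes above move vblank control and per-CRTC commit/event handling from workqueues onto dedicated kthread workers so they can be given SCHED_FIFO priority. A condensed sketch of the kthread_worker lifecycle the driver now follows (names are illustrative):

#include <linux/kthread.h>

static void demo_work_fn(struct kthread_work *work)
{
	/* runs on the dedicated worker thread */
}

static int demo_worker_lifecycle(void)
{
	static struct kthread_worker worker;
	static struct kthread_work work;
	struct task_struct *thread;

	kthread_init_worker(&worker);		/* like disp_thread[i].worker */
	thread = kthread_run(kthread_worker_fn, &worker, "demo:%d", 0);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_init_work(&work, demo_work_fn);	/* like vblank_ctrl.work */
	kthread_queue_work(&worker, &work);	/* like vblank_ctrl_queue_work() */

	kthread_flush_worker(&worker);		/* drain before stopping */
	kthread_stop(thread);
	return 0;
}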
index b2da1fbf81e0f373c75a4a34ba62e04d8c10f339..0cba86ed3f54ca1be0513164ba8481b86a255b65 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -33,6 +34,7 @@
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
 #include <asm/sizes.h>
+#include <linux/kthread.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
@@ -54,6 +56,12 @@ struct msm_fence_context;
 struct msm_gem_address_space;
 struct msm_gem_vma;
 
+#define MAX_CRTCS      8
+#define MAX_PLANES     20
+#define MAX_ENCODERS   8
+#define MAX_BRIDGES    8
+#define MAX_CONNECTORS 8
+
 struct msm_file_private {
        rwlock_t queuelock;
        struct list_head submitqueues;
@@ -68,12 +76,77 @@ enum msm_mdp_plane_property {
 };
 
 struct msm_vblank_ctrl {
-       struct work_struct work;
+       struct kthread_work work;
        struct list_head event_list;
        spinlock_t lock;
 };
 
 #define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE:           Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID:               EDID supported
+ */
+enum msm_display_caps {
+       MSM_DISPLAY_CAP_VID_MODE        = BIT(0),
+       MSM_DISPLAY_CAP_CMD_MODE        = BIT(1),
+       MSM_DISPLAY_CAP_HOT_PLUG        = BIT(2),
+       MSM_DISPLAY_CAP_EDID            = BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE: wait for the driver to flush its registers to the HW
+ * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to the panel
+ * @MSM_ENC_VBLANK:      wait for the HW VBLANK event (driver-internal waiters)
+ */
+enum msm_event_wait {
+       MSM_ENC_COMMIT_DONE = 0,
+       MSM_ENC_TX_COMPLETE,
+       MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm:       number of layer mixers used
+ * @num_enc:      number of compression encoder blocks used
+ * @num_intf:     number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+       u32 num_lm;
+       u32 num_enc;
+       u32 num_intf;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type:          DRM_MODE_CONNECTOR_ display type
+ * @capabilities:       Bitmask of display flags
+ * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
+ * @h_tile_instance:    Controller instance used per tile. Number of elements is
+ *                      based on num_of_h_tiles
+ * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
+ *                              used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+       int intf_type;
+       uint32_t capabilities;
+       uint32_t num_of_h_tiles;
+       uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+       bool is_te_using_watchdog_timer;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+       struct drm_device *dev;
+       struct task_struct *thread;
+       unsigned int crtc_id;
+       struct kthread_worker worker;
+};
 
 struct msm_drm_private {
 
@@ -84,7 +157,7 @@ struct msm_drm_private {
        /* subordinate devices, if present: */
        struct platform_device *gpu_pdev;
 
-       /* top level MDSS wrapper device (for MDP5 only) */
+       /* top level MDSS wrapper device (for MDP5/DPU only) */
        struct msm_mdss *mdss;
 
        /* possibly this should be in the kms component, but it is
@@ -115,22 +188,24 @@ struct msm_drm_private {
        struct list_head inactive_list;
 
        struct workqueue_struct *wq;
-       struct workqueue_struct *atomic_wq;
 
        unsigned int num_planes;
-       struct drm_plane *planes[16];
+       struct drm_plane *planes[MAX_PLANES];
 
        unsigned int num_crtcs;
-       struct drm_crtc *crtcs[8];
+       struct drm_crtc *crtcs[MAX_CRTCS];
+
+       struct msm_drm_thread disp_thread[MAX_CRTCS];
+       struct msm_drm_thread event_thread[MAX_CRTCS];
 
        unsigned int num_encoders;
-       struct drm_encoder *encoders[8];
+       struct drm_encoder *encoders[MAX_ENCODERS];
 
        unsigned int num_bridges;
-       struct drm_bridge *bridges[8];
+       struct drm_bridge *bridges[MAX_BRIDGES];
 
        unsigned int num_connectors;
-       struct drm_connector *connectors[8];
+       struct drm_connector *connectors[MAX_CONNECTORS];
 
        /* Properties */
        struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
@@ -150,6 +225,7 @@ struct msm_drm_private {
        struct shrinker shrinker;
 
        struct msm_vblank_ctrl vblank_ctrl;
+       struct drm_atomic_state *pm_state;
 };
 
 struct msm_format {
@@ -174,6 +250,9 @@ struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
                const char *name);
 
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
@@ -285,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 
 void __init msm_mdp_register(void);
 void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
 
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
index 7a16242bf8bf28bb5d7493fb47ded89c754f7092..2a7348aeb38d1a785c79b874a4d6fef9bb080017 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
 struct msm_framebuffer {
        struct drm_framebuffer base;
        const struct msm_format *format;
-       struct drm_gem_object *planes[MAX_PLANE];
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
 static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
                const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
-               struct drm_file *file_priv,
-               unsigned int *handle)
-{
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       return drm_gem_handle_create(file_priv,
-                       msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       int i, n = fb->format->num_planes;
-
-       DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       for (i = 0; i < n; i++) {
-               struct drm_gem_object *bo = msm_fb->planes[i];
-
-               drm_gem_object_put_unlocked(bo);
-       }
-
-       kfree(msm_fb);
-}
-
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
-       .create_handle = msm_framebuffer_create_handle,
-       .destroy = msm_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
        for (i = 0; i < n; i++) {
                seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
                                i, fb->offsets[i], fb->pitches[i]);
-               msm_gem_describe(msm_fb->planes[i], m);
+               msm_gem_describe(fb->obj[i], m);
        }
 }
 #endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int ret, i, n = fb->format->num_planes;
        uint64_t iova;
 
        for (i = 0; i < n; i++) {
-               ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+               ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
                DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
                if (ret)
                        return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        for (i = 0; i < n; i++)
-               msm_gem_put_iova(msm_fb->planes[i], aspace);
+               msm_gem_put_iova(fb->obj[i], aspace);
 }
 
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace, int plane)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       if (!msm_fb->planes[plane])
+       if (!fb->obj[plane])
                return 0;
-       return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+       return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
 }
 
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
-       struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-       return msm_fb->planes[plane];
+       return drm_gem_fb_get_obj(fb, plane);
 }
 
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -202,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
        msm_fb->format = format;
 
-       if (n > ARRAY_SIZE(msm_fb->planes)) {
+       if (n > ARRAY_SIZE(fb->obj)) {
                ret = -EINVAL;
                goto fail;
        }
@@ -221,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
                        goto fail;
                }
 
-               msm_fb->planes[i] = bos[i];
+               msm_fb->base.obj[i] = bos[i];
        }
 
        drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
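With the per-driver planes[] array gone, msm_framebuffer now stores its GEM objects in the core drm_framebuffer's obj[] array and leans on the shared GEM framebuffer helpers. A minimal sketch of the accessor this enables, assuming the helper API shown in the hunk above:

#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>

/* Per-plane BO lookup goes through the shared helper, which simply
 * returns fb->obj[plane] (NULL when the plane is unused). */
static struct drm_gem_object *plane_bo(struct drm_framebuffer *fb, int plane)
{
	return drm_gem_fb_get_obj(fb, plane);
}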
index 1c09acfb4028d228ab806d2c4b81d239cff20d33..f388944c93e2750a5b3092fee535b400e5fa63ca 100644 (file)
 #include "msm_mmu.h"
 #include "msm_fence.h"
 
+#include <generated/utsrelease.h>
 #include <linux/string_helpers.h>
 #include <linux/pm_opp.h>
 #include <linux/devfreq.h>
-
+#include <linux/devcoredump.h>
 
 /*
  * Power Management:
@@ -273,6 +274,123 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
        return ret;
 }
 
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+               size_t count, void *data, size_t datalen)
+{
+       struct msm_gpu *gpu = data;
+       struct drm_print_iterator iter;
+       struct drm_printer p;
+       struct msm_gpu_state *state;
+
+       state = msm_gpu_crashstate_get(gpu);
+       if (!state)
+               return 0;
+
+       iter.data = buffer;
+       iter.offset = 0;
+       iter.start = offset;
+       iter.remain = count;
+
+       p = drm_coredump_printer(&iter);
+
+       drm_printf(&p, "---\n");
+       drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+       drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+       drm_printf(&p, "time: %lld.%09ld\n",
+               state->time.tv_sec, state->time.tv_nsec);
+       if (state->comm)
+               drm_printf(&p, "comm: %s\n", state->comm);
+       if (state->cmd)
+               drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+       gpu->funcs->show(gpu, state, &p);
+
+       msm_gpu_crashstate_put(gpu);
+
+       return count - iter.remain;
+}
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+       struct msm_gpu *gpu = data;
+
+       msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+               struct msm_gem_object *obj, u64 iova, u32 flags)
+{
+       struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+       /* Record size/iova for every object; contents only for readable BOs */
+
+       state_bo->size = obj->base.size;
+       state_bo->iova = iova;
+
+       /* Only store the data for buffer objects marked for read */
+       if ((flags & MSM_SUBMIT_BO_READ)) {
+               void *ptr;
+
+               state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+               if (!state_bo->data)
+                       return;
+
+               ptr = msm_gem_get_vaddr_active(&obj->base);
+               if (IS_ERR(ptr)) {
+                       kvfree(state_bo->data);
+                       return;
+               }
+
+               memcpy(state_bo->data, ptr, obj->base.size);
+               msm_gem_put_vaddr(&obj->base);
+       }
+
+       state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+               struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+       struct msm_gpu_state *state;
+
+       /* Only save one crash state at a time */
+       if (gpu->crashstate)
+               return;
+
+       state = gpu->funcs->gpu_state_get(gpu);
+       if (IS_ERR_OR_NULL(state))
+               return;
+
+       /* Fill in the additional crash state information */
+       state->comm = kstrdup(comm, GFP_KERNEL);
+       state->cmd = kstrdup(cmd, GFP_KERNEL);
+
+       if (submit) {
+               int i;
+
+               state->bos = kcalloc(submit->nr_bos,
+                       sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+               for (i = 0; state->bos && i < submit->nr_bos; i++)
+                       msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+                               submit->bos[i].iova, submit->bos[i].flags);
+       }
+
+       /* Set the active crash state to be dumped on failure */
+       gpu->crashstate = state;
+
+       /* FIXME: Release the crashstate if this errors out? */
+       dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+               msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+               struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
+
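The crash-capture path above hands the state to the devcoredump facility, which exposes it under /sys/class/devcoredump and calls back into the driver as userspace reads the dump. A stripped-down sketch of that registration, with hypothetical demo_* names; a real read callback would format `data` into `buffer`, honouring offset and count:

#include <linux/devcoredump.h>
#include <linux/slab.h>

static ssize_t demo_dump_read(char *buffer, loff_t offset, size_t count,
			      void *data, size_t datalen)
{
	/* copy up to `count` bytes of the dump, starting at `offset` */
	return 0;
}

static void demo_dump_free(void *data)
{
	kfree(data);	/* devcoredump owns `data` until this point */
}

static void demo_report_crash(struct device *dev, void *state)
{
	/* datalen is 0 because the read callback computes sizes itself */
	dev_coredumpm(dev, THIS_MODULE, state, 0, GFP_KERNEL,
		      demo_dump_read, demo_dump_free);
}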
 /*
  * Hangcheck detection for locked gpu:
  */
@@ -314,6 +432,7 @@ static void recover_worker(struct work_struct *work)
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_submit *submit;
        struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+       char *comm = NULL, *cmd = NULL;
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -327,7 +446,7 @@ static void recover_worker(struct work_struct *work)
                rcu_read_lock();
                task = pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
-                       char *cmd;
+                       comm = kstrdup(task->comm, GFP_ATOMIC);
 
                        /*
                         * So slightly annoying, in other paths like
@@ -340,22 +459,28 @@ static void recover_worker(struct work_struct *work)
                         * about the submit going away.
                         */
                        mutex_unlock(&dev->struct_mutex);
-                       cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+                       cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
                        mutex_lock(&dev->struct_mutex);
+               }
+               rcu_read_unlock();
 
+               if (comm && cmd) {
                        dev_err(dev->dev, "%s: offending task: %s (%s)\n",
-                               gpu->name, task->comm, cmd);
+                               gpu->name, comm, cmd);
 
                        msm_rd_dump_submit(priv->hangrd, submit,
-                               "offending task: %s (%s)", task->comm, cmd);
-
-                       kfree(cmd);
-               } else {
+                               "offending task: %s (%s)", comm, cmd);
+               } else
                        msm_rd_dump_submit(priv->hangrd, submit, NULL);
-               }
-               rcu_read_unlock();
        }
 
+       /* Record the crash state */
+       pm_runtime_get_sync(&gpu->pdev->dev);
+       msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+       pm_runtime_put_sync(&gpu->pdev->dev);
+
+       kfree(cmd);
+       kfree(comm);
 
        /*
         * Update all the rings with the latest and greatest fence.. this
index b8241179175a2ca523b259777c481f53a460f2f9..1c6105bc55c767735e13c771c66ebe1d243cc859 100644 (file)
@@ -27,6 +27,7 @@
 
 struct msm_gem_submit;
 struct msm_gpu_perfcntr;
+struct msm_gpu_state;
 
 struct msm_gpu_config {
        const char *ioname;
@@ -64,11 +65,14 @@ struct msm_gpu_funcs {
        void (*destroy)(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
        /* show GPU status in debugfs: */
-       void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+       void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+                       struct drm_printer *p);
        /* for generation specific debugfs: */
        int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
 #endif
        int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+       struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+       int (*gpu_state_put)(struct msm_gpu_state *state);
 };
 
 struct msm_gpu {
@@ -129,6 +133,8 @@ struct msm_gpu {
                u64 busy_cycles;
                ktime_t time;
        } devfreq;
+
+       struct msm_gpu_state *crashstate;
 };
 
 /* It turns out that all targets use the same ringbuffer size */
@@ -175,6 +181,38 @@ struct msm_gpu_submitqueue {
        struct kref ref;
 };
 
+struct msm_gpu_state_bo {
+       u64 iova;
+       size_t size;
+       void *data;
+};
+
+struct msm_gpu_state {
+       struct kref ref;
+       struct timespec64 time;
+
+       struct {
+               u64 iova;
+               u32 fence;
+               u32 seqno;
+               u32 rptr;
+               u32 wptr;
+               void *data;
+               int data_size;
+       } ring[MSM_GPU_MAX_RINGS];
+
+       int nr_registers;
+       u32 *registers;
+
+       u32 rbbm_status;
+
+       char *comm;
+       char *cmd;
+
+       int nr_bos;
+       struct msm_gpu_state_bo *bos;
+};
+
 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
 {
        msm_writel(data, gpu->mmio + (reg << 2));
@@ -254,4 +292,32 @@ static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
                kref_put(&queue->ref, msm_submitqueue_destroy);
 }
 
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+       struct msm_gpu_state *state = NULL;
+
+       mutex_lock(&gpu->dev->struct_mutex);
+
+       if (gpu->crashstate) {
+               kref_get(&gpu->crashstate->ref);
+               state = gpu->crashstate;
+       }
+
+       mutex_unlock(&gpu->dev->struct_mutex);
+
+       return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+       mutex_lock(&gpu->dev->struct_mutex);
+
+       if (gpu->crashstate) {
+               if (gpu->funcs->gpu_state_put(gpu->crashstate))
+                       gpu->crashstate = NULL;
+       }
+
+       mutex_unlock(&gpu->dev->struct_mutex);
+}
+
 #endif /* __MSM_GPU_H__ */
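msm_gpu_state is reference counted: the crashstate helpers above take struct_mutex, grab or drop a kref, and clear gpu->crashstate once the last reference is gone. A self-contained sketch of that kref pattern (demo_* names are illustrative):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_state {
	struct kref ref;
};

static void demo_state_destroy(struct kref *kref)
{
	kfree(container_of(kref, struct demo_state, ref));
}

static struct demo_state *demo_state_get(struct demo_state *s)
{
	kref_get(&s->ref);		/* like msm_gpu_crashstate_get() */
	return s;
}

static int demo_state_put(struct demo_state *s)
{
	/* returns nonzero once the final reference is dropped, which is
	 * how gpu_state_put() signals that crashstate can be cleared */
	return kref_put(&s->ref, demo_state_destroy);
}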
index dfd92947de2c88a7b3e0c9e963ba202093215aec..fd88cebb6adb339f489137178bd49e8ec0b0eafc 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -42,6 +43,7 @@ struct msm_kms_funcs {
        void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
        /* modeset, bracketing atomic_commit(): */
        void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+       void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
        void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
        /* functions to wait for atomic commit completed on each CRTC */
        void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
@@ -50,6 +52,11 @@ struct msm_kms_funcs {
        const struct msm_format *(*get_format)(struct msm_kms *kms,
                                        const uint32_t format,
                                        const uint64_t modifiers);
+       /* do format checking on format modified through fb_cmd2 modifiers */
+       int (*check_modified_format)(const struct msm_kms *kms,
+                       const struct msm_format *msm_fmt,
+                       const struct drm_mode_fb_cmd2 *cmd,
+                       struct drm_gem_object **bos);
        /* misc: */
        long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
                        struct drm_encoder *encoder);
@@ -60,6 +67,9 @@ struct msm_kms_funcs {
        void (*set_encoder_mode)(struct msm_kms *kms,
                                 struct drm_encoder *encoder,
                                 bool cmd_mode);
+       /* pm suspend/resume hooks */
+       int (*pm_suspend)(struct device *dev);
+       int (*pm_resume)(struct device *dev);
        /* cleanup: */
        void (*destroy)(struct msm_kms *kms);
 #ifdef CONFIG_DEBUG_FS
@@ -86,9 +96,20 @@ static inline void msm_kms_init(struct msm_kms *kms,
 
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
+
+struct msm_mdss_funcs {
+       int (*enable)(struct msm_mdss *mdss);
+       int (*disable)(struct msm_mdss *mdss);
+       void (*destroy)(struct drm_device *dev);
+};
+
+struct msm_mdss {
+       struct drm_device *dev;
+       const struct msm_mdss_funcs *funcs;
+};
+
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
 
 #endif /* __MSM_KMS_H__ */
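The MDSS wrapper is now an abstraction: msm_kms.h declares struct msm_mdss with an ops table, and mdp5_mdss_init()/dpu_mdss_init() each provide a backend. A sketch of what such a backend fills in (the demo_* names are hypothetical):

/* A hypothetical backend embeds struct msm_mdss and supplies the ops
 * table; its init function would assign priv->mdss to &demo->base. */
struct demo_mdss {
	struct msm_mdss base;
	/* backend-specific clocks, irqs, registers, ... */
};

static int demo_mdss_enable(struct msm_mdss *mdss)
{
	return 0;	/* power up MDSS clocks/GDSC here */
}

static int demo_mdss_disable(struct msm_mdss *mdss)
{
	return 0;	/* power down here */
}

static void demo_mdss_destroy(struct drm_device *dev)
{
	/* release resources and clear priv->mdss */
}

static const struct msm_mdss_funcs demo_mdss_funcs = {
	.enable  = demo_mdss_enable,
	.disable = demo_mdss_disable,
	.destroy = demo_mdss_destroy,
};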
index 6aa6ee16dcbdc564b927ee964775b81a3574aed3..2c569e264df37181d91fce3f5261a8e3503a5fa1 100644 (file)
@@ -1017,7 +1017,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
        nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
        nv_crtc->cursor.show(nv_crtc, true);
 out:
-       drm_gem_object_unreference_unlocked(gem);
+       drm_gem_object_put_unlocked(gem);
        return ret;
 }
 
index 4feab0a5419d73d4bb24e3f53ad974f884a3295e..e7af95d37ddb54488e31f92d2498066a745fc5c4 100644 (file)
@@ -556,6 +556,6 @@ nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
        encoder->possible_crtcs = entry->heads;
        encoder->possible_clones = 0;
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        return 0;
 }
index 9805d2cdc1a1d5f3b02f1422a98d0a84a41ba8f4..73d41abbb5103e2b6a4d01fc14a0a8e9df2f93f4 100644 (file)
@@ -716,6 +716,6 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
            entry->location != DCB_LOC_ON_CHIP)
                nv04_tmds_slave_init(encoder);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        return 0;
 }
index 501d2d290e9c6c49072c8a57d9a3643dd286b731..70dce544984e848b54409a390a41c2a3f9c24d4f 100644 (file)
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
        nouveau_display(dev)->init = nv04_display_init;
        nouveau_display(dev)->fini = nv04_display_fini;
 
+       /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+       dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
        nouveau_hw_save_vga_fonts(dev, 1);
 
        nv04_crtc_create(dev, 0);
index 01664357d3e18b4249348a81498e52bfd7170e4f..de4490b4ed30c80d3e26996ba6d9731869c07302 100644 (file)
@@ -244,7 +244,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 
        /* Attach it to the specified connector. */
        get_slave_funcs(encoder)->create_resources(encoder, connector);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 
index 6d99f11fee4e0e5c65f9518b8c51f429f784bc9b..6a4ca139cf5d71efb67427dd2f79eb886482d817 100644 (file)
@@ -821,6 +821,6 @@ nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
        encoder->possible_clones = 0;
 
        nv17_tv_create_resources(encoder, connector);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        return 0;
 }
index 291c08117ab65337f7a9d8567cec08207cd555db..397143b639c64ba6dbd7c1144c0314aae954339f 100644 (file)
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 
        nvif_object_map(&wndw->wimm.base.user, NULL, 0);
        wndw->immd = func;
-       wndw->ctxdma.parent = &disp->core->chan.base.user;
+       wndw->ctxdma.parent = NULL;
        return 0;
 }
 
index b83465ae7c1bcead7b81657cd2d03968e74a6baf..8412119bd94058b7197c531c49fc7ad88a9e6fab 100644 (file)
@@ -136,12 +136,24 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 {
        struct nouveau_cli *cli = (void *)device->object.client;
        struct nv50_disp_core_channel_dma_v0 *args = data;
+       u8 type = NVIF_MEM_COHERENT;
        int ret;
 
        mutex_init(&dmac->lock);
 
-       ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
-                               &dmac->push);
+       /* Pascal added support for 47-bit physical addresses, but some
+        * parts of EVO still only accept 40-bit PAs.
+        *
+        * To avoid issues on systems with large amounts of RAM, and on
+        * systems where an IOMMU maps pages at a high address, we need
+        * to allocate push buffers in VRAM instead.
+        *
+        * This appears to match NVIDIA's behaviour on Pascal.
+        */
+       if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
+               type |= NVIF_MEM_VRAM;
+
+       ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
        if (ret)
                return ret;
 
@@ -216,6 +228,19 @@ void
 evo_kick(u32 *push, struct nv50_dmac *evoc)
 {
        struct nv50_dmac *dmac = evoc;
+
+       /* Push buffer fetches are not coherent with BAR1, we need to ensure
+        * writes have been flushed right through to VRAM before writing PUT.
+        */
+       if (dmac->push.type & NVIF_MEM_VRAM) {
+               struct nvif_device *device = dmac->base.device;
+               nvif_wr32(&device->object, 0x070000, 0x00000001);
+               nvif_msec(device, 2000,
+                       if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+                               break;
+               );
+       }
+
        nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
        mutex_unlock(&dmac->lock);
 }
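Because push buffer fetches are not coherent with BAR1 writes once the buffer lives in VRAM, evo_kick() above now triggers a flush and polls it to completion before publishing PUT. A schematic restatement of that sequence; the register offset 0x070000 and status bit 0x2 are taken from the hunk above, and the bounded spin stands in for nouveau's nvif_msec poll helper:

/* Schematic only: flush-before-doorbell as done in evo_kick(). */
static void kick_flush_sketch(struct nvif_object *device_obj,
			      struct nvif_object *chan_obj, u32 put)
{
	int spins = 2000;

	nvif_wr32(device_obj, 0x070000, 0x00000001);	/* trigger the flush */
	while (--spins &&
	       (nvif_rd32(device_obj, 0x070000) & 0x00000002))
		;					/* wait for it to land */

	nvif_wr32(chan_obj, 0x0000, put);		/* now write PUT */
}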
@@ -424,7 +449,7 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
                         "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_dac_help);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        return 0;
 }
 
@@ -850,7 +875,7 @@ nv50_mstc_get_modes(struct drm_connector *connector)
        int ret = 0;
 
        mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
-       drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
+       drm_connector_update_edid_property(&mstc->connector, mstc->edid);
        if (mstc->edid)
                ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
 
@@ -927,11 +952,11 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
        nouveau_conn_attach_properties(&mstc->connector);
 
        for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
-               drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
+               drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
 
        drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
        drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
-       drm_mode_connector_set_path_property(&mstc->connector, path);
+       drm_connector_set_path_property(&mstc->connector, path);
        return 0;
 }
 
@@ -1007,7 +1032,7 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
        mstc->port = NULL;
        drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
 
-       drm_connector_unreference(&mstc->connector);
+       drm_connector_put(&mstc->connector);
 }
 
 static void
@@ -1418,7 +1443,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
                         "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_sor_help);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        if (dcbe->type == DCB_OUTPUT_DP) {
                struct nv50_disp *disp = nv50_disp(encoder->dev);
@@ -1576,7 +1601,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
                         "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_pior_help);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        return 0;
 }
 
@@ -1585,8 +1610,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
  *****************************************************************************/
 
 static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
 {
+       struct nouveau_drm *drm = nouveau_drm(state->dev);
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_core *core = disp->core;
        struct nv50_mstm *mstm;
@@ -1617,6 +1643,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
        }
 }
 
+static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+       struct drm_plane_state *new_plane_state;
+       struct drm_plane *plane;
+       int i;
+
+       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+               struct nv50_wndw *wndw = nv50_wndw(plane);
+               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+                       if (wndw->func->update)
+                               wndw->func->update(wndw, interlock);
+               }
+       }
+}
+
 static void
 nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 {
@@ -1684,7 +1726,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                        help->disable(encoder);
                        interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
                        if (outp->flush_disable) {
-                               nv50_disp_atomic_commit_core(drm, interlock);
+                               nv50_disp_atomic_commit_wndw(state, interlock);
+                               nv50_disp_atomic_commit_core(state, interlock);
                                memset(interlock, 0x00, sizeof(interlock));
                        }
                }
@@ -1693,15 +1736,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
        /* Flush disable. */
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                if (atom->flush_disable) {
-                       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-                               struct nv50_wndw *wndw = nv50_wndw(plane);
-                               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-                                       if (wndw->func->update)
-                                               wndw->func->update(wndw, interlock);
-                               }
-                       }
-
-                       nv50_disp_atomic_commit_core(drm, interlock);
+                       nv50_disp_atomic_commit_wndw(state, interlock);
+                       nv50_disp_atomic_commit_core(state, interlock);
                        memset(interlock, 0x00, sizeof(interlock));
                }
        }
@@ -1762,18 +1798,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
        }
 
        /* Flush update. */
-       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               struct nv50_wndw *wndw = nv50_wndw(plane);
-               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-                       if (wndw->func->update)
-                               wndw->func->update(wndw, interlock);
-               }
-       }
+       nv50_disp_atomic_commit_wndw(state, interlock);
 
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+                   interlock[NV50_DISP_INTERLOCK_OVLY] ||
+                   interlock[NV50_DISP_INTERLOCK_WNDW] ||
                    !atom->state.legacy_cursor_update)
-                       nv50_disp_atomic_commit_core(drm, interlock);
+                       nv50_disp_atomic_commit_core(state, interlock);
                else
                        disp->core->func->update(disp->core, interlock, false);
        }
@@ -1871,7 +1903,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
                nv50_disp_atomic_commit_tail(state);
 
        drm_for_each_crtc(crtc, dev) {
-               if (crtc->state->enable) {
+               if (crtc->state->active) {
                        if (!drm->have_disp_power_ref) {
                                drm->have_disp_power_ref = true;
                                return 0;
@@ -2119,10 +2151,6 @@ nv50_display_destroy(struct drm_device *dev)
        kfree(disp);
 }
 
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
 int
 nv50_display_create(struct drm_device *dev)
 {
@@ -2147,8 +2175,6 @@ nv50_display_create(struct drm_device *dev)
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
        dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
-       if (nouveau_atomic)
-               dev->driver->driver_features |= DRIVER_ATOMIC;
 
        /* small shared memory area we use for notifiers and semaphores */
        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2231,6 +2257,9 @@ nv50_display_create(struct drm_device *dev)
                connector->funcs->destroy(connector);
        }
 
+       /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
+       dev->vblank_disable_immediate = true;
+
 out:
        if (ret)
                nv50_display_destroy(dev);
index 224963b533a69163b39bed8cbf175e637befd591..2187922e8dc28d4d11df28bc33aec3f91281791c 100644 (file)
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
        if (ret)
                return ret;
 
-       ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-       if (IS_ERR(ctxdma)) {
-               nouveau_bo_unpin(fb->nvbo);
-               return PTR_ERR(ctxdma);
+       if (wndw->ctxdma.parent) {
+               ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+               if (IS_ERR(ctxdma)) {
+                       nouveau_bo_unpin(fb->nvbo);
+                       return PTR_ERR(ctxdma);
+               }
+
+               asyw->image.handle[0] = ctxdma->object.handle;
        }
 
        asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-       asyw->image.handle[0] = ctxdma->object.handle;
        asyw->image.offset[0] = fb->nvbo->bo.offset;
 
        if (wndw->func->prepare) {
@@ -583,7 +586,6 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
        wndw->id = index;
        wndw->interlock.type = interlock_type;
        wndw->interlock.data = interlock_data;
-       wndw->ctxdma.parent = &wndw->wndw.base.user;
 
        wndw->ctxdma.parent = &wndw->wndw.base.user;
        INIT_LIST_HEAD(&wndw->ctxdma.list);
index 20754d9e68838e167ad29a0fef9c2d4de66c9371..8407651f6ac6ef2b3327953d3114118f85d58acf 100644 (file)
@@ -78,7 +78,7 @@ struct nvif_mclass {
 #define nvif_mclass(o,m) ({                                                    \
        struct nvif_object *object = (o);                                      \
        struct nvif_sclass *sclass;                                            \
-       const typeof(m[0]) *mclass = (m);                                      \
+       typeof(m[0]) *mclass = (m);                                            \
        int ret = -ENODEV;                                                     \
        int cnt, i, j;                                                         \
                                                                                \
index e2211bb2cf79e8affb9d8ca06b7f4b8e7d2c74d0..e67a471331b514b75321c4bf8a0fff1be7057113 100644 (file)
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
        if (chan->ntfy) {
                nouveau_vma_del(&chan->ntfy_vma);
                nouveau_bo_unpin(chan->ntfy);
-               drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
+               drm_gem_object_put_unlocked(&chan->ntfy->gem);
        }
 
        if (chan->heap.block_size)
index debbbf0fd4bdda619732c67952c772f9957c4166..408b955e5c39a6b41043c18fb37ae8dc9de42c04 100644 (file)
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_device *device = &drm->client.device;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
 
        INIT_LIST_HEAD(&drm->bl_connectors);
 
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
                return 0;
        }
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
                    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
                        continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
                        break;
                }
        }
-
+       drm_connector_list_iter_end(&conn_iter);
 
        return 0;
 }
index 7b557c3543079128ff339b79320c62d6bfb36334..51932c72334ef6529abb18bc44762cc01ba2d176 100644 (file)
@@ -363,19 +363,11 @@ module_param_named(hdmimhz, nouveau_hdmimhz, int, 0400);
 struct nouveau_encoder *
 find_encoder(struct drm_connector *connector, int type)
 {
-       struct drm_device *dev = connector->dev;
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *enc;
-       int i, id;
-
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               id = connector->encoder_ids[i];
-               if (!id)
-                       break;
+       int i;
 
-               enc = drm_encoder_find(dev, NULL, id);
-               if (!enc)
-                       continue;
+       drm_connector_for_each_possible_encoder(connector, enc, i) {
                nv_encoder = nouveau_encoder(enc);
 
                if (type == DCB_OUTPUT_ANY ||
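find_encoder() here (and nouveau_connector_ddc_detect() below) previously open-coded a walk over connector->encoder_ids[], looking each id up with drm_encoder_find(); drm_connector_for_each_possible_encoder() wraps exactly that loop. At this point in the API's history the macro takes a scratch index as its third argument. Usage sketch, with a hypothetical filter:

	struct drm_encoder *enc;
	int i;

	drm_connector_for_each_possible_encoder(connector, enc, i) {
		if (enc->encoder_type == DRM_MODE_ENCODER_TMDS)
			return enc;	/* first TMDS encoder that can drive this connector */
	}
	return NULL;
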
@@ -420,7 +412,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
-       struct nouveau_encoder *nv_encoder;
+       struct nouveau_encoder *nv_encoder = NULL;
        struct drm_encoder *encoder;
        int i, panel = -ENODEV;
 
@@ -436,14 +428,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
                }
        }
 
-       for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               int id = connector->encoder_ids[i];
-               if (id == 0)
-                       break;
-
-               encoder = drm_encoder_find(dev, NULL, id);
-               if (!encoder)
-                       continue;
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                nv_encoder = nouveau_encoder(encoder);
 
                if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
@@ -565,7 +550,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 
        /* Cleanup the previous EDID block. */
        if (nv_connector->edid) {
-               drm_mode_connector_update_edid_property(connector, NULL);
+               drm_connector_update_edid_property(connector, NULL);
                kfree(nv_connector->edid);
                nv_connector->edid = NULL;
        }
@@ -590,7 +575,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                else
                        nv_connector->edid = drm_get_edid(connector, i2c);
 
-               drm_mode_connector_update_edid_property(connector,
+               drm_connector_update_edid_property(connector,
                                                        nv_connector->edid);
                if (!nv_connector->edid) {
                        NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
@@ -672,7 +657,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
 
        /* Cleanup the previous EDID block. */
        if (nv_connector->edid) {
-               drm_mode_connector_update_edid_property(connector, NULL);
+               drm_connector_update_edid_property(connector, NULL);
                kfree(nv_connector->edid);
                nv_connector->edid = NULL;
        }
@@ -736,7 +721,7 @@ out:
                status = connector_status_unknown;
 #endif
 
-       drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+       drm_connector_update_edid_property(connector, nv_connector->edid);
        nouveau_connector_set_encoder(connector, nv_encoder);
        return status;
 }
@@ -1208,14 +1193,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_connector *nv_connector = NULL;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
        int type, ret = 0;
        bool dummy;
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                nv_connector = nouveau_connector(connector);
-               if (nv_connector->index == index)
+               if (nv_connector->index == index) {
+                       drm_connector_list_iter_end(&conn_iter);
                        return connector;
+               }
        }
+       drm_connector_list_iter_end(&conn_iter);
 
        nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
        if (!nv_connector)
index a4d1a059bd3d4f948c36c0a942150c68199ec974..dc7454e7f19aa0ec9f22e279015a0966eedbd531 100644 (file)
@@ -33,6 +33,7 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_dp_helper.h>
 #include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
 
 struct nvkm_i2c_port;
 
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
        return container_of(con, struct nouveau_connector, base);
 }
 
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+       const struct nouveau_encoder *nv_encoder;
+       const struct drm_encoder *encoder;
+
+       if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+               return false;
+
+       nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+       if (!nv_encoder)
+               return false;
+
+       encoder = &nv_encoder->base.base;
+       return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+       drm_for_each_connector_iter(connector, iter) \
+               for_each_if(!nouveau_connector_is_mst(connector))
+
 static inline struct nouveau_connector *
 nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 {
        struct drm_device *dev = nv_crtc->base.dev;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       struct nouveau_connector *nv_connector = NULL;
        struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder && connector->encoder->crtc == crtc)
-                       return nouveau_connector(connector);
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+               if (connector->encoder && connector->encoder->crtc == crtc) {
+                       nv_connector = nouveau_connector(connector);
+                       break;
+               }
        }
+       drm_connector_list_iter_end(&conn_iter);
 
-       return NULL;
+       return nv_connector;
 }
 
 struct drm_connector *
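nouveau_for_each_non_mst_connector_iter() composes the generic iterator with for_each_if(), the DRM helper for filtering inside loop macros. for_each_if(cond) expands to "if (!(cond)) {} else", so a braced body following the macro binds to the else branch and filtered-out entries fall through the empty if — avoiding the dangling-else hazard a bare if would create. A toy example of the same construction (illustrative only, not from this commit):

	#define for_each_even(x, n) \
		for ((x) = 0; (x) < (n); (x)++) \
			for_each_if(((x) & 1) == 0)

	int x;

	for_each_even(x, 10)
		pr_info("%d\n", x);	/* prints 0 2 4 6 8 */
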
index 963a4dba8213eb6080ff3713cbbdbb20ddb4b61e..9109b69cd052958bbc126b4bad4f490720e11f4a 100644 (file)
@@ -160,7 +160,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
                args.ustate = value;
        }
 
+       ret = pm_runtime_get_sync(drm->dev);
+       if (IS_ERR_VALUE(ret) && ret != -EACCES)
+               return ret;
        ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+       pm_runtime_put_autosuspend(drm->dev);
        if (ret < 0)
                return ret;
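The debugfs pstate handler now wakes the GPU before issuing the NVIF method: pm_runtime_get_sync() resumes the device (returning -EACCES when runtime PM is not enabled for it, which the code deliberately tolerates), and pm_runtime_put_autosuspend() re-arms autosuspend afterwards. One caveat worth noting: pm_runtime_get_sync() raises the usage count even when it fails, so a fully robust caller also drops the reference on the error path — roughly:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);	/* usage count was still taken */
		return ret;
	}
	/* ... touch the hardware ... */
	pm_runtime_put_autosuspend(dev);

(Later kernels added pm_runtime_resume_and_get() for exactly this pitfall.)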
 
index 774b429142bc8e22c79e6b0ed46f97f72e3ffab1..139368b31916b0f5a680916f0bc261ece493ec8f 100644 (file)
@@ -205,7 +205,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
        struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
        if (fb->nvbo)
-               drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
+               drm_gem_object_put_unlocked(&fb->nvbo->gem);
 
        drm_framebuffer_cleanup(drm_fb);
        kfree(fb);
@@ -287,7 +287,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
        if (ret == 0)
                return &fb->base;
 
-       drm_gem_object_unreference_unlocked(gem);
+       drm_gem_object_put_unlocked(gem);
        return ERR_PTR(ret);
 }
 
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
        int ret;
 
        ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
                return ret;
 
        /* enable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                nvif_notify_get(&conn->hpd);
        }
+       drm_connector_list_iter_end(&conn_iter);
 
        /* enable flip completion events */
        nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
 
        if (!suspend) {
                if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        nvif_notify_put(&drm->flip);
 
        /* disable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                nvif_notify_put(&conn->hpd);
        }
+       drm_connector_list_iter_end(&conn_iter);
 
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
@@ -939,7 +945,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
                return ret;
 
        ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
-       drm_gem_object_unreference_unlocked(&bo->gem);
+       drm_gem_object_put_unlocked(&bo->gem);
        return ret;
 }
 
@@ -954,7 +960,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        if (gem) {
                struct nouveau_bo *bo = nouveau_gem_object(gem);
                *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
-               drm_gem_object_unreference_unlocked(gem);
+               drm_gem_object_put_unlocked(gem);
                return 0;
        }
 
index 775443c9af943eb2e96ffd3187852b681fc3f916..c7ec86d6c3c910fdb73a7774eb921a414c156363 100644 (file)
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
 int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
 static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
+       if (nouveau_atomic)
+               driver_pci.driver_features |= DRIVER_ATOMIC;
+
        ret = drm_get_pci_dev(pdev, pent, &driver_pci);
        if (ret) {
                nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
 static int
 nouveau_pmops_runtime_idle(struct device *dev)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct nouveau_drm *drm = nouveau_drm(drm_dev);
-       struct drm_crtc *crtc;
-
        if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
 
-       list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
-               if (crtc->enabled) {
-                       DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
-                       return -EBUSY;
-               }
-       }
        pm_runtime_mark_last_busy(dev);
        pm_runtime_autosuspend(dev);
        /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
@@ -912,8 +908,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
        get_task_comm(tmpname, current);
        snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 
-       if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
-               return ret;
+       if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
+               ret = -ENOMEM;
+               goto done;
+       }
 
        ret = nouveau_cli_init(drm, name, cli);
        if (ret)
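Two independent fixes in this file: nouveau_drm_open() now sets -ENOMEM and takes the common exit path when the client allocation fails, rather than returning whatever ret previously held, and a new "atomic" module parameter gates DRIVER_ATOMIC before drm_get_pci_dev() registers the driver, keeping the atomic ioctl hidden unless explicitly requested — e.g. "modprobe nouveau atomic=1", or "nouveau.atomic=1" on the kernel command line when the driver is built in.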
index 85c1f10bc2b67651222731f1b9703278328c90e8..844498c4267cb691ecde39083740ca6d08a86390 100644 (file)
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
                nouveau_vma_del(&nouveau_fb->vma);
                nouveau_bo_unmap(nouveau_fb->nvbo);
                nouveau_bo_unpin(nouveau_fb->nvbo);
-               drm_framebuffer_unreference(&nouveau_fb->base);
+               drm_framebuffer_put(&nouveau_fb->base);
        }
 
        return 0;
index 300daee74209ab82a1675e94a052089f9523c6b4..b56524d343c3e84eb109badc3a763471fe95eff8 100644 (file)
@@ -274,7 +274,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
        }
 
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(&nvbo->gem);
+       drm_gem_object_put_unlocked(&nvbo->gem);
        return ret;
 }
 
@@ -354,7 +354,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
-               drm_gem_object_unreference_unlocked(&nvbo->gem);
+               drm_gem_object_put_unlocked(&nvbo->gem);
        }
 }
 
@@ -400,14 +400,14 @@ retry:
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
-                       drm_gem_object_unreference_unlocked(gem);
+                       drm_gem_object_put_unlocked(gem);
                        continue;
                }
 
                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
-                       drm_gem_object_unreference_unlocked(gem);
+                       drm_gem_object_put_unlocked(gem);
                        ret = -EINVAL;
                        break;
                }
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                struct nouveau_bo *nvbo;
                uint32_t data;
 
-               if (unlikely(r->bo_index > req->nr_buffers)) {
+               if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
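A classic off-by-one in the relocation checks: valid buffer indices run 0 .. nr_buffers - 1, so the old "> req->nr_buffers" comparison accepted an index equal to nr_buffers and read one element past the validated buffer array. The corrected bound, spelled out:

	/* nr_buffers == 4  ->  valid indices are 0, 1, 2, 3 */
	if (unlikely(r->bo_index >= req->nr_buffers))	/* rejects 4; '>' wrongly allowed it */
		return -EINVAL;
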
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                if (b->presumed.valid)
                        continue;
 
-               if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+               if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
@@ -894,7 +894,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                ret = lret;
 
        nouveau_bo_sync_for_cpu(nvbo);
-       drm_gem_object_unreference_unlocked(gem);
+       drm_gem_object_put_unlocked(gem);
 
        return ret;
 }
@@ -913,7 +913,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
        nvbo = nouveau_gem_object(gem);
 
        nouveau_bo_sync_for_device(nvbo);
-       drm_gem_object_unreference_unlocked(gem);
+       drm_gem_object_put_unlocked(gem);
        return 0;
 }
 
@@ -930,7 +930,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                return -ENOENT;
 
        ret = nouveau_gem_info(file_priv, gem, req);
-       drm_gem_object_unreference_unlocked(gem);
+       drm_gem_object_put_unlocked(gem);
        return ret;
 }
 
index 44178b4c359980d171332c19226d044de1817534..08a1ab6b150d00162300e9921d65adfa1ce05647 100644 (file)
@@ -69,8 +69,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
        struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
        long value;
 
-       if (kstrtol(buf, 10, &value) == -EINVAL)
-               return count;
+       if (kstrtol(buf, 10, &value))
+               return -EINVAL;
 
        therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST,
                        value / 1000);
@@ -102,8 +102,8 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
        struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
        long value;
 
-       if (kstrtol(buf, 10, &value) == -EINVAL)
-               return count;
+       if (kstrtol(buf, 10, &value))
+               return -EINVAL;
 
        therm->attr_set(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST,
                        value / 1000);
@@ -156,7 +156,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
        long value;
        int ret;
 
-       if (kstrtol(buf, 10, &value) == -EINVAL)
+       if (kstrtol(buf, 10, &value))
                return -EINVAL;
 
        ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY, value);
@@ -179,7 +179,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
        long value;
        int ret;
 
-       if (kstrtol(buf, 10, &value) == -EINVAL)
+       if (kstrtol(buf, 10, &value))
                return -EINVAL;
 
        ret = therm->attr_set(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY, value);
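kstrtol() returns 0 on success or a negative errno (-EINVAL for unparseable input, -ERANGE for overflow), so comparing its result against -EINVAL missed overflows — and the first two handlers above even returned count on failure, silently accepting garbage writes. The fixed handlers treat any non-zero return as failure:

	long value;

	if (kstrtol(buf, 10, &value))	/* any error: -EINVAL or -ERANGE */
		return -EINVAL;
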
index 1ada186fab770aba7c598c2661d5fc0955218ad4..039e23548e08f4074c915160a7a43d89298367ec 100644 (file)
@@ -36,7 +36,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
 
        ret = drm_dev_register(drm, 0);
        if (ret < 0) {
-               drm_dev_unref(drm);
+               drm_dev_put(drm);
                return ret;
        }
 
index 8c093ca4222e25b04b52096d4066159cdeb65451..8edb9f2a426945be9bf88ff167cc9ccebfa2171b 100644 (file)
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
- * All Rights Reserved.
  * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
- * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
index d0322ce85172a3f20748a074b9d90f854ae37a03..1a47c40e171b36e0e2ee519ea30851357ca99f05 100644 (file)
@@ -87,11 +87,12 @@ nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data)
 {
        struct nvkm_engine *engine = nvkm_engine(subdev);
        if (engine->func->info) {
-               if ((engine = nvkm_engine_ref(engine))) {
+               if (!IS_ERR((engine = nvkm_engine_ref(engine)))) {
                        int ret = engine->func->info(engine, mthd, data);
                        nvkm_engine_unref(&engine);
                        return ret;
                }
+               return PTR_ERR(engine);
        }
        return -ENOSYS;
 }
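nvkm_engine_ref() reports failure with an ERR_PTR-encoded errno rather than NULL, so the old truthiness test could pass an error pointer straight through. The hunk switches to the standard idiom:

	struct nvkm_engine *e = nvkm_engine_ref(engine);

	if (IS_ERR(e))
		return PTR_ERR(e);	/* decode the errno back out */
	/* ... e is a valid pointer here ... */
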
index 78597da6313ade568dc1ddb7a19582c3a778a091..0e372a190d3f111fc982aba65a56294b648fa368 100644 (file)
 #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
 #include "priv.h"
 
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
 static int
 nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
 {
@@ -105,6 +109,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
        unsigned long pgsize_bitmap;
        int ret;
 
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+       if (dev->archdata.mapping) {
+               struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+               arm_iommu_detach_device(dev);
+               arm_iommu_release_mapping(mapping);
+       }
+#endif
+
        if (!tdev->func->iommu_bit)
                return;
 
index 29e6dd58ac48ccc896d93eee82201fe5126ae174..525f95d064290d9971876fa8d307dd7fb5c98e6b 100644 (file)
@@ -52,7 +52,7 @@ void
 gf119_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
 {
        struct nvkm_device *device = chan->disp->base.engine.subdev.device;
-       const u64 mask = 0x00000001 << chan->chid.user;
+       const u32 mask = 0x00000001 << chan->chid.user;
        if (!en) {
                nvkm_mask(device, 0x610090, mask, 0x00000000);
                nvkm_mask(device, 0x6100a0, mask, 0x00000000);
index 57719f675eec92ae32640cd0cd160c5cef481bae..bcf32d92ee5a9ec28de558715566e5eccc89432a 100644 (file)
@@ -166,8 +166,8 @@ void
 nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
 {
        struct nvkm_device *device = chan->disp->base.engine.subdev.device;
-       const u64 mask = 0x00010001 << chan->chid.user;
-       const u64 data = en ? 0x00010000 : 0x00000000;
+       const u32 mask = 0x00010001 << chan->chid.user;
+       const u32 data = en ? 0x00010000 << chan->chid.user : 0x00000000;
        nvkm_mask(device, 0x610028, mask, data);
 }
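This hunk fixes two things at once in nv50_disp_chan_intr(): the constants were u64 while nvkm_mask() operates on 32-bit registers (the gf119 hunk above makes the same type fix), and the enable value was never shifted by the channel id — so for any channel other than 0 the masked write stored zeroes, disabling rather than enabling its interrupt. With the shift applied, for chid.user == 2:

	mask = 0x00010001 << 2;			/* 0x00040004 */
	data = en ? 0x00010000 << 2 : 0;	/* 0x00040000, previously 0x00010000 */
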
 
index 19173ea190962017eef586f3317b595746685b8d..3b3327789ae78a39f7ea824dadd50b3854f33e9d 100644 (file)
 #include <nvif/class.h>
 
 static void
-gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)
 {
        struct nvkm_subdev *subdev = &gr->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
-       u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730));
-       u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734));
+       u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80)));
+       u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80)));
        const struct nvkm_enum *warp;
        char glob[128];
 
        nvkm_snprintbf(glob, sizeof(glob), gf100_mp_global_error, gerr);
        warp = nvkm_enum_find(gf100_mp_warp_error, werr & 0xffff);
 
-       nvkm_error(subdev, "GPC%i/TPC%i/MP trap: "
+       nvkm_error(subdev, "GPC%i/TPC%i/SM%d trap: "
                           "global %08x [%s] warp %04x [%s]\n",
-                  gpc, tpc, gerr, glob, werr, warp ? warp->name : "");
+                  gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");
+
+       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000);
+       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);
+}
 
-       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730), 0x00000000);
-       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734), gerr);
+static void
+gv100_gr_trap_mp(struct gf100_gr *gr, int gpc, int tpc)
+{
+       gv100_gr_trap_sm(gr, gpc, tpc, 0);
+       gv100_gr_trap_sm(gr, gpc, tpc, 1);
 }
 
 static void
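GV100 carries two SMs per TPC, with the warp/global error registers replicated at an 0x80-byte stride; the old code only ever read and acknowledged SM0. Concretely:

	/* SM0: TPC_UNIT(gpc, tpc, 0x730) / 0x734 */
	/* SM1: TPC_UNIT(gpc, tpc, 0x7b0) / 0x7b4   (0x730 + 1 * 0x80) */

gv100_gr_trap_mp() now fans out to gv100_gr_trap_sm() for both SM indices, so traps raised on the second SM are reported and cleared too.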
index 20b6fc8243e0aa1a9d3e8b9228c952617e908745..71524548de32cc5b87e881b78893732230d77f6f 100644 (file)
@@ -58,8 +58,14 @@ nvbios_vpstate_parse(struct nvkm_bios *b, struct nvbios_vpstate_header *h)
                h->ecount   = nvbios_rd08(b, h->offset + 0x5);
 
                h->base_id  = nvbios_rd08(b, h->offset + 0x0f);
-               h->boost_id = nvbios_rd08(b, h->offset + 0x10);
-               h->tdp_id   = nvbios_rd08(b, h->offset + 0x11);
+               if (h->hlen > 0x10)
+                       h->boost_id = nvbios_rd08(b, h->offset + 0x10);
+               else
+                       h->boost_id = 0xff;
+               if (h->hlen > 0x11)
+                       h->tdp_id = nvbios_rd08(b, h->offset + 0x11);
+               else
+                       h->tdp_id = 0xff;
                return 0;
        default:
                return -EINVAL;
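Older VBIOS images ship a vpstate header shorter than 0x12 bytes, so unconditionally reading the boost and TDP ids walked past the end of the table. Each optional byte is now guarded by the advertised header length, with 0xff ("no entry") as the fallback — the same bounds-checked read pattern as this one-liner (off is a stand-in for the field offset):

	/* read the optional byte at 'off' only if the header actually covers it */
	u8 val = (h->hlen > off) ? nvbios_rd08(b, h->offset + off) : 0xff;
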
index 007bf4af33b9935979c481af36449be504faf26a..16ad91c91a7beca11a8febfbb91a54f3b34ac008 100644 (file)
@@ -133,8 +133,14 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
                }
        }
 
-       return nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
-                              &fault->event);
+       ret = nvkm_event_init(&nvkm_fault_ntfy, 1, fault->buffer_nr,
+                             &fault->event);
+       if (ret)
+               return ret;
+
+       if (fault->func->oneinit)
+               ret = fault->func->oneinit(fault);
+       return ret;
 }
 
 static void *
index 73c7728b5969670a2ef5a79f71a6540403a2383a..3cd610d7deb5268f1e73fcebbd4e49aa50519cfc 100644 (file)
@@ -176,8 +176,17 @@ gv100_fault_init(struct nvkm_fault *fault)
        nvkm_notify_get(&fault->nrpfb);
 }
 
+static int
+gv100_fault_oneinit(struct nvkm_fault *fault)
+{
+       return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
+                               gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+                               &fault->nrpfb);
+}
+
 static const struct nvkm_fault_func
 gv100_fault = {
+       .oneinit = gv100_fault_oneinit,
        .init = gv100_fault_init,
        .fini = gv100_fault_fini,
        .intr = gv100_fault_intr,
@@ -192,15 +201,5 @@ int
 gv100_fault_new(struct nvkm_device *device, int index,
                struct nvkm_fault **pfault)
 {
-       struct nvkm_fault *fault;
-       int ret;
-
-       ret = nvkm_fault_new_(&gv100_fault, device, index, &fault);
-       *pfault = fault;
-       if (ret)
-               return ret;
-
-       return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
-                               gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
-                               &fault->nrpfb);
+       return nvkm_fault_new_(&gv100_fault, device, index, pfault);
 }
index 44843ecf12b07ca5df7b43442ccfd1038dfbd0fe..e4d2f5234fd19be82125e7266bf89e3db1e4f563 100644 (file)
@@ -20,6 +20,7 @@ int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
                    int index, struct nvkm_fault **);
 
 struct nvkm_fault_func {
+       int (*oneinit)(struct nvkm_fault *);
        void (*init)(struct nvkm_fault *);
        void (*fini)(struct nvkm_fault *);
        void (*intr)(struct nvkm_fault *);
index 73b5d46104bd3bfc97d8139ea640dbaa4aa3c8a6..434d2fc5bb1ce90c92c16299208ba5a290cbbe06 100644 (file)
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
        if (fb->func->init)
                fb->func->init(fb);
 
+       if (fb->func->init_remapper)
+               fb->func->init_remapper(fb);
+
        if (fb->func->init_page) {
                ret = fb->func->init_page(fb);
                if (WARN_ON(ret))
index dffe1f5e10712e6739fcfad81f589b6bd87aa30c..8205ce436b3e847663d5d44ab35c9e13885008a4 100644 (file)
@@ -36,6 +36,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
        nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
 }
 
+void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+       struct nvkm_device *device = fb->subdev.device;
+       /* Disable address remapper. */
+       nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
 void
 gp100_fb_init(struct nvkm_fb *base)
 {
@@ -56,6 +64,7 @@ gp100_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gp100_fb_init,
+       .init_remapper = gp100_fb_init_remapper,
        .init_page = gm200_fb_init_page,
        .init_unkn = gp100_fb_init_unkn,
        .ram_new = gp100_ram_new,
index b84b9861ef269e264e7838f308ff4a8d43545dc2..b4d74e81567447078fef479f49a722a8cd0f7cfc 100644 (file)
@@ -31,6 +31,7 @@ gp102_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gp100_fb_init,
+       .init_remapper = gp100_fb_init_remapper,
        .init_page = gm200_fb_init_page,
        .ram_new = gp100_ram_new,
 };
index 2857f31466bff2a30d3145d6b212a067428a0500..1e4ad61c19e1a2daca5600f80e0d2949db473e83 100644 (file)
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
        u32 (*tags)(struct nvkm_fb *);
        int (*oneinit)(struct nvkm_fb *);
        void (*init)(struct nvkm_fb *);
+       void (*init_remapper)(struct nvkm_fb *);
        int (*init_page)(struct nvkm_fb *);
        void (*init_unkn)(struct nvkm_fb *);
        void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
 
 int gm200_fb_init_page(struct nvkm_fb *);
 
+void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b. b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/gp10b.
deleted file mode 100644 (file)
index e69de29..0000000
index a721354249ce68420d467990e0963a206fa9d2d3..d02e183717dc4575bd667116ab90caaaa8289968 100644 (file)
@@ -414,6 +414,20 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 {
        struct ls_ucode_img *_img;
        u32 pos = 0;
+       u32 max_desc_size = 0;
+       u8 *gdesc;
+
+       /* Figure out how large we need gdesc to be. */
+       list_for_each_entry(_img, imgs, node) {
+               const struct acr_r352_ls_func *ls_func =
+                                           acr->func->ls_func[_img->falcon_id];
+
+               max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+       }
+
+       gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+       if (!gdesc)
+               return -ENOMEM;
 
        nvkm_kmap(wpr_blob);
 
@@ -421,7 +435,6 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
                struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
                const struct acr_r352_ls_func *ls_func =
                                            acr->func->ls_func[_img->falcon_id];
-               u8 gdesc[ls_func->bl_desc_size];
 
                nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
                                      sizeof(img->wpr_header));
@@ -447,6 +460,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
        nvkm_done(wpr_blob);
 
+       kfree(gdesc);
+
        return 0;
 }
 
@@ -771,7 +786,11 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
        struct fw_bl_desc *hsbl_desc;
        void *bl, *blob_data, *hsbl_code, *hsbl_data;
        u32 code_size;
-       u8 bl_desc[bl_desc_size];
+       u8 *bl_desc;
+
+       bl_desc = kzalloc(bl_desc_size, GFP_KERNEL);
+       if (!bl_desc)
+               return -ENOMEM;
 
        /* Find the bootloader descriptor for our blob and copy it */
        if (blob == acr->load_blob) {
@@ -802,7 +821,6 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
                              code_size, hsbl_desc->start_tag, 0, false);
 
        /* Generate the BL header */
-       memset(bl_desc, 0, bl_desc_size);
        acr->func->generate_hs_bl_desc(load_hdr, bl_desc, offset);
 
        /*
@@ -811,6 +829,7 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
        nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
                              bl_desc_size, 0);
 
+       kfree(bl_desc);
        return hsbl_desc->start_tag << 8;
 }
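Both ACR hunks (here and in acr_r367.c below) remove on-stack variable-length arrays — "u8 gdesc[ls_func->bl_desc_size]" — in line with the kernel-wide effort to build with -Wvla. The WPR writer now sizes a single heap buffer to the largest descriptor before the loop and reuses it for every image; a condensed sketch of the shape (desc_size() is a stand-in helper):

	u32 max_size = 0;
	u8 *buf;

	list_for_each_entry(img, imgs, node)
		max_size = max(max_size, desc_size(img));

	buf = kmalloc(max_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... one pass over imgs, reusing buf ... */
	kfree(buf);

In acr_r352_load(), kzalloc() replaces the old explicit memset(), so the bootloader descriptor still starts out zeroed.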
 
index 866877b88797b568f2d604f156ed2ad4118538f3..978ad079036705cbb10b2b87fafa9507dc79317c 100644 (file)
@@ -265,6 +265,19 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 {
        struct ls_ucode_img *_img;
        u32 pos = 0;
+       u32 max_desc_size = 0;
+       u8 *gdesc;
+
+       list_for_each_entry(_img, imgs, node) {
+               const struct acr_r352_ls_func *ls_func =
+                                           acr->func->ls_func[_img->falcon_id];
+
+               max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
+       }
+
+       gdesc = kmalloc(max_desc_size, GFP_KERNEL);
+       if (!gdesc)
+               return -ENOMEM;
 
        nvkm_kmap(wpr_blob);
 
@@ -272,7 +285,6 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
                struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
                const struct acr_r352_ls_func *ls_func =
                                            acr->func->ls_func[_img->falcon_id];
-               u8 gdesc[ls_func->bl_desc_size];
 
                nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
                                      sizeof(img->wpr_header));
@@ -298,6 +310,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
 
        nvkm_done(wpr_blob);
 
+       kfree(gdesc);
+
        return 0;
 }
 
index 30491d132d59c08cf6defd83c0169c1b83b227c7..df8b919dcf09bc4b6470afa898fed69a1221c531 100644 (file)
@@ -129,6 +129,7 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
 MODULE_FIRMWARE("nvidia/gm20b/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gm20b/acr/ucode_load.bin");
 MODULE_FIRMWARE("nvidia/gm20b/gr/fecs_bl.bin");
@@ -144,3 +145,4 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
 MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
 MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
 MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
+#endif
index 632e9545e2923e5c055752c10b4cb5db4faf15b0..28ca29d0eeeeb8d0ab869f319b0e7c1304ef91be 100644 (file)
@@ -74,6 +74,7 @@ gp10b_secboot_new(struct nvkm_device *device, int index,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
 MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
 MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
@@ -91,3 +92,4 @@ MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
 MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
 MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
 MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
+#endif
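Wrapping the MODULE_FIRMWARE() lists (here and in the gm20b hunk above) in IS_ENABLED() checks means the firmware names are only baked into the module when the matching Tegra SoC support is configured, so initramfs tooling that collects blobs from the module's firmware field (e.g. via "modinfo -F firmware nouveau") no longer drags Tegra-only firmware into builds that cannot use it.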
index 92fe125ce22e4a5f1132aedbc302275adf96b115..f34c06bb5bd74395a73b36b25dc2e0b166c353c5 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2010 Nokia Corporation
  *
  * Original Driver Author: Imre Deak <imre.deak@nokia.com>
- * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@ti.com>
  * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
  *
  * This program is free software; you can redistribute it and/or modify it
index b5d8a00df811b6c91f8c9e60b241f7e0ee7a01f7..a1f1dc18407a3e9b083eece383276f8bce79f4fe 100644 (file)
@@ -2,7 +2,7 @@
  * Toppoly TD028TTEC1 panel support
  *
  * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Neo 1973 code (jbt6k74.c):
  * Copyright (C) 2006-2007 by OpenMoko, Inc.
index acef7ece5783367f8fe5ae40592e5787645e6447..07d00a186f151ae83ca0db4fd13576826af462d9 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Some code and ideas taken from drivers/video/omap/ driver
  * by Imre Deak.
@@ -82,7 +82,7 @@ static void __exit omap_dss_exit(void)
 module_init(omap_dss_init);
 module_exit(omap_dss_exit);
 
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
 MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
 MODULE_LICENSE("GPL v2");
 
index 7f3ac6b13b56745540843bf92ff43fb3dab6f34c..84f274c4a4cbf2d19841cc296913dbbeb0a0e76b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Some code and ideas taken from drivers/video/omap/ driver
  * by Imre Deak.
index 424143128cd49341b2752222ae4bf31ae3494dd4..9e7fcbd57e5296e7b359c4c1aa336a084c0fc165 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Some code and ideas taken from drivers/video/omap/ driver
  * by Imre Deak.
index 3d662e6805eb0a6fa1b32471f3d542704877735c..9fcc50217133657a44d36a2f9b660e75c2e075a8 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Some code and ideas taken from drivers/video/omap/ driver
  * by Imre Deak.
index d4a680629825a206838a85069710e365041a901a..74467b308721863986e3c492a8f97d7214b1c7f3 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
index 0b908e9de792b94229ad8b365e2c554ffbf08e11..cb80ddaa19d265e63f739e32d96ea0b7aecfcf65 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Some code and ideas taken from drivers/video/omap/ driver
  * by Imre Deak.
index 847c78ade024d5cc8c353372a3ed96b137a7b452..38302631b64baf811476aaf143f5dbe88a304f37 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * Some code and ideas taken from drivers/video/omap/ driver
  * by Imre Deak.
@@ -180,6 +180,9 @@ struct dss_pll_hw {
 
        /* DRA7 errata i886: use high N & M to avoid jitter */
        bool errata_i886;
+
+       /* DRA7 errata i932: retry pll lock on failure */
+       bool errata_i932;
 };
 
 struct dss_pll {
index 078b0e8216c382c88769c51f1b7cb29d86d9c6df..ff362b38bf0d436485c68e2cf4c6e60331df0444 100644 (file)
@@ -16,6 +16,7 @@
 
 #define DSS_SUBSYS_NAME "PLL"
 
+#include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -381,6 +382,22 @@ static int dss_wait_hsdiv_ack(struct dss_pll *pll, u32 hsdiv_ack_mask)
        return -ETIMEDOUT;
 }
 
+static bool pll_is_locked(u32 stat)
+{
+       /*
+        * Required value for each bitfield listed below
+        *
+        * PLL_STATUS[6] = 0  PLL_BYPASS
+        * PLL_STATUS[5] = 0  PLL_HIGHJITTER
+        *
+        * PLL_STATUS[3] = 0  PLL_LOSSREF
+        * PLL_STATUS[2] = 0  PLL_RECAL
+        * PLL_STATUS[1] = 1  PLL_LOCK
+        * PLL_STATUS[0] = 1  PLL_CTRL_RESET_DONE
+        */
+       return ((stat & 0x6f) == 0x3);
+}
+
 int dss_pll_write_config_type_a(struct dss_pll *pll,
                const struct dss_pll_clock_info *cinfo)
 {
@@ -436,18 +453,54 @@ int dss_pll_write_config_type_a(struct dss_pll *pll,
        l = FLD_MOD(l, 0, 25, 25);              /* M7_CLOCK_EN */
        writel_relaxed(l, base + PLL_CONFIGURATION2);
 
-       writel_relaxed(1, base + PLL_GO);       /* PLL_GO */
+       if (hw->errata_i932) {
+               int cnt = 0;
+               u32 sleep_time;
+               const u32 max_lock_retries = 20;
 
-       if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
-               DSSERR("DSS DPLL GO bit not going down.\n");
-               r = -EIO;
-               goto err;
-       }
+               /*
+                * Calculate wait time for PLL LOCK
+                * 1000 REFCLK cycles in us.
+                */
+               sleep_time = DIV_ROUND_UP(1000*1000*1000, cinfo->fint);
 
-       if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
-               DSSERR("cannot lock DSS DPLL\n");
-               r = -EIO;
-               goto err;
+               for (cnt = 0; cnt < max_lock_retries; cnt++) {
+                       writel_relaxed(1, base + PLL_GO);       /* PLL_GO */
+
+                       /**
+                        * read the register back to ensure the write is
+                        * flushed
+                        */
+                       readl_relaxed(base + PLL_GO);
+
+                       usleep_range(sleep_time, sleep_time + 5);
+                       l = readl_relaxed(base + PLL_STATUS);
+
+                       if (pll_is_locked(l) &&
+                           !(readl_relaxed(base + PLL_GO) & 0x1))
+                               break;
+
+               }
+
+               if (cnt == max_lock_retries) {
+                       DSSERR("cannot lock PLL\n");
+                       r = -EIO;
+                       goto err;
+               }
+       } else {
+               writel_relaxed(1, base + PLL_GO);       /* PLL_GO */
+
+               if (wait_for_bit_change(base + PLL_GO, 0, 0) != 0) {
+                       DSSERR("DSS DPLL GO bit not going down.\n");
+                       r = -EIO;
+                       goto err;
+               }
+
+               if (wait_for_bit_change(base + PLL_STATUS, 1, 1) != 1) {
+                       DSSERR("cannot lock DSS DPLL\n");
+                       r = -EIO;
+                       goto err;
+               }
        }
 
        l = readl_relaxed(base + PLL_CONFIGURATION2);
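The DRA7 i932 erratum path re-kicks PLL_GO and polls PLL_STATUS up to 20 times instead of trusting a single lock wait. Two details worth unpacking: pll_is_locked() masks the status with 0x6f (bits 6, 5, 3, 2, 1, 0) and accepts only 0x3 — LOCK and CTRL_RESET_DONE set, with bypass, high-jitter, loss-of-ref and recal all clear. And the per-attempt delay is 1000 REFCLK cycles converted to microseconds:

	/*
	 * t_us = 1000 cycles / fint [Hz] * 1e6 = 1e9 / fint
	 * e.g. fint = 20 MHz:  DIV_ROUND_UP(1000000000, 20000000) = 50 us
	 */
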
index 1e2c931f6acfa4f67fc118ffcfa515c07bceeb97..69c3b7a3d5c75e6a3547085fb6c066fe71660306 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
index 24d1ced210bd52511c13f91a945474caf554d2ba..ac01907dcc345d07ecabb95a3e1114a24b80f648 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
  *
  * VENC settings from TI's DSS driver
  *
index 585ed94ccf1781f1633279ac7ed0d651d2f20fa1..cb46311f92c9d3c1990605e387a0b7bc9d898aaf 100644 (file)
@@ -134,6 +134,7 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
        .has_refsel = true,
 
        .errata_i886 = true,
+       .errata_i932 = true,
 };
 
 struct dss_pll *dss_video_pll_init(struct dss_device *dss,
index 5cde26ac937bd3c88d7b4be1e2355ca196fe0c8d..2ddb856666c465c0ce0dc0bc48a4240498e560a1 100644 (file)
@@ -126,14 +126,14 @@ static int omap_connector_get_modes(struct drm_connector *connector)
 
                if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
                                drm_edid_is_valid(edid)) {
-                       drm_mode_connector_update_edid_property(
+                       drm_connector_update_edid_property(
                                        connector, edid);
                        n = drm_add_edid_modes(connector, edid);
 
                        omap_connector->hdmi_mode =
                                drm_detect_hdmi_monitor(edid);
                } else {
-                       drm_mode_connector_update_edid_property(
+                       drm_connector_update_edid_property(
                                        connector, NULL);
                }
 
index b42e286616b00b894b99e133c06d56b4b24db9be..91cf043f2b6ba8e17de4956c13701c503df885da 100644 (file)
@@ -30,16 +30,11 @@ static int gem_show(struct seq_file *m, void *arg)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct omap_drm_private *priv = dev->dev_private;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(m, "All Objects:\n");
+       mutex_lock(&priv->list_lock);
        omap_gem_describe_objects(&priv->obj_list, m);
-
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&priv->list_lock);
 
        return 0;
 }
index ef3b0e3571ec860ee64c3f75d15c08c00b83c011..1b6601e9b10723a42a7ebbd5d73894373021e5b9 100644 (file)
@@ -274,7 +274,7 @@ static int omap_modeset_init(struct drm_device *dev)
                if (IS_ERR(crtc))
                        return PTR_ERR(crtc);
 
-               drm_mode_connector_attach_encoder(connector, encoder);
+               drm_connector_attach_encoder(connector, encoder);
                encoder->possible_crtcs = (1 << crtc_idx);
 
                priv->crtcs[priv->num_crtcs++] = crtc;
@@ -493,7 +493,7 @@ static struct drm_driver omap_drm_driver = {
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = omap_gem_prime_export,
        .gem_prime_import = omap_gem_prime_import,
-       .gem_free_object = omap_gem_free_object,
+       .gem_free_object_unlocked = omap_gem_free_object,
        .gem_vm_ops = &omap_gem_vm_ops,
        .dumb_create = omap_gem_dumb_create,
        .dumb_map_offset = omap_gem_dumb_map_offset,
@@ -540,7 +540,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
        priv->omaprev = soc ? (unsigned int)soc->data : 0;
        priv->wq = alloc_ordered_workqueue("omapdrm", 0);
 
-       spin_lock_init(&priv->list_lock);
+       mutex_init(&priv->list_lock);
        INIT_LIST_HEAD(&priv->obj_list);
 
        /* Allocate and initialize the DRM device. */
index 6eaee4df45594c8d23ba3df0af07244e07c92d6c..f27c8e216adf894f8180f40b0c885b2c683ccd86 100644 (file)
@@ -71,7 +71,7 @@ struct omap_drm_private {
        struct workqueue_struct *wq;
 
        /* lock for obj_list below */
-       spinlock_t list_lock;
+       struct mutex list_lock;
 
        /* list of GEM objects: */
        struct list_head obj_list;
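list_lock is promoted from a spinlock to a mutex because the paths that now take it — the debugfs dump above (which dropped dev->struct_mutex) and the GEM code guarded by the new per-object omap_obj->lock further down — can sleep, which a held spinlock forbids. Usage is unchanged apart from the lock type (sketch; field names illustrative):

	mutex_lock(&priv->list_lock);		/* critical sections may now sleep */
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);
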
index 5fd22ca7391382d96269f7a364985e8d6bf1f05e..9f1e3d8f8488c823cc9914e49559adae88613017 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "omap_dmm_tiler.h"
 #include "omap_drv.h"
@@ -51,9 +52,6 @@ static const u32 formats[] = {
 
 /* per-plane info for the fb: */
 struct plane {
-       struct drm_gem_object *bo;
-       u32 pitch;
-       u32 offset;
        dma_addr_t dma_addr;
 };
 
@@ -68,56 +66,28 @@ struct omap_framebuffer {
        struct mutex lock;
 };
 
-static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
-               struct drm_file *file_priv,
-               unsigned int *handle)
-{
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       return drm_gem_handle_create(file_priv,
-                       omap_fb->planes[0].bo, handle);
-}
-
-static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       int i, n = fb->format->num_planes;
-
-       DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
-       drm_framebuffer_cleanup(fb);
-
-       for (i = 0; i < n; i++) {
-               struct plane *plane = &omap_fb->planes[i];
-
-               drm_gem_object_unreference_unlocked(plane->bo);
-       }
-
-       kfree(omap_fb);
-}
-
 static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
-       .create_handle = omap_framebuffer_create_handle,
-       .destroy = omap_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
 };
 
-static u32 get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct drm_framebuffer *fb,
                const struct drm_format_info *format, int n, int x, int y)
 {
+       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+       struct plane *plane = &omap_fb->planes[n];
        u32 offset;
 
-       offset = plane->offset
+       offset = fb->offsets[n]
               + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
-              + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
+              + (y * fb->pitches[n] / (n == 0 ? 1 : format->vsub));
 
        return plane->dma_addr + offset;
 }
 
 bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
 {
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       struct plane *plane = &omap_fb->planes[0];
-
-       return omap_gem_flags(plane->bo) & OMAP_BO_TILED;
+       return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED;
 }
 
 /* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
@@ -176,7 +146,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
        x = state->src_x >> 16;
        y = state->src_y >> 16;
 
-       if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
+       if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) {
                u32 w = state->src_w >> 16;
                u32 h = state->src_h >> 16;
 
@@ -201,12 +171,12 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                        x += w - 1;
 
                /* Note: x and y are in TILER units, not pixels */
-               omap_gem_rotated_dma_addr(plane->bo, orient, x, y,
+               omap_gem_rotated_dma_addr(fb->obj[0], orient, x, y,
                                          &info->paddr);
                info->rotation_type = OMAP_DSS_ROT_TILER;
                info->rotation = state->rotation ?: DRM_MODE_ROTATE_0;
                /* Note: stride in TILER units, not pixels */
-               info->screen_width  = omap_gem_tiled_stride(plane->bo, orient);
+               info->screen_width  = omap_gem_tiled_stride(fb->obj[0], orient);
        } else {
                switch (state->rotation & DRM_MODE_ROTATE_MASK) {
                case 0:
@@ -221,10 +191,10 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                        break;
                }
 
-               info->paddr         = get_linear_addr(plane, format, 0, x, y);
+               info->paddr         = get_linear_addr(fb, format, 0, x, y);
                info->rotation_type = OMAP_DSS_ROT_NONE;
                info->rotation      = DRM_MODE_ROTATE_0;
-               info->screen_width  = plane->pitch;
+               info->screen_width  = fb->pitches[0];
        }
 
        /* convert to pixels: */
@@ -234,11 +204,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                plane = &omap_fb->planes[1];
 
                if (info->rotation_type == OMAP_DSS_ROT_TILER) {
-                       WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
-                       omap_gem_rotated_dma_addr(plane->bo, orient, x/2, y/2,
+                       WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED));
+                       omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2,
                                                  &info->p_uv_addr);
                } else {
-                       info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
+                       info->p_uv_addr = get_linear_addr(fb, format, 1, x, y);
                }
        } else {
                info->p_uv_addr = 0;
@@ -261,10 +231,10 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
 
        for (i = 0; i < n; i++) {
                struct plane *plane = &omap_fb->planes[i];
-               ret = omap_gem_pin(plane->bo, &plane->dma_addr);
+               ret = omap_gem_pin(fb->obj[i], &plane->dma_addr);
                if (ret)
                        goto fail;
-               omap_gem_dma_sync_buffer(plane->bo, DMA_TO_DEVICE);
+               omap_gem_dma_sync_buffer(fb->obj[i], DMA_TO_DEVICE);
        }
 
        omap_fb->pin_count++;
@@ -276,7 +246,7 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb)
 fail:
        for (i--; i >= 0; i--) {
                struct plane *plane = &omap_fb->planes[i];
-               omap_gem_unpin(plane->bo);
+               omap_gem_unpin(fb->obj[i]);
                plane->dma_addr = 0;
        }
 
@@ -302,54 +272,25 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
 
        for (i = 0; i < n; i++) {
                struct plane *plane = &omap_fb->planes[i];
-               omap_gem_unpin(plane->bo);
+               omap_gem_unpin(fb->obj[i]);
                plane->dma_addr = 0;
        }
 
        mutex_unlock(&omap_fb->lock);
 }
 
-/* iterate thru all the connectors, returning ones that are attached
- * to the same fb..
- */
-struct drm_connector *omap_framebuffer_get_next_connector(
-               struct drm_framebuffer *fb, struct drm_connector *from)
-{
-       struct drm_device *dev = fb->dev;
-       struct list_head *connector_list = &dev->mode_config.connector_list;
-       struct drm_connector *connector = from;
-
-       if (!from)
-               return list_first_entry_or_null(connector_list, typeof(*from),
-                                               head);
-
-       list_for_each_entry_from(connector, connector_list, head) {
-               if (connector != from) {
-                       struct drm_encoder *encoder = connector->encoder;
-                       struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
-                       if (crtc && crtc->primary->fb == fb)
-                               return connector;
-
-               }
-       }
-
-       return NULL;
-}
-
 #ifdef CONFIG_DEBUG_FS
 void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
        int i, n = fb->format->num_planes;
 
        seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
                        (char *)&fb->format->format);
 
        for (i = 0; i < n; i++) {
-               struct plane *plane = &omap_fb->planes[i];
                seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
-                               i, plane->offset, plane->pitch);
-               omap_gem_describe(plane->bo, m);
+                               i, fb->offsets[n], fb->pitches[i]);
+               omap_gem_describe(fb->obj[i], m);
        }
 }
 #endif
@@ -454,9 +395,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
                        goto fail;
                }
 
-               plane->bo     = bos[i];
-               plane->offset = mode_cmd->offsets[i];
-               plane->pitch  = pitch;
+               fb->obj[i]    = bos[i];
                plane->dma_addr  = 0;
        }
 
index 94ad5f9e440490fef9a09333d92ca6ed8f8d85ee..c20cb4bc714da874697c2104097ffee9726eb0a7 100644 (file)
@@ -38,8 +38,6 @@ int omap_framebuffer_pin(struct drm_framebuffer *fb);
 void omap_framebuffer_unpin(struct drm_framebuffer *fb);
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
-               struct drm_framebuffer *fb, struct drm_connector *from);
 bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
 void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 
index 0f66c74a54b0e4c742e0a61a722e1e53152bf7e9..d958cc813a94c7637885302ef76aef864c89c31c 100644 (file)
@@ -170,13 +170,11 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
                goto fail;
        }
 
-       mutex_lock(&dev->struct_mutex);
-
        fbi = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(fbi)) {
                dev_err(dev->dev, "failed to allocate fb info\n");
                ret = PTR_ERR(fbi);
-               goto fail_unlock;
+               goto fail;
        }
 
        DBG("fbi=%p, dev=%p", fbi, dev);
@@ -212,12 +210,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
        DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
        DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
 
-       mutex_unlock(&dev->struct_mutex);
-
        return 0;
 
-fail_unlock:
-       mutex_unlock(&dev->struct_mutex);
 fail:
 
        if (ret) {
index 17a53d2079781c00ad743a3c0da9ac271024d589..4ba5d035c5909eed0f05b978324186e7ba1b46c0 100644 (file)
@@ -47,6 +47,9 @@ struct omap_gem_object {
        /** roll applied when mapping to DMM */
        u32 roll;
 
+       /** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
+       struct mutex lock;
+
        /**
         * dma_addr contains the buffer DMA address. It is valid for
         *
@@ -137,14 +140,12 @@ struct omap_drm_usergart {
  */
 
 /** get mmap offset */
-static u64 mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
        int ret;
        size_t size;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
        /* Make it mmapable */
        size = omap_gem_mmap_size(obj);
        ret = drm_gem_create_mmap_offset_size(obj, size);
@@ -156,7 +157,7 @@ static u64 mmap_offset(struct drm_gem_object *obj)
        return drm_vma_node_offset_addr(&obj->vma_node);
 }
 
-static bool is_contiguous(struct omap_gem_object *omap_obj)
+static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
 {
        if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
                return true;
@@ -171,14 +172,14 @@ static bool is_contiguous(struct omap_gem_object *omap_obj)
  * Eviction
  */
 
-static void evict_entry(struct drm_gem_object *obj,
+static void omap_gem_evict_entry(struct drm_gem_object *obj,
                enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        struct omap_drm_private *priv = obj->dev->dev_private;
        int n = priv->usergart[fmt].height;
        size_t size = PAGE_SIZE * n;
-       loff_t off = mmap_offset(obj) +
+       loff_t off = omap_gem_mmap_offset(obj) +
                        (entry->obj_pgoff << PAGE_SHIFT);
        const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
 
@@ -199,7 +200,7 @@ static void evict_entry(struct drm_gem_object *obj,
 }
 
 /* Evict a buffer from usergart, if it is mapped there */
-static void evict(struct drm_gem_object *obj)
+static void omap_gem_evict(struct drm_gem_object *obj)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        struct omap_drm_private *priv = obj->dev->dev_private;
@@ -213,7 +214,7 @@ static void evict(struct drm_gem_object *obj)
                                &priv->usergart[fmt].entry[i];
 
                        if (entry->obj == obj)
-                               evict_entry(obj, fmt, entry);
+                               omap_gem_evict_entry(obj, fmt, entry);
                }
        }
 }
@@ -222,7 +223,10 @@ static void evict(struct drm_gem_object *obj)
  * Page Management
  */
 
-/** ensure backing pages are allocated */
+/*
+ * Ensure backing pages are allocated. Must be called with the omap_obj.lock
+ * held.
+ */
 static int omap_gem_attach_pages(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
@@ -232,7 +236,14 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
        int i, ret;
        dma_addr_t *addrs;
 
-       WARN_ON(omap_obj->pages);
+       lockdep_assert_held(&omap_obj->lock);
+
+       /*
+        * If not using shmem (in which case backing pages don't need to be
+        * allocated) or if pages are already allocated we're done.
+        */
+       if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
+               return 0;
 
        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages)) {
@@ -288,35 +299,15 @@ free_pages:
        return ret;
 }
 
-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
-       struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       int ret = 0;
-
-       if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
-               ret = omap_gem_attach_pages(obj);
-               if (ret) {
-                       dev_err(obj->dev->dev, "could not attach pages\n");
-                       return ret;
-               }
-       }
-
-       /* TODO: even phys-contig.. we should have a list of pages? */
-       *pages = omap_obj->pages;
-
-       return 0;
-}
-
-/** release backing pages */
+/* Release backing pages. Must be called with the omap_obj.lock held. */
 static void omap_gem_detach_pages(struct drm_gem_object *obj)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        unsigned int npages = obj->size >> PAGE_SHIFT;
        unsigned int i;
 
+       lockdep_assert_held(&omap_obj->lock);
+
        for (i = 0; i < npages; i++) {
                if (omap_obj->dma_addrs[i])
                        dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
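
After this hunk the get_pages() wrapper is gone: omap_gem_attach_pages() itself bails out early when the object is not shmem-backed or already has pages, so callers can invoke it unconditionally while holding the object lock. A small runnable model of that idempotent lazy allocation:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define FLAG_SHMEM 0x1

    struct obj {
        unsigned int flags;
        void *pages;
    };

    /* Mirrors the early return added above: calling this twice, or on
     * a non-shmem object, is a harmless no-op. */
    static int attach_pages(struct obj *o)
    {
        if (!(o->flags & FLAG_SHMEM) || o->pages)
            return 0;
        o->pages = malloc(4096);
        return o->pages ? 0 : -ENOMEM;
    }

    int main(void)
    {
        struct obj o = { FLAG_SHMEM, NULL };

        /* Prints "0 0"; only the first call actually allocates. */
        printf("%d %d\n", attach_pages(&o), attach_pages(&o));
        free(o.pages);
        return 0;
    }
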
@@ -336,16 +327,6 @@ u32 omap_gem_flags(struct drm_gem_object *obj)
        return to_omap_bo(obj)->flags;
 }
 
-u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
-{
-       u64 offset;
-
-       mutex_lock(&obj->dev->struct_mutex);
-       offset = mmap_offset(obj);
-       mutex_unlock(&obj->dev->struct_mutex);
-       return offset;
-}
-
 /** get mmap size */
 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
 {
@@ -371,7 +352,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
  */
 
 /* Normal handling for the case of faulting in non-tiled buffers */
-static int fault_1d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
                struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -385,18 +366,19 @@ static int fault_1d(struct drm_gem_object *obj,
                omap_gem_cpu_sync_page(obj, pgoff);
                pfn = page_to_pfn(omap_obj->pages[pgoff]);
        } else {
-               BUG_ON(!is_contiguous(omap_obj));
+               BUG_ON(!omap_gem_is_contiguous(omap_obj));
                pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
        }
 
        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
                        pfn, pfn << PAGE_SHIFT);
 
-       return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
+       return vmf_insert_mixed(vma, vmf->address,
+                       __pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
-static int fault_2d(struct drm_gem_object *obj,
+static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
                struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -407,7 +389,8 @@ static int fault_2d(struct drm_gem_object *obj,
        unsigned long pfn;
        pgoff_t pgoff, base_pgoff;
        unsigned long vaddr;
-       int i, ret, slots;
+       int i, err, slots;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
 
        /*
         * Note the height of the slot is also equal to the number of pages
@@ -443,7 +426,7 @@ static int fault_2d(struct drm_gem_object *obj,
 
        /* evict previous buffer using this usergart entry, if any: */
        if (entry->obj)
-               evict_entry(entry->obj, fmt, entry);
+               omap_gem_evict_entry(entry->obj, fmt, entry);
 
        entry->obj = obj;
        entry->obj_pgoff = base_pgoff;
@@ -473,9 +456,10 @@ static int fault_2d(struct drm_gem_object *obj,
        memset(pages + slots, 0,
                        sizeof(struct page *) * (n - slots));
 
-       ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
-       if (ret) {
-               dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+       err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+       if (err) {
+               ret = vmf_error(err);
+               dev_err(obj->dev->dev, "failed to pin: %d\n", err);
                return ret;
        }
 
@@ -485,7 +469,10 @@ static int fault_2d(struct drm_gem_object *obj,
                        pfn, pfn << PAGE_SHIFT);
 
        for (i = n; i > 0; i--) {
-               vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+               ret = vmf_insert_mixed(vma,
+                       vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+               if (ret & VM_FAULT_ERROR)
+                       break;
                pfn += priv->usergart[fmt].stride_pfn;
                vaddr += PAGE_SIZE * m;
        }
@@ -494,7 +481,7 @@ static int fault_2d(struct drm_gem_object *obj,
        priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
                                 % NUM_USERGART_ENTRIES;
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -509,24 +496,25 @@ static int fault_2d(struct drm_gem_object *obj,
  * vma->vm_private_data points to the GEM object that is backing this
  * mapping.
  */
-int omap_gem_fault(struct vm_fault *vmf)
+vm_fault_t omap_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       struct drm_device *dev = obj->dev;
-       struct page **pages;
-       int ret;
+       int err;
+       vm_fault_t ret;
 
        /* Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet
         */
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&omap_obj->lock);
 
        /* if a shmem backed object, make sure we have pages attached now */
-       ret = get_pages(obj, &pages);
-       if (ret)
+       err = omap_gem_attach_pages(obj);
+       if (err) {
+               ret = vmf_error(err);
                goto fail;
+       }
 
        /* where should we do corresponding put_pages().. we are mapping
         * the original page, rather than thru a GART, so we can't rely
@@ -535,28 +523,14 @@ int omap_gem_fault(struct vm_fault *vmf)
         */
 
        if (omap_obj->flags & OMAP_BO_TILED)
-               ret = fault_2d(obj, vma, vmf);
+               ret = omap_gem_fault_2d(obj, vma, vmf);
        else
-               ret = fault_1d(obj, vma, vmf);
+               ret = omap_gem_fault_1d(obj, vma, vmf);
 
 
 fail:
-       mutex_unlock(&dev->struct_mutex);
-       switch (ret) {
-       case 0:
-       case -ERESTARTSYS:
-       case -EINTR:
-       case -EBUSY:
-               /*
-                * EBUSY is ok: this just means that another thread
-                * already did the job.
-                */
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
+       mutex_unlock(&omap_obj->lock);
+       return ret;
 }
 
 /** We override mainly to fix up some of the vm mapping flags.. */
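
The fault path above switches from returning -errno to returning vm_fault_t: vmf_insert_mixed() already produces a fault code, and each remaining error site folds its errno through vmf_error(), which replaces the old switch statement at the end of omap_gem_fault(). A standalone sketch of that folding, using illustrative stand-in values for the VM_FAULT_* flags rather than the kernel's definitions:

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real definitions live in the kernel. */
    typedef unsigned int vm_fault_t;
    #define VM_FAULT_OOM    0x0001
    #define VM_FAULT_SIGBUS 0x0002

    /* Rough model of what vmf_error() does: fold a -errno value
     * into a page-fault result code. */
    static vm_fault_t vmf_error_sketch(int err)
    {
        if (err == -ENOMEM)
            return VM_FAULT_OOM;
        return VM_FAULT_SIGBUS;
    }

    int main(void)
    {
        printf("%#x %#x\n", vmf_error_sketch(-ENOMEM),
               vmf_error_sketch(-EINVAL));
        return 0;
    }
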
@@ -689,21 +663,22 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
 
        omap_obj->roll = roll;
 
-       mutex_lock(&obj->dev->struct_mutex);
+       mutex_lock(&omap_obj->lock);
 
        /* if we aren't mapped yet, we don't need to do anything */
        if (omap_obj->block) {
-               struct page **pages;
-               ret = get_pages(obj, &pages);
+               ret = omap_gem_attach_pages(obj);
                if (ret)
                        goto fail;
-               ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+
+               ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
+                               roll, true);
                if (ret)
                        dev_err(obj->dev->dev, "could not repin: %d\n", ret);
        }
 
 fail:
-       mutex_unlock(&obj->dev->struct_mutex);
+       mutex_unlock(&omap_obj->lock);
 
        return ret;
 }
@@ -722,7 +697,7 @@ fail:
  * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
  * unmapped from the CPU.
  */
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
+static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
@@ -738,7 +713,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
        struct drm_device *dev = obj->dev;
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-       if (is_cached_coherent(obj))
+       if (omap_gem_is_cached_coherent(obj))
                return;
 
        if (omap_obj->dma_addrs[pgoff]) {
@@ -758,7 +733,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
        struct page **pages = omap_obj->pages;
        bool dirty = false;
 
-       if (is_cached_coherent(obj))
+       if (omap_gem_is_cached_coherent(obj))
                return;
 
        for (i = 0; i < npages; i++) {
@@ -804,18 +779,17 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        int ret = 0;
 
-       mutex_lock(&obj->dev->struct_mutex);
+       mutex_lock(&omap_obj->lock);
 
-       if (!is_contiguous(omap_obj) && priv->has_dmm) {
+       if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
                if (omap_obj->dma_addr_cnt == 0) {
-                       struct page **pages;
                        u32 npages = obj->size >> PAGE_SHIFT;
                        enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
                        struct tiler_block *block;
 
                        BUG_ON(omap_obj->block);
 
-                       ret = get_pages(obj, &pages);
+                       ret = omap_gem_attach_pages(obj);
                        if (ret)
                                goto fail;
 
@@ -835,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
                        }
 
                        /* TODO: enable async refill.. */
-                       ret = tiler_pin(block, pages, npages,
+                       ret = tiler_pin(block, omap_obj->pages, npages,
                                        omap_obj->roll, true);
                        if (ret) {
                                tiler_release(block);
@@ -853,7 +827,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
                omap_obj->dma_addr_cnt++;
 
                *dma_addr = omap_obj->dma_addr;
-       } else if (is_contiguous(omap_obj)) {
+       } else if (omap_gem_is_contiguous(omap_obj)) {
                *dma_addr = omap_obj->dma_addr;
        } else {
                ret = -EINVAL;
@@ -861,7 +835,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
        }
 
 fail:
-       mutex_unlock(&obj->dev->struct_mutex);
+       mutex_unlock(&omap_obj->lock);
 
        return ret;
 }
@@ -879,7 +853,8 @@ void omap_gem_unpin(struct drm_gem_object *obj)
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        int ret;
 
-       mutex_lock(&obj->dev->struct_mutex);
+       mutex_lock(&omap_obj->lock);
+
        if (omap_obj->dma_addr_cnt > 0) {
                omap_obj->dma_addr_cnt--;
                if (omap_obj->dma_addr_cnt == 0) {
@@ -898,7 +873,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
                }
        }
 
-       mutex_unlock(&obj->dev->struct_mutex);
+       mutex_unlock(&omap_obj->lock);
 }
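
omap_gem_pin() and omap_gem_unpin() keep a per-object use count under the new lock: the DMA mapping is built when the count goes from zero to one and torn down on the last unpin. A runnable pthreads sketch of that pattern (the fake dma_addr stands in for the tiler_pin()/tiler_release() work):

    #include <pthread.h>
    #include <stdio.h>

    struct obj {
        pthread_mutex_t lock;
        unsigned int pin_cnt;
        unsigned long dma_addr;
    };

    /* First pin creates the (pretend) mapping; later pins only
     * bump the count. */
    static unsigned long pin(struct obj *o)
    {
        unsigned long addr;

        pthread_mutex_lock(&o->lock);
        if (o->pin_cnt++ == 0)
            o->dma_addr = 0xdead000;    /* stand-in for tiler_pin() */
        addr = o->dma_addr;             /* read while still locked */
        pthread_mutex_unlock(&o->lock);
        return addr;
    }

    static void unpin(struct obj *o)
    {
        pthread_mutex_lock(&o->lock);
        if (o->pin_cnt > 0 && --o->pin_cnt == 0)
            o->dma_addr = 0;            /* last user gone: tear down */
        pthread_mutex_unlock(&o->lock);
    }

    int main(void)
    {
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        pin(&o); pin(&o);
        unpin(&o);
        printf("still mapped: %#lx\n", o.dma_addr);
        unpin(&o);
        printf("after last unpin: %#lx\n", o.dma_addr);
        return 0;
    }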
 
 /* Get rotated scanout address (only valid if already pinned), at the
@@ -911,13 +886,16 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        int ret = -EINVAL;
 
-       mutex_lock(&obj->dev->struct_mutex);
+       mutex_lock(&omap_obj->lock);
+
        if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
                        (omap_obj->flags & OMAP_BO_TILED)) {
                *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
                ret = 0;
        }
-       mutex_unlock(&obj->dev->struct_mutex);
+
+       mutex_unlock(&omap_obj->lock);
+
        return ret;
 }
 
@@ -944,17 +922,27 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
                bool remap)
 {
-       int ret;
-       if (!remap) {
-               struct omap_gem_object *omap_obj = to_omap_bo(obj);
-               if (!omap_obj->pages)
-                       return -ENOMEM;
-               *pages = omap_obj->pages;
-               return 0;
+       struct omap_gem_object *omap_obj = to_omap_bo(obj);
+       int ret = 0;
+
+       mutex_lock(&omap_obj->lock);
+
+       if (remap) {
+               ret = omap_gem_attach_pages(obj);
+               if (ret)
+                       goto unlock;
        }
-       mutex_lock(&obj->dev->struct_mutex);
-       ret = get_pages(obj, pages);
-       mutex_unlock(&obj->dev->struct_mutex);
+
+       if (!omap_obj->pages) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       *pages = omap_obj->pages;
+
+unlock:
+       mutex_unlock(&omap_obj->lock);
+
        return ret;
 }
 
@@ -969,23 +957,34 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
 }
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-/* Get kernel virtual address for CPU access.. this more or less only
- * exists for omap_fbdev.  This should be called with struct_mutex
- * held.
+/*
+ * Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev.
  */
 void *omap_gem_vaddr(struct drm_gem_object *obj)
 {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+       void *vaddr;
+       int ret;
+
+       mutex_lock(&omap_obj->lock);
+
        if (!omap_obj->vaddr) {
-               struct page **pages;
-               int ret = get_pages(obj, &pages);
-               if (ret)
-                       return ERR_PTR(ret);
-               omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+               ret = omap_gem_attach_pages(obj);
+               if (ret) {
+                       vaddr = ERR_PTR(ret);
+                       goto unlock;
+               }
+
+               omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        }
-       return omap_obj->vaddr;
+
+       vaddr = omap_obj->vaddr;
+
+unlock:
+       mutex_unlock(&omap_obj->lock);
+       return vaddr;
 }
 #endif
 
@@ -1001,6 +1000,7 @@ int omap_gem_resume(struct drm_device *dev)
        struct omap_gem_object *omap_obj;
        int ret = 0;
 
+       mutex_lock(&priv->list_lock);
        list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
                if (omap_obj->block) {
                        struct drm_gem_object *obj = &omap_obj->base;
@@ -1012,12 +1012,14 @@ int omap_gem_resume(struct drm_device *dev)
                                        omap_obj->roll, true);
                        if (ret) {
                                dev_err(dev->dev, "could not repin: %d\n", ret);
-                               return ret;
+                               goto done;
                        }
                }
        }
 
-       return 0;
+done:
+       mutex_unlock(&priv->list_lock);
+       return ret;
 }
 #endif
 
@@ -1033,6 +1035,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
        off = drm_vma_node_start(&obj->vma_node);
 
+       mutex_lock(&omap_obj->lock);
+
        seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
                        omap_obj->flags, obj->name, kref_read(&obj->refcount),
                        off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
@@ -1050,6 +1054,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
                seq_printf(m, " %zu", obj->size);
        }
 
+       mutex_unlock(&omap_obj->lock);
+
        seq_printf(m, "\n");
 }
 
@@ -1081,17 +1087,21 @@ void omap_gem_free_object(struct drm_gem_object *obj)
        struct omap_drm_private *priv = dev->dev_private;
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
 
-       evict(obj);
-
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       omap_gem_evict(obj);
 
-       spin_lock(&priv->list_lock);
+       mutex_lock(&priv->list_lock);
        list_del(&omap_obj->mm_list);
-       spin_unlock(&priv->list_lock);
+       mutex_unlock(&priv->list_lock);
 
-       /* this means the object is still pinned.. which really should
-        * not happen.  I think..
+       /*
+        * We own the sole reference to the object at this point, but to keep
+        * lockdep happy, we must still take the omap_obj->lock to call
+        * omap_gem_detach_pages(). This should hardly make any difference as
+        * there can't be any lock contention.
         */
+       mutex_lock(&omap_obj->lock);
+
+       /* The object should not be pinned. */
        WARN_ON(omap_obj->dma_addr_cnt > 0);
 
        if (omap_obj->pages) {
@@ -1110,8 +1120,12 @@ void omap_gem_free_object(struct drm_gem_object *obj)
                drm_prime_gem_destroy(obj, omap_obj->sgt);
        }
 
+       mutex_unlock(&omap_obj->lock);
+
        drm_gem_object_release(obj);
 
+       mutex_destroy(&omap_obj->lock);
+
        kfree(omap_obj);
 }
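
The free path takes the per-object lock purely so that omap_gem_detach_pages() always runs in the locking context lockdep has seen everywhere else, and mutex_destroy() happens only after the final unlock. A userspace analogue of that ordering:

    #include <pthread.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;
        void *pages;
    };

    static void free_obj(struct obj *o)
    {
        /* Uncontended by construction (last reference), but taken
         * anyway so teardown runs under the same lock as usual. */
        pthread_mutex_lock(&o->lock);
        free(o->pages);
        o->pages = NULL;
        pthread_mutex_unlock(&o->lock);

        /* Destroy the lock strictly after the last unlock. */
        pthread_mutex_destroy(&o->lock);
        free(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        pthread_mutex_init(&o->lock, NULL);
        o->pages = malloc(4096);
        free_obj(o);
        return 0;
    }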
 
@@ -1167,6 +1181,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 
        obj = &omap_obj->base;
        omap_obj->flags = flags;
+       mutex_init(&omap_obj->lock);
 
        if (flags & OMAP_BO_TILED) {
                /*
@@ -1206,9 +1221,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
                        goto err_release;
        }
 
-       spin_lock(&priv->list_lock);
+       mutex_lock(&priv->list_lock);
        list_add(&omap_obj->mm_list, &priv->obj_list);
-       spin_unlock(&priv->list_lock);
+       mutex_unlock(&priv->list_lock);
 
        return obj;
 
@@ -1231,16 +1246,15 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
        if (sgt->orig_nents != 1 && !priv->has_dmm)
                return ERR_PTR(-EINVAL);
 
-       mutex_lock(&dev->struct_mutex);
-
        gsize.bytes = PAGE_ALIGN(size);
        obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
-       if (!obj) {
-               obj = ERR_PTR(-ENOMEM);
-               goto done;
-       }
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
 
        omap_obj = to_omap_bo(obj);
+
+       mutex_lock(&omap_obj->lock);
+
        omap_obj->sgt = sgt;
 
        if (sgt->orig_nents == 1) {
@@ -1276,7 +1290,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
        }
 
 done:
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&omap_obj->lock);
        return obj;
 }
 
index a78bde05193abb5a48cde37be852be7223f281a5..c1c45fbde155cff8f8767dcdac721aebc3acbb2e 100644 (file)
@@ -21,6 +21,7 @@
 #define __OMAPDRM_GEM_H__
 
 #include <linux/types.h>
+#include <linux/mm_types.h>
 
 enum dma_data_direction;
 
@@ -80,7 +81,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                struct dma_buf *buffer);
 
-int omap_gem_fault(struct vm_fault *vmf);
+vm_fault_t omap_gem_fault(struct vm_fault *vmf);
 int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
index 8e41d649e248dd14ba6a5340c9e24db177088a6d..ec04a69ade46bde5e4175740ecd6dccaca61ee6f 100644 (file)
@@ -93,23 +93,6 @@ static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
        return 0;
 }
 
-
-static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
-               unsigned long page_num)
-{
-       struct drm_gem_object *obj = buffer->priv;
-       struct page **pages;
-       omap_gem_get_pages(obj, &pages, false);
-       omap_gem_cpu_sync_page(obj, page_num);
-       return kmap_atomic(pages[page_num]);
-}
-
-static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
-               unsigned long page_num, void *addr)
-{
-       kunmap_atomic(addr);
-}
-
 static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
                unsigned long page_num)
 {
@@ -148,8 +131,6 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
        .release = drm_gem_dmabuf_release,
        .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
        .end_cpu_access = omap_gem_dmabuf_end_cpu_access,
-       .map_atomic = omap_gem_dmabuf_kmap_atomic,
-       .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
        .map = omap_gem_dmabuf_kmap,
        .unmap = omap_gem_dmabuf_kunmap,
        .mmap = omap_gem_dmabuf_mmap,
index 25682ff3449a43e45da5e5440a631fb665073697..6020c30a33b39ab696d7f820f14d951c4c483f19 100644 (file)
@@ -46,6 +46,15 @@ config DRM_PANEL_ILITEK_IL9322
          Say Y here if you want to enable support for Ilitek IL9322
          QVGA (320x240) RGB, YUV and ITU-T BT.656 panels.
 
+config DRM_PANEL_ILITEK_ILI9881C
+       tristate "Ilitek ILI9881C-based panels"
+       depends on OF
+       depends on DRM_MIPI_DSI
+       depends on BACKLIGHT_CLASS_DEVICE
+       help
+         Say Y if you want to enable support for panels based on the
+         Ilitek ILI9881c controller.
+
 config DRM_PANEL_INNOLUX_P079ZCA
        tristate "Innolux P079ZCA panel"
        depends on OF
index f26efc11d746d3acc9360da619d8e0b35d1f5663..5ccaaa9d13af50c0f61865e2bd20b2ca38dda5c6 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
 obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
new file mode 100644 (file)
index 0000000..3ad4a46
--- /dev/null
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017-2018, Bootlin
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct ili9881c {
+       struct drm_panel        panel;
+       struct mipi_dsi_device  *dsi;
+
+       struct backlight_device *backlight;
+       struct regulator        *power;
+       struct gpio_desc        *reset;
+};
+
+enum ili9881c_op {
+       ILI9881C_SWITCH_PAGE,
+       ILI9881C_COMMAND,
+};
+
+struct ili9881c_instr {
+       enum ili9881c_op        op;
+
+       union arg {
+               struct cmd {
+                       u8      cmd;
+                       u8      data;
+               } cmd;
+               u8      page;
+       } arg;
+};
+
+#define ILI9881C_SWITCH_PAGE_INSTR(_page)      \
+       {                                       \
+               .op = ILI9881C_SWITCH_PAGE,     \
+               .arg = {                        \
+                       .page = (_page),        \
+               },                              \
+       }
+
+#define ILI9881C_COMMAND_INSTR(_cmd, _data)            \
+       {                                               \
+               .op = ILI9881C_COMMAND,         \
+               .arg = {                                \
+                       .cmd = {                        \
+                               .cmd = (_cmd),          \
+                               .data = (_data),        \
+                       },                              \
+               },                                      \
+       }
+
+static const struct ili9881c_instr ili9881c_init[] = {
+       ILI9881C_SWITCH_PAGE_INSTR(3),
+       ILI9881C_COMMAND_INSTR(0x01, 0x00),
+       ILI9881C_COMMAND_INSTR(0x02, 0x00),
+       ILI9881C_COMMAND_INSTR(0x03, 0x73),
+       ILI9881C_COMMAND_INSTR(0x04, 0x03),
+       ILI9881C_COMMAND_INSTR(0x05, 0x00),
+       ILI9881C_COMMAND_INSTR(0x06, 0x06),
+       ILI9881C_COMMAND_INSTR(0x07, 0x06),
+       ILI9881C_COMMAND_INSTR(0x08, 0x00),
+       ILI9881C_COMMAND_INSTR(0x09, 0x18),
+       ILI9881C_COMMAND_INSTR(0x0a, 0x04),
+       ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0c, 0x02),
+       ILI9881C_COMMAND_INSTR(0x0d, 0x03),
+       ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x0f, 0x25),
+       ILI9881C_COMMAND_INSTR(0x10, 0x25),
+       ILI9881C_COMMAND_INSTR(0x11, 0x00),
+       ILI9881C_COMMAND_INSTR(0x12, 0x00),
+       ILI9881C_COMMAND_INSTR(0x13, 0x00),
+       ILI9881C_COMMAND_INSTR(0x14, 0x00),
+       ILI9881C_COMMAND_INSTR(0x15, 0x00),
+       ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+       ILI9881C_COMMAND_INSTR(0x17, 0x00),
+       ILI9881C_COMMAND_INSTR(0x18, 0x00),
+       ILI9881C_COMMAND_INSTR(0x19, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+       ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+       ILI9881C_COMMAND_INSTR(0x20, 0x04),
+       ILI9881C_COMMAND_INSTR(0x21, 0x01),
+       ILI9881C_COMMAND_INSTR(0x22, 0x00),
+       ILI9881C_COMMAND_INSTR(0x23, 0x00),
+       ILI9881C_COMMAND_INSTR(0x24, 0x00),
+       ILI9881C_COMMAND_INSTR(0x25, 0x00),
+       ILI9881C_COMMAND_INSTR(0x26, 0x00),
+       ILI9881C_COMMAND_INSTR(0x27, 0x00),
+       ILI9881C_COMMAND_INSTR(0x28, 0x33),
+       ILI9881C_COMMAND_INSTR(0x29, 0x03),
+       ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+       ILI9881C_COMMAND_INSTR(0x30, 0x00),
+       ILI9881C_COMMAND_INSTR(0x31, 0x00),
+       ILI9881C_COMMAND_INSTR(0x32, 0x00),
+       ILI9881C_COMMAND_INSTR(0x33, 0x00),
+       ILI9881C_COMMAND_INSTR(0x34, 0x04),
+       ILI9881C_COMMAND_INSTR(0x35, 0x00),
+       ILI9881C_COMMAND_INSTR(0x36, 0x00),
+       ILI9881C_COMMAND_INSTR(0x37, 0x00),
+       ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+       ILI9881C_COMMAND_INSTR(0x39, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+       ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+       ILI9881C_COMMAND_INSTR(0x40, 0x00),
+       ILI9881C_COMMAND_INSTR(0x41, 0x00),
+       ILI9881C_COMMAND_INSTR(0x42, 0x00),
+       ILI9881C_COMMAND_INSTR(0x43, 0x00),
+       ILI9881C_COMMAND_INSTR(0x44, 0x00),
+       ILI9881C_COMMAND_INSTR(0x50, 0x01),
+       ILI9881C_COMMAND_INSTR(0x51, 0x23),
+       ILI9881C_COMMAND_INSTR(0x52, 0x45),
+       ILI9881C_COMMAND_INSTR(0x53, 0x67),
+       ILI9881C_COMMAND_INSTR(0x54, 0x89),
+       ILI9881C_COMMAND_INSTR(0x55, 0xab),
+       ILI9881C_COMMAND_INSTR(0x56, 0x01),
+       ILI9881C_COMMAND_INSTR(0x57, 0x23),
+       ILI9881C_COMMAND_INSTR(0x58, 0x45),
+       ILI9881C_COMMAND_INSTR(0x59, 0x67),
+       ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+       ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+       ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+       ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+       ILI9881C_COMMAND_INSTR(0x5e, 0x11),
+       ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+       ILI9881C_COMMAND_INSTR(0x60, 0x02),
+       ILI9881C_COMMAND_INSTR(0x61, 0x02),
+       ILI9881C_COMMAND_INSTR(0x62, 0x02),
+       ILI9881C_COMMAND_INSTR(0x63, 0x02),
+       ILI9881C_COMMAND_INSTR(0x64, 0x02),
+       ILI9881C_COMMAND_INSTR(0x65, 0x02),
+       ILI9881C_COMMAND_INSTR(0x66, 0x02),
+       ILI9881C_COMMAND_INSTR(0x67, 0x02),
+       ILI9881C_COMMAND_INSTR(0x68, 0x02),
+       ILI9881C_COMMAND_INSTR(0x69, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+       ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+       ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
+       ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
+       ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+       ILI9881C_COMMAND_INSTR(0x6f, 0x06),
+       ILI9881C_COMMAND_INSTR(0x70, 0x07),
+       ILI9881C_COMMAND_INSTR(0x71, 0x02),
+       ILI9881C_COMMAND_INSTR(0x72, 0x02),
+       ILI9881C_COMMAND_INSTR(0x73, 0x02),
+       ILI9881C_COMMAND_INSTR(0x74, 0x02),
+       ILI9881C_COMMAND_INSTR(0x75, 0x02),
+       ILI9881C_COMMAND_INSTR(0x76, 0x02),
+       ILI9881C_COMMAND_INSTR(0x77, 0x02),
+       ILI9881C_COMMAND_INSTR(0x78, 0x02),
+       ILI9881C_COMMAND_INSTR(0x79, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7a, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+       ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+       ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+       ILI9881C_COMMAND_INSTR(0x81, 0x02),
+       ILI9881C_COMMAND_INSTR(0x82, 0x0F),
+       ILI9881C_COMMAND_INSTR(0x83, 0x0E),
+       ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+       ILI9881C_COMMAND_INSTR(0x85, 0x06),
+       ILI9881C_COMMAND_INSTR(0x86, 0x07),
+       ILI9881C_COMMAND_INSTR(0x87, 0x02),
+       ILI9881C_COMMAND_INSTR(0x88, 0x02),
+       ILI9881C_COMMAND_INSTR(0x89, 0x02),
+       ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+       ILI9881C_SWITCH_PAGE_INSTR(4),
+       ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+       ILI9881C_COMMAND_INSTR(0x6E, 0x22),
+       ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+       ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
+       ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
+       ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+       ILI9881C_COMMAND_INSTR(0x26, 0x76),
+       ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+       ILI9881C_SWITCH_PAGE_INSTR(1),
+       ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+       ILI9881C_COMMAND_INSTR(0x53, 0xDC),
+       ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+       ILI9881C_COMMAND_INSTR(0x50, 0x78),
+       ILI9881C_COMMAND_INSTR(0x51, 0x78),
+       ILI9881C_COMMAND_INSTR(0x31, 0x02),
+       ILI9881C_COMMAND_INSTR(0x60, 0x14),
+       ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
+       ILI9881C_COMMAND_INSTR(0xA1, 0x39),
+       ILI9881C_COMMAND_INSTR(0xA2, 0x46),
+       ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
+       ILI9881C_COMMAND_INSTR(0xA4, 0x12),
+       ILI9881C_COMMAND_INSTR(0xA5, 0x25),
+       ILI9881C_COMMAND_INSTR(0xA6, 0x19),
+       ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+       ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
+       ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+       ILI9881C_COMMAND_INSTR(0xAB, 0x85),
+       ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
+       ILI9881C_COMMAND_INSTR(0xAE, 0x51),
+       ILI9881C_COMMAND_INSTR(0xAF, 0x22),
+       ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
+       ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
+       ILI9881C_COMMAND_INSTR(0xB2, 0x59),
+       ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
+       ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
+       ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
+       ILI9881C_COMMAND_INSTR(0xC2, 0x45),
+       ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
+       ILI9881C_COMMAND_INSTR(0xC4, 0x11),
+       ILI9881C_COMMAND_INSTR(0xC5, 0x24),
+       ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
+       ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
+       ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
+       ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+       ILI9881C_COMMAND_INSTR(0xCB, 0x96),
+       ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
+       ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
+       ILI9881C_COMMAND_INSTR(0xCE, 0x51),
+       ILI9881C_COMMAND_INSTR(0xCF, 0x22),
+       ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
+       ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
+       ILI9881C_COMMAND_INSTR(0xD2, 0x59),
+       ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+};
+
+static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
+{
+       return container_of(panel, struct ili9881c, panel);
+}
+
+/*
+ * The panel seems to accept some private DCS commands that map
+ * directly to registers.
+ *
+ * It is organised by page, with each page having its own set of
+ * registers, and the first page looks like it's holding the standard
+ * DCS commands.
+ *
+ * So before any attempt at sending a command or data, we have to
+ * make sure we're on the right page.
+ */
+static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+{
+       u8 buf[4] = { 0xff, 0x98, 0x81, page };
+       int ret;
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+{
+       u8 buf[2] = { cmd, data };
+       int ret;
+
+       ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int ili9881c_prepare(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+       unsigned int i;
+       int ret;
+
+       /* Power the panel */
+       ret = regulator_enable(ctx->power);
+       if (ret)
+               return ret;
+       msleep(5);
+
+       /* And reset it */
+       gpiod_set_value(ctx->reset, 1);
+       msleep(20);
+
+       gpiod_set_value(ctx->reset, 0);
+       msleep(20);
+
+       for (i = 0; i < ARRAY_SIZE(ili9881c_init); i++) {
+               const struct ili9881c_instr *instr = &ili9881c_init[i];
+
+               if (instr->op == ILI9881C_SWITCH_PAGE)
+                       ret = ili9881c_switch_page(ctx, instr->arg.page);
+               else if (instr->op == ILI9881C_COMMAND)
+                       ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
+                                                     instr->arg.cmd.data);
+
+               if (ret)
+                       return ret;
+       }
+
+       ret = ili9881c_switch_page(ctx, 0);
+       if (ret)
+               return ret;
+
+       ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+       if (ret)
+               return ret;
+
+       ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int ili9881c_enable(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+       msleep(120);
+
+       mipi_dsi_dcs_set_display_on(ctx->dsi);
+       backlight_enable(ctx->backlight);
+
+       return 0;
+}
+
+static int ili9881c_disable(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+       backlight_disable(ctx->backlight);
+       return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int ili9881c_unprepare(struct drm_panel *panel)
+{
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+
+       mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+       regulator_disable(ctx->power);
+       gpiod_set_value(ctx->reset, 1);
+
+       return 0;
+}
+
+static const struct drm_display_mode bananapi_default_mode = {
+       .clock          = 62000,
+       .vrefresh       = 60,
+
+       .hdisplay       = 720,
+       .hsync_start    = 720 + 10,
+       .hsync_end      = 720 + 10 + 20,
+       .htotal         = 720 + 10 + 20 + 30,
+
+       .vdisplay       = 1280,
+       .vsync_start    = 1280 + 10,
+       .vsync_end      = 1280 + 10 + 10,
+       .vtotal         = 1280 + 10 + 10 + 20,
+};
+
+static int ili9881c_get_modes(struct drm_panel *panel)
+{
+       struct drm_connector *connector = panel->connector;
+       struct ili9881c *ctx = panel_to_ili9881c(panel);
+       struct drm_display_mode *mode;
+
+       mode = drm_mode_duplicate(panel->drm, &bananapi_default_mode);
+       if (!mode) {
+               dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+                       bananapi_default_mode.hdisplay,
+                       bananapi_default_mode.vdisplay,
+                       bananapi_default_mode.vrefresh);
+               return -ENOMEM;
+       }
+
+       drm_mode_set_name(mode);
+
+       mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+       drm_mode_probed_add(connector, mode);
+
+       panel->connector->display_info.width_mm = 62;
+       panel->connector->display_info.height_mm = 110;
+
+       return 1;
+}
+
+static const struct drm_panel_funcs ili9881c_funcs = {
+       .prepare        = ili9881c_prepare,
+       .unprepare      = ili9881c_unprepare,
+       .enable         = ili9881c_enable,
+       .disable        = ili9881c_disable,
+       .get_modes      = ili9881c_get_modes,
+};
+
+static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
+{
+       struct device_node *np;
+       struct ili9881c *ctx;
+       int ret;
+
+       ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+       mipi_dsi_set_drvdata(dsi, ctx);
+       ctx->dsi = dsi;
+
+       drm_panel_init(&ctx->panel);
+       ctx->panel.dev = &dsi->dev;
+       ctx->panel.funcs = &ili9881c_funcs;
+
+       ctx->power = devm_regulator_get(&dsi->dev, "power");
+       if (IS_ERR(ctx->power)) {
+               dev_err(&dsi->dev, "Couldn't get our power regulator\n");
+               return PTR_ERR(ctx->power);
+       }
+
+       ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ctx->reset)) {
+               dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
+               return PTR_ERR(ctx->reset);
+       }
+
+       np = of_parse_phandle(dsi->dev.of_node, "backlight", 0);
+       if (np) {
+               ctx->backlight = of_find_backlight_by_node(np);
+               of_node_put(np);
+
+               if (!ctx->backlight)
+                       return -EPROBE_DEFER;
+       }
+
+       ret = drm_panel_add(&ctx->panel);
+       if (ret < 0)
+               return ret;
+
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->lanes = 4;
+
+       return mipi_dsi_attach(dsi);
+}
+
+static int ili9881c_dsi_remove(struct mipi_dsi_device *dsi)
+{
+       struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi);
+
+       mipi_dsi_detach(dsi);
+       drm_panel_remove(&ctx->panel);
+
+       if (ctx->backlight)
+               put_device(&ctx->backlight->dev);
+
+       return 0;
+}
+
+static const struct of_device_id ili9881c_of_match[] = {
+       { .compatible = "bananapi,lhr050h41" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ili9881c_of_match);
+
+static struct mipi_dsi_driver ili9881c_dsi_driver = {
+       .probe          = ili9881c_dsi_probe,
+       .remove         = ili9881c_dsi_remove,
+       .driver = {
+               .name           = "ili9881c-dsi",
+               .of_match_table = ili9881c_of_match,
+       },
+};
+module_mipi_dsi_driver(ili9881c_dsi_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Ilitek ILI9881C Controller Driver");
+MODULE_LICENSE("GPL v2");
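
The new driver is entirely table driven: ili9881c_prepare() walks ili9881c_init[] and either switches register pages with the 0xFF 0x98 0x81 <page> sequence or writes one command/data pair. A runnable userspace walk of the same table shape, with a printing stub in place of mipi_dsi_dcs_write_buffer():

    #include <stdio.h>

    enum op { SWITCH_PAGE, COMMAND };

    struct instr {
        enum op op;
        unsigned char page, cmd, data;
    };

    #define PAGE(p)    { .op = SWITCH_PAGE, .page = (p) }
    #define CMD(c, d)  { .op = COMMAND, .cmd = (c), .data = (d) }

    /* A two-entry excerpt; the real table has a few hundred entries. */
    static const struct instr init[] = {
        PAGE(3),
        CMD(0x01, 0x00),
        CMD(0x02, 0x00),
        PAGE(0),
    };

    /* Stub for mipi_dsi_dcs_write_buffer(): show what would hit the bus. */
    static int dcs_write(const unsigned char *buf, unsigned int len)
    {
        unsigned int i;

        for (i = 0; i < len; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < sizeof(init) / sizeof(init[0]); i++) {
            if (init[i].op == SWITCH_PAGE) {
                /* The ILI9881C page-select sequence. */
                unsigned char buf[4] = { 0xff, 0x98, 0x81, init[i].page };
                dcs_write(buf, sizeof(buf));
            } else {
                unsigned char buf[2] = { init[i].cmd, init[i].data };
                dcs_write(buf, sizeof(buf));
            }
        }
        return 0;
    }
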
index 57df39b5c5899cd3caaffb51d8dbf5d7b2a2eea2..72edb334d9976840b0649debe31ea2819b5d6c74 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <drm/drmP.h>
 
 #include <video/mipi_display.h>
 
+struct panel_init_cmd {
+       size_t len;
+       const char *data;
+};
+
+#define _INIT_CMD(...) { \
+       .len = sizeof((char[]){__VA_ARGS__}), \
+       .data = (char[]){__VA_ARGS__} }
+
+struct panel_desc {
+       const struct drm_display_mode *mode;
+       unsigned int bpc;
+       struct {
+               unsigned int width;
+               unsigned int height;
+       } size;
+
+       unsigned long flags;
+       enum mipi_dsi_pixel_format format;
+       const struct panel_init_cmd *init_cmds;
+       unsigned int lanes;
+       const char * const *supply_names;
+       unsigned int num_supplies;
+       unsigned int sleep_mode_delay;
+       unsigned int power_down_delay;
+};
+
 struct innolux_panel {
        struct drm_panel base;
        struct mipi_dsi_device *link;
+       const struct panel_desc *desc;
 
        struct backlight_device *backlight;
-       struct regulator *supply;
+       struct regulator_bulk_data *supplies;
+       unsigned int num_supplies;
        struct gpio_desc *enable_gpio;
 
        bool prepared;
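
The _INIT_CMD() macro above leans on C99 compound literals: the same (char[]){...} literal both stores the payload bytes and, via sizeof, records how many were passed, so each table entry carries its own length. A self-contained demonstration of the trick:

    #include <stddef.h>
    #include <stdio.h>

    struct panel_init_cmd {
        size_t len;
        const char *data;
    };

    /* Same trick as _INIT_CMD above: one compound literal stores the
     * bytes while sizeof on it counts how many were passed. */
    #define INIT_CMD(...) { \
        .len = sizeof((char[]){__VA_ARGS__}), \
        .data = (char[]){__VA_ARGS__} }

    static const struct panel_init_cmd cmds[] = {
        INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
        INIT_CMD(0xB1, 0xE8, 0x11),
        { 0 },                          /* zero-length terminator */
    };

    int main(void)
    {
        int i;

        for (i = 0; cmds[i].len; i++)
            printf("cmd %d: %zu bytes, first byte 0x%02x\n",
                   i, cmds[i].len, (unsigned char)cmds[i].data[0]);
        return 0;
    }
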
@@ -72,12 +102,16 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
                return err;
        }
 
+       if (innolux->desc->sleep_mode_delay)
+               msleep(innolux->desc->sleep_mode_delay);
+
        gpiod_set_value_cansleep(innolux->enable_gpio, 0);
 
-       /* T8: 80ms - 1000ms */
-       msleep(80);
+       if (innolux->desc->power_down_delay)
+               msleep(innolux->desc->power_down_delay);
 
-       err = regulator_disable(innolux->supply);
+       err = regulator_bulk_disable(innolux->desc->num_supplies,
+                                    innolux->supplies);
        if (err < 0)
                return err;
 
@@ -89,24 +123,55 @@ static int innolux_panel_unprepare(struct drm_panel *panel)
 static int innolux_panel_prepare(struct drm_panel *panel)
 {
        struct innolux_panel *innolux = to_innolux_panel(panel);
-       int err, regulator_err;
+       int err;
 
        if (innolux->prepared)
                return 0;
 
        gpiod_set_value_cansleep(innolux->enable_gpio, 0);
 
-       err = regulator_enable(innolux->supply);
+       err = regulator_bulk_enable(innolux->desc->num_supplies,
+                                   innolux->supplies);
        if (err < 0)
                return err;
 
-       /* T2: 15ms - 1000ms */
-       usleep_range(15000, 16000);
+       /* p079zca: t2 (20ms), p097pfg: t4 (15ms) */
+       usleep_range(20000, 21000);
 
        gpiod_set_value_cansleep(innolux->enable_gpio, 1);
 
-       /* T4: 15ms - 1000ms */
-       usleep_range(15000, 16000);
+       /* p079zca: t4, p097pfg: t5 */
+       usleep_range(20000, 21000);
+
+       if (innolux->desc->init_cmds) {
+               const struct panel_init_cmd *cmds =
+                                       innolux->desc->init_cmds;
+               unsigned int i;
+
+               for (i = 0; cmds[i].len != 0; i++) {
+                       const struct panel_init_cmd *cmd = &cmds[i];
+
+                       err = mipi_dsi_generic_write(innolux->link, cmd->data,
+                                                    cmd->len);
+                       if (err < 0) {
+                               dev_err(panel->dev,
+                                       "failed to write command %u\n", i);
+                               goto poweroff;
+                       }
+
+                       /*
+                        * Included by trial and error: without this (or at
+                        * least some delay) the panel sometimes didn't
+                        * appear to pick up the command sequence.
+                        */
+                       err = mipi_dsi_dcs_nop(innolux->link);
+                       if (err < 0) {
+                               dev_err(panel->dev,
+                                       "failed to send DCS nop: %d\n", err);
+                               goto poweroff;
+                       }
+               }
+       }
 
        err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
        if (err < 0) {
@@ -133,12 +198,9 @@ static int innolux_panel_prepare(struct drm_panel *panel)
        return 0;
 
 poweroff:
-       regulator_err = regulator_disable(innolux->supply);
-       if (regulator_err)
-               DRM_DEV_ERROR(panel->dev, "failed to disable regulator: %d\n",
-                             regulator_err);
-
        gpiod_set_value_cansleep(innolux->enable_gpio, 0);
+       regulator_bulk_disable(innolux->desc->num_supplies, innolux->supplies);
+
        return err;
 }
 
@@ -162,7 +224,11 @@ static int innolux_panel_enable(struct drm_panel *panel)
        return 0;
 }
 
-static const struct drm_display_mode default_mode = {
+static const char * const innolux_p079zca_supply_names[] = {
+       "power",
+};
+
+static const struct drm_display_mode innolux_p079zca_mode = {
        .clock = 56900,
        .hdisplay = 768,
        .hsync_start = 768 + 40,
@@ -175,15 +241,181 @@ static const struct drm_display_mode default_mode = {
        .vrefresh = 60,
 };
 
+static const struct panel_desc innolux_p079zca_panel_desc = {
+       .mode = &innolux_p079zca_mode,
+       .bpc = 8,
+       .size = {
+               .width = 120,
+               .height = 160,
+       },
+       .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+                MIPI_DSI_MODE_LPM,
+       .format = MIPI_DSI_FMT_RGB888,
+       .lanes = 4,
+       .supply_names = innolux_p079zca_supply_names,
+       .num_supplies = ARRAY_SIZE(innolux_p079zca_supply_names),
+       .power_down_delay = 80, /* T8: 80ms - 1000ms */
+};
+
+static const char * const innolux_p097pfg_supply_names[] = {
+       "avdd",
+       "avee",
+};
+
+static const struct drm_display_mode innolux_p097pfg_mode = {
+       .clock = 229000,
+       .hdisplay = 1536,
+       .hsync_start = 1536 + 100,
+       .hsync_end = 1536 + 100 + 24,
+       .htotal = 1536 + 100 + 24 + 100,
+       .vdisplay = 2048,
+       .vsync_start = 2048 + 100,
+       .vsync_end = 2048 + 100 + 2,
+       .vtotal = 2048 + 100 + 2 + 18,
+       .vrefresh = 60,
+};
+
+/*
+ * Display manufacturer failed to provide init sequencing according to
+ * https://chromium-review.googlesource.com/c/chromiumos/third_party/coreboot/+/892065/
+ * so the init sequence stems from a register dump of a working panel.
+ */
+static const struct panel_init_cmd innolux_p097pfg_init_cmds[] = {
+       /* page 0 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
+       _INIT_CMD(0xB1, 0xE8, 0x11),
+       _INIT_CMD(0xB2, 0x25, 0x02),
+       _INIT_CMD(0xB5, 0x08, 0x00),
+       _INIT_CMD(0xBC, 0x0F, 0x00),
+       _INIT_CMD(0xB8, 0x03, 0x06, 0x00, 0x00),
+       _INIT_CMD(0xBD, 0x01, 0x90, 0x14, 0x14),
+       _INIT_CMD(0x6F, 0x01),
+       _INIT_CMD(0xC0, 0x03),
+       _INIT_CMD(0x6F, 0x02),
+       _INIT_CMD(0xC1, 0x0D),
+       _INIT_CMD(0xD9, 0x01, 0x09, 0x70),
+       _INIT_CMD(0xC5, 0x12, 0x21, 0x00),
+       _INIT_CMD(0xBB, 0x93, 0x93),
+
+       /* page 1 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x01),
+       _INIT_CMD(0xB3, 0x3C, 0x3C),
+       _INIT_CMD(0xB4, 0x0F, 0x0F),
+       _INIT_CMD(0xB9, 0x45, 0x45),
+       _INIT_CMD(0xBA, 0x14, 0x14),
+       _INIT_CMD(0xCA, 0x02),
+       _INIT_CMD(0xCE, 0x04),
+       _INIT_CMD(0xC3, 0x9B, 0x9B),
+       _INIT_CMD(0xD8, 0xC0, 0x03),
+       _INIT_CMD(0xBC, 0x82, 0x01),
+       _INIT_CMD(0xBD, 0x9E, 0x01),
+
+       /* page 2 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x02),
+       _INIT_CMD(0xB0, 0x82),
+       _INIT_CMD(0xD1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x82, 0x00, 0xA5,
+                 0x00, 0xC1, 0x00, 0xEA, 0x01, 0x0D, 0x01, 0x40),
+       _INIT_CMD(0xD2, 0x01, 0x6A, 0x01, 0xA8, 0x01, 0xDC, 0x02, 0x29,
+                 0x02, 0x67, 0x02, 0x68, 0x02, 0xA8, 0x02, 0xF0),
+       _INIT_CMD(0xD3, 0x03, 0x19, 0x03, 0x49, 0x03, 0x67, 0x03, 0x8C,
+                 0x03, 0xA6, 0x03, 0xC7, 0x03, 0xDE, 0x03, 0xEC),
+       _INIT_CMD(0xD4, 0x03, 0xFF, 0x03, 0xFF),
+       _INIT_CMD(0xE0, 0x00, 0x00, 0x00, 0x86, 0x00, 0xC5, 0x00, 0xE5,
+                 0x00, 0xFF, 0x01, 0x26, 0x01, 0x45, 0x01, 0x75),
+       _INIT_CMD(0xE1, 0x01, 0x9C, 0x01, 0xD5, 0x02, 0x05, 0x02, 0x4D,
+                 0x02, 0x86, 0x02, 0x87, 0x02, 0xC3, 0x03, 0x03),
+       _INIT_CMD(0xE2, 0x03, 0x2A, 0x03, 0x56, 0x03, 0x72, 0x03, 0x94,
+                 0x03, 0xAC, 0x03, 0xCB, 0x03, 0xE0, 0x03, 0xED),
+       _INIT_CMD(0xE3, 0x03, 0xFF, 0x03, 0xFF),
+
+       /* page 3 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x03),
+       _INIT_CMD(0xB0, 0x00, 0x00, 0x00, 0x00),
+       _INIT_CMD(0xB1, 0x00, 0x00, 0x00, 0x00),
+       _INIT_CMD(0xB2, 0x00, 0x00, 0x06, 0x04, 0x01, 0x40, 0x85),
+       _INIT_CMD(0xB3, 0x10, 0x07, 0xFC, 0x04, 0x01, 0x40, 0x80),
+       _INIT_CMD(0xB6, 0xF0, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01,
+                 0x40, 0x80),
+       _INIT_CMD(0xBA, 0xC5, 0x07, 0x00, 0x04, 0x11, 0x25, 0x8C),
+       _INIT_CMD(0xBB, 0xC5, 0x07, 0x00, 0x03, 0x11, 0x25, 0x8C),
+       _INIT_CMD(0xC0, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+       _INIT_CMD(0xC1, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
+       _INIT_CMD(0xC4, 0x00, 0x00),
+       _INIT_CMD(0xEF, 0x41),
+
+       /* page 4 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x04),
+       _INIT_CMD(0xEC, 0x4C),
+
+       /* page 5 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x05),
+       _INIT_CMD(0xB0, 0x13, 0x03, 0x03, 0x01),
+       _INIT_CMD(0xB1, 0x30, 0x00),
+       _INIT_CMD(0xB2, 0x02, 0x02, 0x00),
+       _INIT_CMD(0xB3, 0x82, 0x23, 0x82, 0x9D),
+       _INIT_CMD(0xB4, 0xC5, 0x75, 0x24, 0x57),
+       _INIT_CMD(0xB5, 0x00, 0xD4, 0x72, 0x11, 0x11, 0xAB, 0x0A),
+       _INIT_CMD(0xB6, 0x00, 0x00, 0xD5, 0x72, 0x24, 0x56),
+       _INIT_CMD(0xB7, 0x5C, 0xDC, 0x5C, 0x5C),
+       _INIT_CMD(0xB9, 0x0C, 0x00, 0x00, 0x01, 0x00),
+       _INIT_CMD(0xC0, 0x75, 0x11, 0x11, 0x54, 0x05),
+       _INIT_CMD(0xC6, 0x00, 0x00, 0x00, 0x00),
+       _INIT_CMD(0xD0, 0x00, 0x48, 0x08, 0x00, 0x00),
+       _INIT_CMD(0xD1, 0x00, 0x48, 0x09, 0x00, 0x00),
+
+       /* page 6 */
+       _INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x06),
+       _INIT_CMD(0xB0, 0x02, 0x32, 0x32, 0x08, 0x2F),
+       _INIT_CMD(0xB1, 0x2E, 0x15, 0x14, 0x13, 0x12),
+       _INIT_CMD(0xB2, 0x11, 0x10, 0x00, 0x3D, 0x3D),
+       _INIT_CMD(0xB3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+       _INIT_CMD(0xB4, 0x3D, 0x32),
+       _INIT_CMD(0xB5, 0x03, 0x32, 0x32, 0x09, 0x2F),
+       _INIT_CMD(0xB6, 0x2E, 0x1B, 0x1A, 0x19, 0x18),
+       _INIT_CMD(0xB7, 0x17, 0x16, 0x01, 0x3D, 0x3D),
+       _INIT_CMD(0xB8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+       _INIT_CMD(0xB9, 0x3D, 0x32),
+       _INIT_CMD(0xC0, 0x01, 0x32, 0x32, 0x09, 0x2F),
+       _INIT_CMD(0xC1, 0x2E, 0x1A, 0x1B, 0x16, 0x17),
+       _INIT_CMD(0xC2, 0x18, 0x19, 0x03, 0x3D, 0x3D),
+       _INIT_CMD(0xC3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+       _INIT_CMD(0xC4, 0x3D, 0x32),
+       _INIT_CMD(0xC5, 0x00, 0x32, 0x32, 0x08, 0x2F),
+       _INIT_CMD(0xC6, 0x2E, 0x14, 0x15, 0x10, 0x11),
+       _INIT_CMD(0xC7, 0x12, 0x13, 0x02, 0x3D, 0x3D),
+       _INIT_CMD(0xC8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
+       _INIT_CMD(0xC9, 0x3D, 0x32),
+
+       {},
+};
+
+static const struct panel_desc innolux_p097pfg_panel_desc = {
+       .mode = &innolux_p097pfg_mode,
+       .bpc = 8,
+       .size = {
+               .width = 147,
+               .height = 196,
+       },
+       .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+                MIPI_DSI_MODE_LPM,
+       .format = MIPI_DSI_FMT_RGB888,
+       .init_cmds = innolux_p097pfg_init_cmds,
+       .lanes = 4,
+       .supply_names = innolux_p097pfg_supply_names,
+       .num_supplies = ARRAY_SIZE(innolux_p097pfg_supply_names),
+       .sleep_mode_delay = 100, /* T15 */
+};
+
 static int innolux_panel_get_modes(struct drm_panel *panel)
 {
+       struct innolux_panel *innolux = to_innolux_panel(panel);
+       const struct drm_display_mode *m = innolux->desc->mode;
        struct drm_display_mode *mode;
 
-       mode = drm_mode_duplicate(panel->drm, &default_mode);
+       mode = drm_mode_duplicate(panel->drm, m);
        if (!mode) {
                DRM_DEV_ERROR(panel->drm->dev, "failed to add mode %ux%u@%u\n",
-                             default_mode.hdisplay, default_mode.vdisplay,
-                             default_mode.vrefresh);
+                             m->hdisplay, m->vdisplay, m->vrefresh);
                return -ENOMEM;
        }
 
@@ -191,9 +423,11 @@ static int innolux_panel_get_modes(struct drm_panel *panel)
 
        drm_mode_probed_add(panel->connector, mode);
 
-       panel->connector->display_info.width_mm = 120;
-       panel->connector->display_info.height_mm = 160;
-       panel->connector->display_info.bpc = 8;
+       panel->connector->display_info.width_mm =
+                       innolux->desc->size.width;
+       panel->connector->display_info.height_mm =
+                       innolux->desc->size.height;
+       panel->connector->display_info.bpc = innolux->desc->bpc;
 
        return 1;
 }
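
With two panels now served by one driver, probe selects a panel_desc through of_device_get_match_data(), and everything model-specific (mode, supplies, delays, init commands) hangs off that descriptor. A standalone sketch of the lookup-by-compatible pattern, with simplified hypothetical descriptors in place of the real ones:

    #include <stdio.h>
    #include <string.h>

    struct panel_desc {
        const char *name;
        unsigned int width_mm, height_mm;
    };

    static const struct panel_desc p079zca = { "p079zca", 120, 160 };
    static const struct panel_desc p097pfg = { "p097pfg", 147, 196 };

    /* Table-driven match: the userspace shape of an of_device_id table. */
    static const struct {
        const char *compatible;
        const struct panel_desc *data;
    } of_match[] = {
        { "innolux,p079zca", &p079zca },
        { "innolux,p097pfg", &p097pfg },
        { NULL, NULL }
    };

    static const struct panel_desc *get_match_data(const char *compatible)
    {
        int i;

        for (i = 0; of_match[i].compatible; i++)
            if (!strcmp(of_match[i].compatible, compatible))
                return of_match[i].data;
        return NULL;
    }

    int main(void)
    {
        const struct panel_desc *desc = get_match_data("innolux,p097pfg");

        if (desc)
            printf("%s: %ux%u mm\n", desc->name, desc->width_mm,
                   desc->height_mm);
        return 0;
    }
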
@@ -207,19 +441,42 @@ static const struct drm_panel_funcs innolux_panel_funcs = {
 };
 
 static const struct of_device_id innolux_of_match[] = {
-       { .compatible = "innolux,p079zca", },
+       { .compatible = "innolux,p079zca",
+         .data = &innolux_p079zca_panel_desc
+       },
+       { .compatible = "innolux,p097pfg",
+         .data = &innolux_p097pfg_panel_desc
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, innolux_of_match);
 
-static int innolux_panel_add(struct innolux_panel *innolux)
+static int innolux_panel_add(struct mipi_dsi_device *dsi,
+                            const struct panel_desc *desc)
 {
-       struct device *dev = &innolux->link->dev;
-       int err;
+       struct innolux_panel *innolux;
+       struct device *dev = &dsi->dev;
+       int err, i;
+
+       innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
+       if (!innolux)
+               return -ENOMEM;
+
+       innolux->desc = desc;
+
+       innolux->supplies = devm_kcalloc(dev, desc->num_supplies,
+                                        sizeof(*innolux->supplies),
+                                        GFP_KERNEL);
+       if (!innolux->supplies)
+               return -ENOMEM;
+
+       for (i = 0; i < desc->num_supplies; i++)
+               innolux->supplies[i].supply = desc->supply_names[i];
 
-       innolux->supply = devm_regulator_get(dev, "power");
-       if (IS_ERR(innolux->supply))
-               return PTR_ERR(innolux->supply);
+       err = devm_regulator_bulk_get(dev, desc->num_supplies,
+                                     innolux->supplies);
+       if (err < 0)
+               return err;
 
        innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable",
                                                       GPIOD_OUT_HIGH);
@@ -230,15 +487,21 @@ static int innolux_panel_add(struct innolux_panel *innolux)
        }
 
        innolux->backlight = devm_of_find_backlight(dev);
-
        if (IS_ERR(innolux->backlight))
                return PTR_ERR(innolux->backlight);
 
        drm_panel_init(&innolux->base);
        innolux->base.funcs = &innolux_panel_funcs;
-       innolux->base.dev = &innolux->link->dev;
+       innolux->base.dev = dev;
+
+       err = drm_panel_add(&innolux->base);
+       if (err < 0)
+               return err;
+
+       mipi_dsi_set_drvdata(dsi, innolux);
+       innolux->link = dsi;
 
-       return drm_panel_add(&innolux->base);
+       return 0;
 }
 
 static void innolux_panel_del(struct innolux_panel *innolux)
@@ -249,28 +512,19 @@ static void innolux_panel_del(struct innolux_panel *innolux)
 
 static int innolux_panel_probe(struct mipi_dsi_device *dsi)
 {
-       struct innolux_panel *innolux;
+       const struct panel_desc *desc;
        int err;
 
-       dsi->lanes = 4;
-       dsi->format = MIPI_DSI_FMT_RGB888;
-       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
-                         MIPI_DSI_MODE_LPM;
-
-       innolux = devm_kzalloc(&dsi->dev, sizeof(*innolux), GFP_KERNEL);
-       if (!innolux)
-               return -ENOMEM;
-
-       mipi_dsi_set_drvdata(dsi, innolux);
+       desc = of_device_get_match_data(&dsi->dev);
+       dsi->mode_flags = desc->flags;
+       dsi->format = desc->format;
+       dsi->lanes = desc->lanes;
 
-       innolux->link = dsi;
-
-       err = innolux_panel_add(innolux);
+       err = innolux_panel_add(dsi, desc);
        if (err < 0)
                return err;
 
-       err = mipi_dsi_attach(dsi);
-       return err;
+       return mipi_dsi_attach(dsi);
 }
 
 static int innolux_panel_remove(struct mipi_dsi_device *dsi)
@@ -292,7 +546,6 @@ static int innolux_panel_remove(struct mipi_dsi_device *dsi)
                DRM_DEV_ERROR(&dsi->dev, "failed to detach from DSI host: %d\n",
                              err);
 
-       drm_panel_detach(&innolux->base);
        innolux_panel_del(innolux);
 
        return 0;
@@ -318,5 +571,6 @@ static struct mipi_dsi_driver innolux_panel_driver = {
 module_mipi_dsi_driver(innolux_panel_driver);
 
 MODULE_AUTHOR("Chris Zhong <zyw@rock-chips.com>");
+MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");
 MODULE_DESCRIPTION("Innolux P079ZCA panel driver");
 MODULE_LICENSE("GPL v2");
index 0a94ab79a6c0f768227a3c273751913ef5bc512e..99caa7835e7b11d0be01b6aa5a0bbb3268cbd4ba 100644 (file)
@@ -500,7 +500,6 @@ static int jdi_panel_remove(struct mipi_dsi_device *dsi)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
                        ret);
 
-       drm_panel_detach(&jdi->base);
        jdi_panel_del(jdi);
 
        return 0;
index 5185819c5b797c0d834d64fa4d8951b56c01f4f7..8a1687887ae912f310b7214de6ef779d905179ae 100644 (file)
@@ -282,7 +282,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
 {
        struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
 
-       drm_panel_detach(&lvds->panel);
        drm_panel_remove(&lvds->panel);
 
        panel_lvds_disable(&lvds->panel);
index 90f1ae4af93c0d3ec92abe04c38850eafbc16e57..87fa316e1d7b09a60778751acc51972c759a1c84 100644 (file)
@@ -14,8 +14,6 @@
 #include <linux/regulator/consumer.h>
 #include <video/mipi_display.h>
 
-#define DRV_NAME "orisetech_otm8009a"
-
 #define OTM8009A_BACKLIGHT_DEFAULT     240
 #define OTM8009A_BACKLIGHT_MAX         255
 
@@ -98,6 +96,20 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
                DRM_WARN("mipi dsi dcs write buffer failed\n");
 }
 
+static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
+                                     size_t len)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+       /* data will be sent in dsi hs mode (i.e. no lpm) */
+       dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+       otm8009a_dcs_write_buf(ctx, data, len);
+
+       /* restore the dsi lpm mode */
+       dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+}
+
 #define dcs_write_seq(ctx, seq...)                     \
 ({                                                     \
        static const u8 d[] = { seq };                  \
@@ -248,11 +260,7 @@ static int otm8009a_disable(struct drm_panel *panel)
        if (!ctx->enabled)
                return 0; /* This is not an issue so we return 0 here */
 
-       /* Power off the backlight. Note: end-user still controls brightness */
-       ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
-       ret = backlight_update_status(ctx->bl_dev);
-       if (ret)
-               return ret;
+       backlight_disable(ctx->bl_dev);
 
        ret = mipi_dsi_dcs_set_display_off(dsi);
        if (ret)
@@ -316,13 +324,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
 
        ctx->prepared = true;
 
-       /*
-        * Power on the backlight. Note: end-user still controls brightness
-        * Note: ctx->prepared must be true before updating the backlight.
-        */
-       ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
-       backlight_update_status(ctx->bl_dev);
-
        return 0;
 }
 
@@ -330,6 +331,11 @@ static int otm8009a_enable(struct drm_panel *panel)
 {
        struct otm8009a *ctx = panel_to_otm8009a(panel);
 
+       if (ctx->enabled)
+               return 0;
+
+       backlight_enable(ctx->bl_dev);
+
        ctx->enabled = true;
 
        return 0;
@@ -387,7 +393,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
                 */
                data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
                data[1] = bd->props.brightness;
-               otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+               otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
 
                /* set Brightness Control & Backlight on */
                data[1] = 0x24;
@@ -399,7 +405,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
 
        /* Update Brightness Control & Backlight */
        data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
-       otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
+       otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
 
        return 0;
 }
@@ -444,11 +450,14 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
        ctx->panel.dev = dev;
        ctx->panel.funcs = &otm8009a_drm_funcs;
 
-       ctx->bl_dev = backlight_device_register(DRV_NAME "_backlight", dev, ctx,
-                                               &otm8009a_backlight_ops, NULL);
+       ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+                                                    dsi->host->dev, ctx,
+                                                    &otm8009a_backlight_ops,
+                                                    NULL);
        if (IS_ERR(ctx->bl_dev)) {
-               dev_err(dev, "failed to register backlight device\n");
-               return PTR_ERR(ctx->bl_dev);
+               ret = PTR_ERR(ctx->bl_dev);
+               dev_err(dev, "failed to register backlight: %d\n", ret);
+               return ret;
        }
 
        ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
@@ -466,11 +475,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
                return ret;
        }
 
-       DRM_INFO(DRV_NAME "_panel %ux%u@%u %ubpp dsi %udl - ready\n",
-                default_mode.hdisplay, default_mode.vdisplay,
-                default_mode.vrefresh,
-                mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
-
        return 0;
 }
 
@@ -481,8 +485,6 @@ static int otm8009a_remove(struct mipi_dsi_device *dsi)
        mipi_dsi_detach(dsi);
        drm_panel_remove(&ctx->panel);
 
-       backlight_device_unregister(ctx->bl_dev);
-
        return 0;
 }
 
@@ -496,7 +498,7 @@ static struct mipi_dsi_driver orisetech_otm8009a_driver = {
        .probe  = otm8009a_probe,
        .remove = otm8009a_remove,
        .driver = {
-               .name = DRV_NAME "_panel",
+               .name = "panel-orisetech-otm8009a",
                .of_match_table = orisetech_otm8009a_of_match,
        },
 };
index 74a806121f80e58a42149cdd45a2536a1f6d8d70..cb4dfb98be0f11819f27183282f7d713895b1ba9 100644 (file)
@@ -299,7 +299,6 @@ static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi)
        if (ret < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
 
-       drm_panel_detach(&wuxga_nt->base);
        wuxga_nt_panel_del(wuxga_nt);
 
        return 0;
index a188a3959f1ad33384fb266b967853a61bb4f973..6ad827b93ae19a5082ddec0e08dd627b568a2af0 100644 (file)
@@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
        int ret, i;
 
        ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
-       if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+       if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
                dev_err(ctx->dev, "read id failed\n");
                ctx->error = -EIO;
                return;
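
The extra "ret < 0" term matters because ARRAY_SIZE() has type size_t: in "ret < ARRAY_SIZE(id)" the usual arithmetic conversions turn a negative errno into a huge unsigned value, so the bound check alone treats a failed read as success. A small standalone illustration (hypothetical values):

#include <stdio.h>

int main(void)
{
        int ret = -5;   /* stand-in for a failed dcs read */
        size_t n = 3;   /* stand-in for ARRAY_SIZE(id) */

        /* ret converts to size_t here: (size_t)-5 is huge, not < 3 */
        if (!(ret < n))
                printf("unsigned comparison misses the error\n");

        /* testing the sign first keeps the error path intact */
        if (ret < 0 || (size_t)ret < n)
                printf("combined check catches it\n");

        return 0;
}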
index 71c09ed436ae46d652e4a7dfe0c51277d57df12f..75f92539055104c14aba1fa9e42f39b71c1f3467 100644 (file)
@@ -292,7 +292,6 @@ static int seiko_panel_remove(struct platform_device *pdev)
 {
        struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
 
-       drm_panel_detach(&panel->base);
        drm_panel_remove(&panel->base);
 
        seiko_panel_disable(&panel->base);
index 6bf8730f1a2115068e82b5fdc6c4694d9acdb9fb..02fc0f5423d40e585dd8d12002c2bfc98a5c4717 100644 (file)
@@ -418,7 +418,6 @@ static int sharp_panel_remove(struct mipi_dsi_device *dsi)
        if (err < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
 
-       drm_panel_detach(&sharp->base);
        sharp_panel_del(sharp);
 
        return 0;
index 494aa9b1628a7275210e9a657522287a76c0e385..e5cae0050f52d45a1c0c8fd387efb893d521196f 100644 (file)
@@ -327,7 +327,6 @@ static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi)
        if (ret < 0)
                dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
 
-       drm_panel_detach(&sharp_nt->base);
        sharp_nt_panel_del(sharp_nt);
 
        return 0;
index cbf1ab404ee77316ac6edd504df143bdb1cd39ef..5b5d0a24e713e25dc81ae32effcd57b2c4a8f6cf 100644 (file)
@@ -252,7 +252,7 @@ static int panel_simple_get_modes(struct drm_panel *panel)
        /* probe EDID if a DDC bus is available */
        if (p->ddc) {
                struct edid *edid = drm_get_edid(panel->connector, p->ddc);
-               drm_mode_connector_update_edid_property(panel->connector, edid);
+               drm_connector_update_edid_property(panel->connector, edid);
                if (edid) {
                        num += drm_add_edid_modes(panel->connector, edid);
                        kfree(edid);
@@ -364,7 +364,6 @@ static int panel_simple_remove(struct device *dev)
 {
        struct panel_simple *panel = dev_get_drvdata(dev);
 
-       drm_panel_detach(&panel->base);
        drm_panel_remove(&panel->base);
 
        panel_simple_disable(&panel->base);
@@ -581,6 +580,34 @@ static const struct panel_desc auo_b133htn01 = {
        },
 };
 
+static const struct display_timing auo_g070vvn01_timings = {
+       .pixelclock = { 33300000, 34209000, 45000000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 20, 40, 200 },
+       .hback_porch = { 87, 40, 1 },
+       .hsync_len = { 1, 48, 87 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 5, 13, 200 },
+       .vback_porch = { 31, 31, 29 },
+       .vsync_len = { 1, 1, 3 },
+};
+
+static const struct panel_desc auo_g070vvn01 = {
+       .timings = &auo_g070vvn01_timings,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .delay = {
+               .prepare = 200,
+               .enable = 50,
+               .disable = 50,
+               .unprepare = 1000,
+       },
+};
+
 static const struct drm_display_mode auo_g104sn02_mode = {
        .clock = 40000,
        .hdisplay = 800,
@@ -687,7 +714,7 @@ static const struct panel_desc auo_p320hvn03 = {
                .enable = 450,
                .unprepare = 500,
        },
-       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
 static const struct drm_display_mode auo_t215hvn01_mode = {
@@ -745,6 +772,28 @@ static const struct panel_desc avic_tm070ddh03 = {
        },
 };
 
+static const struct drm_display_mode boe_hv070wsa_mode = {
+       .clock = 40800,
+       .hdisplay = 1024,
+       .hsync_start = 1024 + 90,
+       .hsync_end = 1024 + 90 + 90,
+       .htotal = 1024 + 90 + 90 + 90,
+       .vdisplay = 600,
+       .vsync_start = 600 + 3,
+       .vsync_end = 600 + 3 + 4,
+       .vtotal = 600 + 3 + 4 + 3,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc boe_hv070wsa = {
+       .modes = &boe_hv070wsa_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 154,
+               .height = 90,
+       },
+};
+
 static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
        {
                .clock = 71900,
@@ -857,6 +906,61 @@ static const struct panel_desc chunghwa_claa101wb01 = {
        },
 };
 
+static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
+       .clock = 33260,
+       .hdisplay = 800,
+       .hsync_start = 800 + 40,
+       .hsync_end = 800 + 40 + 128,
+       .htotal = 800 + 40 + 128 + 88,
+       .vdisplay = 480,
+       .vsync_start = 480 + 10,
+       .vsync_end = 480 + 10 + 2,
+       .vtotal = 480 + 10 + 2 + 33,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc dataimage_scf0700c48ggu18 = {
+       .modes = &dataimage_scf0700c48ggu18_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct display_timing dlc_dlc0700yzg_1_timing = {
+       .pixelclock = { 45000000, 51200000, 57000000 },
+       .hactive = { 1024, 1024, 1024 },
+       .hfront_porch = { 100, 106, 113 },
+       .hback_porch = { 100, 106, 113 },
+       .hsync_len = { 100, 108, 114 },
+       .vactive = { 600, 600, 600 },
+       .vfront_porch = { 8, 11, 15 },
+       .vback_porch = { 8, 11, 15 },
+       .vsync_len = { 9, 13, 15 },
+       .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc dlc_dlc0700yzg_1 = {
+       .timings = &dlc_dlc0700yzg_1_timing,
+       .num_timings = 1,
+       .bpc = 6,
+       .size = {
+               .width = 154,
+               .height = 86,
+       },
+       .delay = {
+               .prepare = 30,
+               .enable = 200,
+               .disable = 200,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
 static const struct drm_display_mode edt_et057090dhu_mode = {
        .clock = 25175,
        .hdisplay = 640,
@@ -909,6 +1013,18 @@ static const struct panel_desc edt_etm0700g0dh6 = {
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
 };
 
+static const struct panel_desc edt_etm0700g0bdh6 = {
+       .modes = &edt_etm0700g0dh6_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
 static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
        .clock = 32260,
        .hdisplay = 800,
@@ -1086,6 +1202,36 @@ static const struct panel_desc innolux_at070tn92 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct display_timing innolux_g070y2_l01_timing = {
+       .pixelclock = { 28000000, 29500000, 32000000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 61, 91, 141 },
+       .hback_porch = { 60, 90, 140 },
+       .hsync_len = { 12, 12, 12 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 4, 9, 30 },
+       .vback_porch = { 4, 8, 28 },
+       .vsync_len = { 2, 2, 2 },
+       .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc innolux_g070y2_l01 = {
+       .timings = &innolux_g070y2_l01_timing,
+       .num_timings = 1,
+       .bpc = 6,
+       .size = {
+               .width = 152,
+               .height = 91,
+       },
+       .delay = {
+               .prepare = 10,
+               .enable = 100,
+               .disable = 100,
+               .unprepare = 800,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
 static const struct display_timing innolux_g101ice_l01_timing = {
        .pixelclock = { 60400000, 71100000, 74700000 },
        .hactive = { 1280, 1280, 1280 },
@@ -1217,6 +1363,30 @@ static const struct panel_desc innolux_n156bge_l21 = {
        },
 };
 
+static const struct drm_display_mode innolux_tv123wam_mode = {
+       .clock = 206016,
+       .hdisplay = 2160,
+       .hsync_start = 2160 + 48,
+       .hsync_end = 2160 + 48 + 32,
+       .htotal = 2160 + 48 + 32 + 80,
+       .vdisplay = 1440,
+       .vsync_start = 1440 + 3,
+       .vsync_end = 1440 + 3 + 10,
+       .vtotal = 1440 + 3 + 10 + 27,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_tv123wam = {
+       .modes = &innolux_tv123wam_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 259,
+               .height = 173,
+       },
+};
+
 static const struct drm_display_mode innolux_zj070na_01p_mode = {
        .clock = 51501,
        .hdisplay = 1024,
@@ -1247,8 +1417,8 @@ static const struct display_timing koe_tx31d200vm0baa_timing = {
        .hback_porch = { 16, 36, 56 },
        .hsync_len = { 8, 8, 8 },
        .vactive = { 480, 480, 480 },
-       .vfront_porch = { 6, 21, 33.5 },
-       .vback_porch = { 6, 21, 33.5 },
+       .vfront_porch = { 6, 21, 33 },
+       .vback_porch = { 6, 21, 33 },
        .vsync_len = { 8, 8, 8 },
        .flags = DISPLAY_FLAGS_DE_HIGH,
 };
@@ -1511,6 +1681,33 @@ static const struct panel_desc netron_dy_e231732 = {
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
+       .clock = 9000,
+       .hdisplay = 480,
+       .hsync_start = 480 + 2,
+       .hsync_end = 480 + 2 + 41,
+       .htotal = 480 + 2 + 41 + 2,
+       .vdisplay = 272,
+       .vsync_start = 272 + 2,
+       .vsync_end = 272 + 2 + 10,
+       .vtotal = 272 + 2 + 10 + 2,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
+       .modes = &newhaven_nhd_43_480272ef_atxl_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 95,
+               .height = 54,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+                    DRM_BUS_FLAG_SYNC_POSEDGE,
+};
+
 static const struct display_timing nlt_nl192108ac18_02d_timing = {
        .pixelclock = { 130000000, 148350000, 163000000 },
        .hactive = { 1920, 1920, 1920 },
@@ -1696,6 +1893,36 @@ static const struct panel_desc qd43003c0_40 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct display_timing rocktech_rk070er9427_timing = {
+       .pixelclock = { 26400000, 33300000, 46800000 },
+       .hactive = { 800, 800, 800 },
+       .hfront_porch = { 16, 210, 354 },
+       .hback_porch = { 46, 46, 46 },
+       .hsync_len = { 1, 1, 1 },
+       .vactive = { 480, 480, 480 },
+       .vfront_porch = { 7, 22, 147 },
+       .vback_porch = { 23, 23, 23 },
+       .vsync_len = { 1, 1, 1 },
+       .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc rocktech_rk070er9427 = {
+       .timings = &rocktech_rk070er9427_timing,
+       .num_timings = 1,
+       .bpc = 6,
+       .size = {
+               .width = 154,
+               .height = 86,
+       },
+       .delay = {
+               .prepare = 41,
+               .enable = 50,
+               .unprepare = 41,
+               .disable = 50,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
        .clock = 271560,
        .hdisplay = 2560,
@@ -1764,6 +1991,30 @@ static const struct panel_desc samsung_ltn140at29_301 = {
        },
 };
 
+static const struct drm_display_mode sharp_lq035q7db03_mode = {
+       .clock = 5500,
+       .hdisplay = 240,
+       .hsync_start = 240 + 16,
+       .hsync_end = 240 + 16 + 7,
+       .htotal = 240 + 16 + 7 + 5,
+       .vdisplay = 320,
+       .vsync_start = 320 + 9,
+       .vsync_end = 320 + 9 + 1,
+       .vtotal = 320 + 9 + 1 + 7,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq035q7db03 = {
+       .modes = &sharp_lq035q7db03_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 54,
+               .height = 72,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
 static const struct display_timing sharp_lq101k1ly04_timing = {
        .pixelclock = { 60000000, 65000000, 80000000 },
        .hactive = { 1280, 1280, 1280 },
@@ -2094,6 +2345,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "auo,b133xtn01",
                .data = &auo_b133xtn01,
+       }, {
+               .compatible = "auo,g070vvn01",
+               .data = &auo_g070vvn01,
        }, {
                .compatible = "auo,g104sn02",
                .data = &auo_g104sn02,
@@ -2112,6 +2366,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "avic,tm070ddh03",
                .data = &avic_tm070ddh03,
+       }, {
+               .compatible = "boe,hv070wsa-100",
+               .data = &boe_hv070wsa,
        }, {
                .compatible = "boe,nv101wxmn51",
                .data = &boe_nv101wxmn51,
@@ -2124,6 +2381,12 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "chunghwa,claa101wb01",
                .data = &chunghwa_claa101wb01
+       }, {
+               .compatible = "dataimage,scf0700c48ggu18",
+               .data = &dataimage_scf0700c48ggu18,
+       }, {
+               .compatible = "dlc,dlc0700yzg-1",
+               .data = &dlc_dlc0700yzg_1,
        }, {
                .compatible = "edt,et057090dhu",
                .data = &edt_et057090dhu,
@@ -2133,6 +2396,12 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "edt,etm0700g0dh6",
                .data = &edt_etm0700g0dh6,
+       }, {
+               .compatible = "edt,etm0700g0bdh6",
+               .data = &edt_etm0700g0bdh6,
+       }, {
+               .compatible = "edt,etm0700g0edh6",
+               .data = &edt_etm0700g0bdh6,
        }, {
                .compatible = "foxlink,fl500wvr00-a0t",
                .data = &foxlink_fl500wvr00_a0t,
@@ -2155,10 +2424,13 @@ static const struct of_device_id platform_of_match[] = {
                .compatible = "innolux,at070tn92",
                .data = &innolux_at070tn92,
        }, {
-               .compatible ="innolux,g101ice-l01",
+               .compatible = "innolux,g070y2-l01",
+               .data = &innolux_g070y2_l01,
+       }, {
+               .compatible = "innolux,g101ice-l01",
                .data = &innolux_g101ice_l01
        }, {
-               .compatible ="innolux,g121i1-l01",
+               .compatible = "innolux,g121i1-l01",
                .data = &innolux_g121i1_l01
        }, {
                .compatible = "innolux,g121x1-l03",
@@ -2169,6 +2441,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "innolux,n156bge-l21",
                .data = &innolux_n156bge_l21,
+       }, {
+               .compatible = "innolux,tv123wam",
+               .data = &innolux_tv123wam,
        }, {
                .compatible = "innolux,zj070na-01p",
                .data = &innolux_zj070na_01p,
@@ -2205,6 +2480,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "netron-dy,e231732",
                .data = &netron_dy_e231732,
+       }, {
+               .compatible = "newhaven,nhd-4.3-480272ef-atxl",
+               .data = &newhaven_nhd_43_480272ef_atxl,
        }, {
                .compatible = "nlt,nl192108ac18-02d",
                .data = &nlt_nl192108ac18_02d,
@@ -2226,6 +2504,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "qiaodian,qd43003c0-40",
                .data = &qd43003c0_40,
+       }, {
+               .compatible = "rocktech,rk070er9427",
+               .data = &rocktech_rk070er9427,
        }, {
                .compatible = "samsung,lsn122dl01-c01",
                .data = &samsung_lsn122dl01_c01,
@@ -2235,6 +2516,9 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "samsung,ltn140at29-301",
                .data = &samsung_ltn140at29_301,
+       }, {
+               .compatible = "sharp,lq035q7db03",
+               .data = &sharp_lq035q7db03,
        }, {
                .compatible = "sharp,lq101k1ly04",
                .data = &sharp_lq101k1ly04,
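
Two description styles coexist in the additions above: fixed drm_display_mode entries for panels with a single documented timing, and display_timing entries whose fields are {min, typ, max} ranges. panel-simple derives a mode from a timing roughly as in this sketch (the helpers pick the typical values):

#include <video/display_timing.h>
#include <video/videomode.h>
#include <drm/drm_modes.h>

static void example_timing_to_mode(const struct display_timing *dt,
                                   struct drm_display_mode *mode)
{
        struct videomode vm;

        videomode_from_timing(dt, &vm);         /* takes the typ. values */
        drm_display_mode_from_videomode(&vm, mode);
        mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
}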
index 358c64ef192223e40e6a72b27e5b287aab5147ed..74284e5afc5d9f78b6746554ecc124f641b89276 100644 (file)
@@ -419,7 +419,6 @@ static int st7789v_remove(struct spi_device *spi)
 {
        struct st7789v *ctx = spi_get_drvdata(spi);
 
-       drm_panel_detach(&ctx->panel);
        drm_panel_remove(&ctx->panel);
 
        if (ctx->backlight)
index 19a8189dc54f8bcfd549fb25aa5c894ce726ac96..0c70f0e91d218aad577dfe02ff62efc91cd89edd 100644 (file)
@@ -4,6 +4,7 @@ pl111_drm-y +=  pl111_display.o \
                pl111_drv.o
 
 pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
+pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
 pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
 
 obj-$(CONFIG_DRM_PL111) += pl111_drm.o
index 19b0d006a54a4c63823c361d919e93674a83fe56..754f6b25f2652ee83e578c1a6786190eb554e70f 100644 (file)
@@ -63,7 +63,7 @@ pl111_mode_valid(struct drm_crtc *crtc,
         * We use the pixelclock to also account for interlaced modes, the
         * resulting bandwidth is in bytes per second.
         */
-       bw = mode->clock * 1000; /* In Hz */
+       bw = mode->clock * 1000ULL; /* In Hz */
        bw = bw * mode->hdisplay * mode->vdisplay * cpp;
        bw = div_u64(bw, mode->htotal * mode->vtotal);
 
@@ -223,48 +223,84 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
 
        /* Hard-code TFT panel */
        cntl = CNTL_LCDEN | CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+       /* On the ST Micro variant, assume all 24 bits are connected */
+       if (priv->variant->st_bitmux_control)
+               cntl |= CNTL_ST_CDWID_24;
 
-       /* Note that the the hardware's format reader takes 'r' from
+       /*
+        * Note that the ARM hardware's format reader takes 'r' from
         * the low bit, while DRM formats list channels from high bit
-        * to low bit as you read left to right.
+        * to low bit as you read left to right. The ST Micro version of
+        * the PL110 (LCDC), however, uses the standard DRM format.
         */
        switch (fb->format->format) {
+       case DRM_FORMAT_BGR888:
+               /* Only supported on the ST Micro variant */
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_ST_LCDBPP24_PACKED | CNTL_BGR;
+               break;
+       case DRM_FORMAT_RGB888:
+               /* Only supported on the ST Micro variant */
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_ST_LCDBPP24_PACKED;
+               break;
        case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_XBGR8888:
-               cntl |= CNTL_LCDBPP24;
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+               else
+                       cntl |= CNTL_LCDBPP24;
                break;
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB8888:
-               cntl |= CNTL_LCDBPP24 | CNTL_BGR;
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_LCDBPP24;
+               else
+                       cntl |= CNTL_LCDBPP24 | CNTL_BGR;
                break;
        case DRM_FORMAT_BGR565:
                if (priv->variant->is_pl110)
                        cntl |= CNTL_LCDBPP16;
+               else if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565 | CNTL_BGR;
                else
                        cntl |= CNTL_LCDBPP16_565;
                break;
        case DRM_FORMAT_RGB565:
                if (priv->variant->is_pl110)
-                       cntl |= CNTL_LCDBPP16;
+                       cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+               else if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_LCDBPP16 | CNTL_ST_1XBPP_565;
                else
-                       cntl |= CNTL_LCDBPP16_565;
-               cntl |= CNTL_BGR;
+                       cntl |= CNTL_LCDBPP16_565 | CNTL_BGR;
                break;
        case DRM_FORMAT_ABGR1555:
        case DRM_FORMAT_XBGR1555:
                cntl |= CNTL_LCDBPP16;
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_ST_1XBPP_5551 | CNTL_BGR;
                break;
        case DRM_FORMAT_ARGB1555:
        case DRM_FORMAT_XRGB1555:
-               cntl |= CNTL_LCDBPP16 | CNTL_BGR;
+               cntl |= CNTL_LCDBPP16;
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_ST_1XBPP_5551;
+               else
+                       cntl |= CNTL_BGR;
                break;
        case DRM_FORMAT_ABGR4444:
        case DRM_FORMAT_XBGR4444:
                cntl |= CNTL_LCDBPP16_444;
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_ST_1XBPP_444 | CNTL_BGR;
                break;
        case DRM_FORMAT_ARGB4444:
        case DRM_FORMAT_XRGB4444:
-               cntl |= CNTL_LCDBPP16_444 | CNTL_BGR;
+               cntl |= CNTL_LCDBPP16_444;
+               if (priv->variant->st_bitmux_control)
+                       cntl |= CNTL_ST_1XBPP_444;
+               else
+                       cntl |= CNTL_BGR;
                break;
        default:
                WARN_ONCE(true, "Unknown FB format 0x%08x\n",
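
Distilled from the switch above: the ARM variants read red from the low bits, so DRM's RGB channel orders need CNTL_BGR set, while the ST Micro bitmux already follows DRM's order and needs CNTL_BGR for the BGR formats instead. A hypothetical helper showing only the 565 case, under that reading:

static u32 example_cntl_565(bool is_pl110, bool st_bitmux_control,
                            bool format_is_bgr565)
{
        u32 cntl;

        if (is_pl110)
                cntl = CNTL_LCDBPP16;   /* plain 16bpp on the old PL110 */
        else if (st_bitmux_control)
                cntl = CNTL_LCDBPP16 | CNTL_ST_1XBPP_565;
        else
                cntl = CNTL_LCDBPP16_565;

        /* ST follows the DRM channel order; ARM swaps it */
        if (st_bitmux_control ? format_is_bgr565 : !format_is_bgr565)
                cntl |= CNTL_BGR;

        return cntl;
}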
index ce4501d0ab481dfaf712f172a8f4f19b90ff6ac3..1aa015ccacefa7b0e5963a6ab077520681e1e2b9 100644 (file)
@@ -36,11 +36,14 @@ struct drm_minor;
  * struct pl111_variant_data - encodes IP differences
  * @name: the name of this variant
  * @is_pl110: this is the early PL110 variant
+ * @is_lcdc: this is the ST Microelectronics Nomadik LCDC variant
  * @external_bgr: this is the Versatile Pl110 variant with external
  *     BGR/RGB routing
  * @broken_clockdivider: the clock divider is broken and we need to
  *     use the supplied clock directly
  * @broken_vblank: the vblank IRQ is broken on this variant
+ * @st_bitmux_control: this variant is using the ST Micro bitmux
+ *     extensions to the control register
  * @formats: array of supported pixel formats on this variant
  * @nformats: the length of the array of supported pixel formats
  * @fb_bpp: desired bits per pixel on the default framebuffer
@@ -48,9 +51,11 @@ struct drm_minor;
 struct pl111_variant_data {
        const char *name;
        bool is_pl110;
+       bool is_lcdc;
        bool external_bgr;
        bool broken_clockdivider;
        bool broken_vblank;
+       bool st_bitmux_control;
        const u32 *formats;
        unsigned int nformats;
        unsigned int fb_bpp;
index 454ff08046424caa7e991949768852dbb9540f7c..47fe3022344426c6fa1dea4ae06e534bfff74faa 100644 (file)
@@ -75,6 +75,7 @@
 
 #include "pl111_drm.h"
 #include "pl111_versatile.h"
+#include "pl111_nomadik.h"
 
 #define DRIVER_DESC      "DRM module for PL111"
 
@@ -249,6 +250,8 @@ static struct drm_driver pl111_drm_driver = {
        .gem_prime_import_sg_table = pl111_gem_import_sg_table,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+       .gem_prime_mmap = drm_gem_cma_prime_mmap,
+       .gem_prime_vmap = drm_gem_cma_prime_vmap,
 
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = pl111_debugfs_init,
@@ -288,8 +291,8 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
                priv->memory_bw = 0;
        }
 
-       /* The two variants swap this register */
-       if (variant->is_pl110) {
+       /* The two main variants swap this register */
+       if (variant->is_pl110 || variant->is_lcdc) {
                priv->ienb = CLCD_PL110_IENB;
                priv->ctrl = CLCD_PL110_CNTL;
        } else {
@@ -301,13 +304,15 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
        if (IS_ERR(priv->regs)) {
                dev_err(dev, "%s failed mmio\n", __func__);
                ret = PTR_ERR(priv->regs);
-               goto dev_unref;
+               goto dev_put;
        }
 
        /* This may override some variant settings */
        ret = pl111_versatile_init(dev, priv);
        if (ret)
-               goto dev_unref;
+               goto dev_put;
+
+       pl111_nomadik_init(dev);
 
        /* turn off interrupts before requesting the irq */
        writel(0, priv->regs + priv->ienb);
@@ -321,16 +326,16 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
 
        ret = pl111_modeset_init(drm);
        if (ret != 0)
-               goto dev_unref;
+               goto dev_put;
 
        ret = drm_dev_register(drm, 0);
        if (ret < 0)
-               goto dev_unref;
+               goto dev_put;
 
        return 0;
 
-dev_unref:
-       drm_dev_unref(drm);
+dev_put:
+       drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 
        return ret;
@@ -347,7 +352,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
        if (priv->panel)
                drm_panel_bridge_remove(priv->bridge);
        drm_mode_config_cleanup(drm);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        of_reserved_mem_device_release(dev);
 
        return 0;
@@ -400,16 +405,50 @@ static const struct pl111_variant_data pl111_variant = {
        .fb_bpp = 32,
 };
 
+static const u32 pl110_nomadik_pixel_formats[] = {
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_BGR565,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_ABGR1555,
+       DRM_FORMAT_XBGR1555,
+       DRM_FORMAT_ARGB1555,
+       DRM_FORMAT_XRGB1555,
+       DRM_FORMAT_ABGR4444,
+       DRM_FORMAT_XBGR4444,
+       DRM_FORMAT_ARGB4444,
+       DRM_FORMAT_XRGB4444,
+};
+
+static const struct pl111_variant_data pl110_nomadik_variant = {
+       .name = "LCDC (PL110 Nomadik)",
+       .formats = pl110_nomadik_pixel_formats,
+       .nformats = ARRAY_SIZE(pl110_nomadik_pixel_formats),
+       .is_lcdc = true,
+       .st_bitmux_control = true,
+       .broken_vblank = true,
+       .fb_bpp = 16,
+};
+
 static const struct amba_id pl111_id_table[] = {
        {
                .id = 0x00041110,
                .mask = 0x000fffff,
-               .data = (void*)&pl110_variant,
+               .data = (void *)&pl110_variant,
+       },
+       {
+               .id = 0x00180110,
+               .mask = 0x00fffffe,
+               .data = (void *)&pl110_nomadik_variant,
        },
        {
                .id = 0x00041111,
                .mask = 0x000fffff,
-               .data = (void*)&pl111_variant,
+               .data = (void *)&pl111_variant,
        },
        {0, 0},
 };
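
The new table entry matches by masked peripheral ID: an AMBA device is accepted when its periphid agrees with the entry's id on every bit set in mask, so mask 0x00fffffe ignores bit 0 of the part number and lets two ID revisions land on the same entry. A sketch of the comparison (hypothetical helper; the real check lives in the AMBA bus core):

#include <linux/types.h>

static bool example_amba_id_matches(u32 periphid, u32 id, u32 mask)
{
        /* all bits covered by the mask must agree */
        return ((periphid ^ id) & mask) == 0;
}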
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.c b/drivers/gpu/drm/pl111/pl111_nomadik.c
new file mode 100644 (file)
index 0000000..6f385e5
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include "pl111_nomadik.h"
+
+#define PMU_CTRL_OFFSET 0x0000
+#define PMU_CTRL_LCDNDIF BIT(26)
+
+void pl111_nomadik_init(struct device *dev)
+{
+       struct regmap *pmu_regmap;
+
+       /*
+        * Just bail out if this is not found; we could be running
+        * multiplatform on something other than Nomadik.
+        */
+       pmu_regmap =
+               syscon_regmap_lookup_by_compatible("stericsson,nomadik-pmu");
+       if (IS_ERR(pmu_regmap))
+               return;
+
+       /*
+        * This bit in the PMU controller multiplexes the two graphics
+        * blocks found in the Nomadik STn8815. The other one is called
+        * MDIF (Master Display Interface) and gets muxed out here.
+        */
+       regmap_update_bits(pmu_regmap,
+                          PMU_CTRL_OFFSET,
+                          PMU_CTRL_LCDNDIF,
+                          0);
+       dev_info(dev, "set Nomadik PMU mux to CLCD mode\n");
+}
+EXPORT_SYMBOL_GPL(pl111_nomadik_init);
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.h b/drivers/gpu/drm/pl111/pl111_nomadik.h
new file mode 100644 (file)
index 0000000..19d663d
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/device.h>
+
+#ifndef PL111_NOMADIK_H
+#define PL111_NOMADIK_H
+
+#ifdef CONFIG_ARCH_NOMADIK
+
+void pl111_nomadik_init(struct device *dev);
+
+#else
+
+static inline void pl111_nomadik_init(struct device *dev)
+{
+}
+
+#endif
+
+#endif /* PL111_NOMADIK_H */
index b8cda94492412c820c2d44ec0f40afa298f163b0..0570c6826bff40911b120cb1a99e56a3db5ac421 100644 (file)
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        struct qxl_cursor_cmd *cmd;
        struct qxl_cursor *cursor;
        struct drm_gem_object *obj;
-       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
        int ret;
        void *user_ptr;
        int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                                                           cursor_bo, 0);
                cmd->type = QXL_CURSOR_SET;
 
-               qxl_bo_unref(&qcrtc->cursor_bo);
+               old_cursor_bo = qcrtc->cursor_bo;
                qcrtc->cursor_bo = cursor_bo;
                cursor_bo = NULL;
        } else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
 
+       if (old_cursor_bo)
+               qxl_bo_unref(&old_cursor_bo);
+
        qxl_bo_unref(&cursor_bo);
 
        return;
@@ -1083,7 +1086,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
        /* we get HPD via client monitors config */
        connector->polled = DRM_CONNECTOR_POLL_HPD;
        encoder->possible_crtcs = 1 << num_output;
-       drm_mode_connector_attach_encoder(&qxl_output->base,
-                                         &qxl_output->enc);
+       drm_connector_attach_encoder(&qxl_output->base,
+                                    &qxl_output->enc);
        drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
        drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
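
The cursor hunks above are a lifetime fix: rather than unreferencing the previous cursor BO while the device may still scan it out, the pointer is parked in old_cursor_bo and dropped only after the command using the new BO has been pushed and fenced. The distilled pattern (hypothetical names):

static void example_swap_cursor(struct qxl_bo **current_bo,
                                struct qxl_bo *new_bo)
{
        struct qxl_bo *old_bo = *current_bo;    /* keep the old ref alive */

        *current_bo = new_bo;

        /* ... build, push and fence the command that uses new_bo ... */

        if (old_bo)
                qxl_bo_unref(&old_bo);  /* device is done with it now */
}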
index 7cb214577275ba76ab0b6045e5863394f1ebb5f9..e37f0097f7441323281b8e06f76a04e1104ab260 100644 (file)
@@ -50,12 +50,6 @@ static const char *qxl_get_timeline_name(struct dma_fence *fence)
        return "release";
 }
 
-static bool qxl_nop_signaling(struct dma_fence *fence)
-{
-       /* fences are always automatically signaled, so just pretend we did this.. */
-       return true;
-}
-
 static long qxl_fence_wait(struct dma_fence *fence, bool intr,
                           signed long timeout)
 {
@@ -119,7 +113,6 @@ signaled:
 static const struct dma_fence_ops qxl_fence_ops = {
        .get_driver_name = qxl_get_driver_name,
        .get_timeline_name = qxl_get_timeline_name,
-       .enable_signaling = qxl_nop_signaling,
        .wait = qxl_fence_wait,
 };
 
index b9302c91827100398591bd0852ca8e0ada7854ed..d587779a80b4d0748785b5430078a638ef290798 100644 (file)
@@ -5676,19 +5676,29 @@ int ci_dpm_init(struct radeon_device *rdev)
        u16 data_offset, size;
        u8 frev, crev;
        struct ci_power_info *pi;
+       enum pci_bus_speed speed_cap;
+       struct pci_dev *root = rdev->pdev->bus->self;
        int ret;
-       u32 mask;
 
        pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
        if (pi == NULL)
                return -ENOMEM;
        rdev->pm.dpm.priv = pi;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret)
+       speed_cap = pcie_get_speed_cap(root);
+       if (speed_cap == PCI_SPEED_UNKNOWN) {
                pi->sys_pcie_mask = 0;
-       else
-               pi->sys_pcie_mask = mask;
+       } else {
+               if (speed_cap == PCIE_SPEED_8_0GT)
+                       pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+                               RADEON_PCIE_SPEED_50 |
+                               RADEON_PCIE_SPEED_80;
+               else if (speed_cap == PCIE_SPEED_5_0GT)
+                       pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+                               RADEON_PCIE_SPEED_50;
+               else
+                       pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+       }
        pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
 
        pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
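
The translation from the probed link-speed capability to the driver's mask could be captured in one hypothetical helper (RADEON_PCIE_SPEED_* are the new flags added to radeon.h further down):

static u32 example_pcie_mask_from_cap(enum pci_bus_speed speed_cap)
{
        switch (speed_cap) {
        case PCI_SPEED_UNKNOWN:
                return 0;
        case PCIE_SPEED_8_0GT:
                return RADEON_PCIE_SPEED_25 | RADEON_PCIE_SPEED_50 |
                       RADEON_PCIE_SPEED_80;
        case PCIE_SPEED_5_0GT:
                return RADEON_PCIE_SPEED_25 | RADEON_PCIE_SPEED_50;
        default:
                return RADEON_PCIE_SPEED_25;    /* 2.5 GT/s baseline */
        }
}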
index 7c73bc7e2f854e815aeb249bb4f80dbb932794f4..ebce4601a3056a0e961f074067a2a762ae53bcda 100644 (file)
@@ -9499,9 +9499,10 @@ int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
 static void cik_pcie_gen3_enable(struct radeon_device *rdev)
 {
        struct pci_dev *root = rdev->pdev->bus->self;
+       enum pci_bus_speed speed_cap;
        int bridge_pos, gpu_pos;
-       u32 speed_cntl, mask, current_data_rate;
-       int ret, i;
+       u32 speed_cntl, current_data_rate;
+       int i;
        u16 tmp16;
 
        if (pci_is_root_bus(rdev->pdev->bus))
@@ -9516,23 +9517,24 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
        if (!(rdev->flags & RADEON_IS_PCIE))
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
+       speed_cap = pcie_get_speed_cap(root);
+       if (speed_cap == PCI_SPEED_UNKNOWN)
                return;
 
-       if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+       if ((speed_cap != PCIE_SPEED_8_0GT) &&
+           (speed_cap != PCIE_SPEED_5_0GT))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
                LC_CURRENT_DATA_RATE_SHIFT;
-       if (mask & DRM_PCIE_SPEED_80) {
+       if (speed_cap == PCIE_SPEED_8_0GT) {
                if (current_data_rate == 2) {
                        DRM_INFO("PCIE gen 3 link speeds already enabled\n");
                        return;
                }
                DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
-       } else if (mask & DRM_PCIE_SPEED_50) {
+       } else if (speed_cap == PCIE_SPEED_5_0GT) {
                if (current_data_rate == 1) {
                        DRM_INFO("PCIE gen 2 link speeds already enabled\n");
                        return;
@@ -9548,7 +9550,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
        if (!gpu_pos)
                return;
 
-       if (mask & DRM_PCIE_SPEED_80) {
+       if (speed_cap == PCIE_SPEED_8_0GT) {
                /* re-try equalization if gen3 is not already enabled */
                if (current_data_rate != 2) {
                        u16 bridge_cfg, gpu_cfg;
@@ -9636,9 +9638,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
 
        pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
        tmp16 &= ~0xf;
-       if (mask & DRM_PCIE_SPEED_80)
+       if (speed_cap == PCIE_SPEED_8_0GT)
                tmp16 |= 3; /* gen3 */
-       else if (mask & DRM_PCIE_SPEED_50)
+       else if (speed_cap == PCIE_SPEED_5_0GT)
                tmp16 |= 2; /* gen2 */
        else
                tmp16 |= 1; /* gen1 */
index 73d4c53481168b1681aa43df7bc85059b22b7397..5e044c98fca2b2a8234d2b32bf9baec3087cc177 100644 (file)
@@ -1327,9 +1327,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
        case RADEON_PCIE_GEN3:
                return RADEON_PCIE_GEN3;
        default:
-               if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
+               if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
                        return RADEON_PCIE_GEN3;
-               else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
+               else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
                        return RADEON_PCIE_GEN2;
                else
                        return RADEON_PCIE_GEN1;
index 4a2eb409aaccc1ebe128988df3050c137db7a681..1a6f6edb3515188ea55e3ce8e2b9d2e951d0c8c7 100644 (file)
@@ -1653,6 +1653,10 @@ struct radeon_pm {
        struct radeon_dpm       dpm;
 };
 
+#define RADEON_PCIE_SPEED_25 1
+#define RADEON_PCIE_SPEED_50 2
+#define RADEON_PCIE_SPEED_80 4
+
 int radeon_pm_get_type_index(struct radeon_device *rdev,
                             enum radeon_pm_state_type ps_type,
                             int instance);
index 2aea2bdff99bca7b6bdd2baa7b42d21acee1d18b..414642e5b7a3110353bafb3ad022f0d7fee4e78f 100644 (file)
@@ -244,23 +244,15 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
 {
        struct drm_device *dev = connector->dev;
        struct radeon_device *rdev = dev->dev_private;
-       struct drm_encoder *best_encoder = NULL;
-       struct drm_encoder *encoder = NULL;
+       struct drm_encoder *best_encoder;
+       struct drm_encoder *encoder;
        const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
        bool connected;
        int i;
 
        best_encoder = connector_funcs->best_encoder(connector);
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL,
-                                          connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if ((encoder == best_encoder) && (status == connector_status_connected))
                        connected = true;
                else
@@ -270,7 +262,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
                        radeon_atombios_connected_scratch_regs(connector, encoder, connected);
                else
                        radeon_combios_connected_scratch_regs(connector, encoder, connected);
-
        }
 }
 
@@ -279,17 +270,11 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
        struct drm_encoder *encoder;
        int i;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if (encoder->encoder_type == encoder_type)
                        return encoder;
        }
+
        return NULL;
 }
 
@@ -383,20 +368,23 @@ static int radeon_ddc_get_modes(struct drm_connector *connector)
        int ret;
 
        if (radeon_connector->edid) {
-               drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+               drm_connector_update_edid_property(connector, radeon_connector->edid);
                ret = drm_add_edid_modes(connector, radeon_connector->edid);
                return ret;
        }
-       drm_mode_connector_update_edid_property(connector, NULL);
+       drm_connector_update_edid_property(connector, NULL);
        return 0;
 }
 
 static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
 {
-       int enc_id = connector->encoder_ids[0];
-       /* pick the encoder ids */
-       if (enc_id)
-               return drm_encoder_find(connector->dev, NULL, enc_id);
+       struct drm_encoder *encoder;
+       int i;
+
+       /* pick the first one */
+       drm_connector_for_each_possible_encoder(connector, encoder, i)
+               return encoder;
+
        return NULL;
 }
 
@@ -436,19 +424,19 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
        struct drm_device *dev = connector->dev;
        struct drm_connector *conflict;
        struct radeon_connector *radeon_conflict;
-       int i;
 
        list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
+               struct drm_encoder *enc;
+               int i;
+
                if (conflict == connector)
                        continue;
 
                radeon_conflict = to_radeon_connector(conflict);
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-                       if (conflict->encoder_ids[i] == 0)
-                               break;
 
+               drm_connector_for_each_possible_encoder(conflict, enc, i) {
                        /* if the IDs match */
-                       if (conflict->encoder_ids[i] == encoder->base.id) {
+                       if (enc == encoder) {
                                if (conflict->status != connector_status_connected)
                                        continue;
 
@@ -1256,7 +1244,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder = NULL;
        const struct drm_encoder_helper_funcs *encoder_funcs;
-       int i, r;
+       int r;
        enum drm_connector_status ret = connector_status_disconnected;
        bool dret = false, broken_edid = false;
 
@@ -1374,15 +1362,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
 
        /* find analog encoder */
        if (radeon_connector->dac_load_detect) {
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-                       if (connector->encoder_ids[i] == 0)
-                               break;
-
-                       encoder = drm_encoder_find(connector->dev, NULL,
-                                                  connector->encoder_ids[i]);
-                       if (!encoder)
-                               continue;
+               int i;
 
+               drm_connector_for_each_possible_encoder(connector, encoder, i) {
                        if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
                            encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
                                continue;
@@ -1458,18 +1440,11 @@ exit:
 /* okay need to be smart in here about which encoder to pick */
 static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 {
-       int enc_id = connector->encoder_ids[0];
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct drm_encoder *encoder;
        int i;
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
 
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                if (radeon_connector->use_digital == true) {
                        if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
                                return encoder;
@@ -1484,8 +1459,9 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 
        /* then check use digital */
        /* pick the first one */
-       if (enc_id)
-               return drm_encoder_find(connector->dev, NULL, enc_id);
+       drm_connector_for_each_possible_encoder(connector, encoder, i)
+               return encoder;
+
        return NULL;
 }
 
@@ -1628,14 +1604,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
        struct radeon_encoder *radeon_encoder;
        int i;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                radeon_encoder = to_radeon_encoder(encoder);
 
                switch (radeon_encoder->encoder_id) {
@@ -1657,14 +1626,7 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
        int i;
        bool found = false;
 
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
-               if (!encoder)
-                       continue;
-
+       drm_connector_for_each_possible_encoder(connector, encoder, i) {
                radeon_encoder = to_radeon_encoder(encoder);
                if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
                        found = true;
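
Every hunk in this file performs the same mechanical substitution: the open-coded walk over connector->encoder_ids[] becomes drm_connector_for_each_possible_encoder(). The helper behaves roughly like this sketch (illustrative, not verbatim kernel source): stop at the first empty ID slot, and skip IDs that no longer resolve to an encoder.

#define example_for_each_possible_encoder(connector, encoder, i)          \
        for ((i) = 0; (i) < DRM_CONNECTOR_MAX_ENCODER &&                  \
             (connector)->encoder_ids[(i)] != 0; (i)++)                   \
                if (!((encoder) = drm_encoder_find((connector)->dev,      \
                                NULL, (connector)->encoder_ids[(i)]))) {} \
                else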
index cd8a3ee16649f2ff5e2033f724dd4a3b082f44d5..f920be236cc9d62aa6371e7e9d2010a4ba21b868 100644 (file)
@@ -195,11 +195,11 @@ static int radeon_dp_mst_get_ddc_modes(struct drm_connector *connector)
        radeon_connector->edid = edid;
        DRM_DEBUG_KMS("edid retrieved %p\n", edid);
        if (radeon_connector->edid) {
-               drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+               drm_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
                ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
                return ret;
        }
-       drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+       drm_connector_update_edid_property(&radeon_connector->base, NULL);
 
        return ret;
 }
@@ -290,7 +290,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
-       drm_mode_connector_set_path_property(connector, pathprop);
+       drm_connector_set_path_property(connector, pathprop);
 
        return connector;
 }
index c6ee80216cf4a71f510bf239fc4fdf6f67235f48..c341fb2a5b56246ba8e1bc842a243dc076c2976c 100644 (file)
@@ -211,7 +211,7 @@ radeon_link_encoder_connector(struct drm_device *dev)
                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                        radeon_encoder = to_radeon_encoder(encoder);
                        if (radeon_encoder->devices & radeon_connector->devices) {
-                               drm_mode_connector_attach_encoder(connector, encoder);
+                               drm_connector_attach_encoder(connector, encoder);
                                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
                                        radeon_encoder_add_backlight(radeon_encoder, connector);
                        }
index edbb4cd519fd6843c7e65398e8a33c9a0da7c095..ba2fd295697fea1ce35aef2dc8965635e6b36a31 100644 (file)
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
        if (bo == NULL)
                return NULL;
 
-       ttm_bo_reference(&bo->tbo);
+       ttm_bo_get(&bo->tbo);
        return bo;
 }
 
@@ -320,9 +320,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
-       ttm_bo_unref(&tbo);
-       if (tbo == NULL)
-               *bo = NULL;
+       ttm_bo_put(tbo);
+       *bo = NULL;
 }
 
 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
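
Note: these hunks are part of the tree-wide move from ttm_bo_reference()/ttm_bo_unref() to ttm_bo_get()/ttm_bo_put(). Unlike ttm_bo_unref(), ttm_bo_put() does not clear the caller's pointer, so radeon_bo_unref() now does that explicitly. A minimal sketch (example_* is illustrative):

    #include <drm/ttm/ttm_bo_api.h>

    /* Sketch: drop a TTM BO reference and clear the caller's pointer,
     * mirroring what radeon_bo_unref() does after this patch. */
    static void example_bo_unref(struct ttm_buffer_object **tbo)
    {
            ttm_bo_put(*tbo);
            *tbo = NULL;
    }
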
index 48f4b273e31611f2b026b64bd555e67ff7fc14a4..0c7f228db6e3d9c6c267d256f1c68d78d33ba16f 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
  * Copyright 2009 VMware, Inc.
  *
index 8689fcca051c7b8b0af795e258d40fb33682915c..cbb67e9ffb3a52f413564f82db87dd0ece44c37e 100644 (file)
@@ -947,11 +947,11 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
 static struct vm_operations_struct radeon_ttm_vm_ops;
 static const struct vm_operations_struct *ttm_vm_ops = NULL;
 
-static int radeon_ttm_fault(struct vm_fault *vmf)
+static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo;
        struct radeon_device *rdev;
-       int r;
+       vm_fault_t ret;
 
        bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
        if (bo == NULL) {
@@ -959,9 +959,9 @@ static int radeon_ttm_fault(struct vm_fault *vmf)
        }
        rdev = radeon_get_rdev(bo->bdev);
        down_read(&rdev->pm.mclk_lock);
-       r = ttm_vm_ops->fault(vmf);
+       ret = ttm_vm_ops->fault(vmf);
        up_read(&rdev->pm.mclk_lock);
-       return r;
+       return ret;
 }
 
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
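
Note: this hunk is part of the conversion of fault handlers to the dedicated vm_fault_t return type, which keeps VM_FAULT_* codes from being confused with negative errno values. A minimal sketch of a handler using the new type (illustrative only):

    #include <linux/mm.h>

    /* Sketch: a trivial vm_operations_struct fault handler. The
     * vm_fault_t return type only ever carries VM_FAULT_* codes. */
    static vm_fault_t example_fault(struct vm_fault *vmf)
    {
            if (!vmf->vma->vm_private_data)
                    return VM_FAULT_SIGBUS; /* no backing object */

            return VM_FAULT_NOPAGE; /* nothing to map in this sketch */
    }
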
index 1907c950d76f05ef32811685baaee9f7d4db83b9..85c604d2923584370e59c88630bed491c2453b48 100644 (file)
@@ -7082,9 +7082,10 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 static void si_pcie_gen3_enable(struct radeon_device *rdev)
 {
        struct pci_dev *root = rdev->pdev->bus->self;
+       enum pci_bus_speed speed_cap;
        int bridge_pos, gpu_pos;
-       u32 speed_cntl, mask, current_data_rate;
-       int ret, i;
+       u32 speed_cntl, current_data_rate;
+       int i;
        u16 tmp16;
 
        if (pci_is_root_bus(rdev->pdev->bus))
@@ -7099,23 +7100,24 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
        if (!(rdev->flags & RADEON_IS_PCIE))
                return;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret != 0)
+       speed_cap = pcie_get_speed_cap(root);
+       if (speed_cap == PCI_SPEED_UNKNOWN)
                return;
 
-       if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+       if ((speed_cap != PCIE_SPEED_8_0GT) &&
+           (speed_cap != PCIE_SPEED_5_0GT))
                return;
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
                LC_CURRENT_DATA_RATE_SHIFT;
-       if (mask & DRM_PCIE_SPEED_80) {
+       if (speed_cap == PCIE_SPEED_8_0GT) {
                if (current_data_rate == 2) {
                        DRM_INFO("PCIE gen 3 link speeds already enabled\n");
                        return;
                }
                DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
-       } else if (mask & DRM_PCIE_SPEED_50) {
+       } else if (speed_cap == PCIE_SPEED_5_0GT) {
                if (current_data_rate == 1) {
                        DRM_INFO("PCIE gen 2 link speeds already enabled\n");
                        return;
@@ -7131,7 +7133,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
        if (!gpu_pos)
                return;
 
-       if (mask & DRM_PCIE_SPEED_80) {
+       if (speed_cap == PCIE_SPEED_8_0GT) {
                /* re-try equalization if gen3 is not already enabled */
                if (current_data_rate != 2) {
                        u16 bridge_cfg, gpu_cfg;
@@ -7219,9 +7221,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
 
        pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
        tmp16 &= ~0xf;
-       if (mask & DRM_PCIE_SPEED_80)
+       if (speed_cap == PCIE_SPEED_8_0GT)
                tmp16 |= 3; /* gen3 */
-       else if (mask & DRM_PCIE_SPEED_50)
+       else if (speed_cap == PCIE_SPEED_5_0GT)
                tmp16 |= 2; /* gen2 */
        else
                tmp16 |= 1; /* gen1 */
index fea88078cf8ea1f415394f09ec20fcc882c870fa..8fb60b3af015804d6d5ee3ef5d6f24ba74e2b1d9 100644 (file)
@@ -6899,8 +6899,9 @@ int si_dpm_init(struct radeon_device *rdev)
        struct ni_power_info *ni_pi;
        struct si_power_info *si_pi;
        struct atom_clock_dividers dividers;
+       enum pci_bus_speed speed_cap;
+       struct pci_dev *root = rdev->pdev->bus->self;
        int ret;
-       u32 mask;
 
        si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
        if (si_pi == NULL)
@@ -6910,11 +6911,20 @@ int si_dpm_init(struct radeon_device *rdev)
        eg_pi = &ni_pi->eg;
        pi = &eg_pi->rv7xx;
 
-       ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
-       if (ret)
+       speed_cap = pcie_get_speed_cap(root);
+       if (speed_cap == PCI_SPEED_UNKNOWN) {
                si_pi->sys_pcie_mask = 0;
-       else
-               si_pi->sys_pcie_mask = mask;
+       } else {
+               if (speed_cap == PCIE_SPEED_8_0GT)
+                       si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+                               RADEON_PCIE_SPEED_50 |
+                               RADEON_PCIE_SPEED_80;
+               else if (speed_cap == PCIE_SPEED_5_0GT)
+                       si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
+                               RADEON_PCIE_SPEED_50;
+               else
+                       si_pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
+       }
        si_pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
        si_pi->boot_pcie_gen = si_get_current_pcie_speed(rdev);
 
index 155ad840f3c59d6cfae28caaba6a863ceb3880a9..4c39de3f4f0f374b3d264cd2a18a8f69a569a689 100644 (file)
@@ -353,7 +353,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
 
        drm_connector_helper_add(connector, &rcar_lvds_conn_helper_funcs);
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret < 0)
                return ret;
 
@@ -434,8 +434,8 @@ static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
                        ret = -EPROBE_DEFER;
        } else {
                lvds->panel = of_drm_find_panel(remote);
-               if (!lvds->panel)
-                       ret = -EPROBE_DEFER;
+               if (IS_ERR(lvds->panel))
+                       ret = PTR_ERR(lvds->panel);
        }
 
 done:
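
Note: this hunk adapts to the new of_drm_find_panel() contract: the function now returns an ERR_PTR() (typically -EPROBE_DEFER) instead of NULL on failure. A minimal sketch of the new calling convention (example_* is illustrative):

    #include <linux/err.h>
    #include <drm/drm_panel.h>

    /* Sketch: look up a panel by OF node under the post-4.18 contract. */
    static int example_get_panel(struct device_node *np,
                                 struct drm_panel **panel)
    {
            *panel = of_drm_find_panel(np);
            if (IS_ERR(*panel))
                    return PTR_ERR(*panel); /* often -EPROBE_DEFER */

            return 0;
    }
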
index c6fbdcd87c16d3b8727ac2618a591a23fd910eaf..8ad0d773dc33a63c09a8bc246b4ceb7920e10f94 100644 (file)
@@ -275,7 +275,7 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
                dp->sink_has_audio = drm_detect_monitor_audio(edid);
                ret = drm_add_edid_modes(connector, edid);
                if (ret)
-                       drm_mode_connector_update_edid_property(connector,
+                       drm_connector_update_edid_property(connector,
                                                                edid);
        }
        mutex_unlock(&dp->lock);
@@ -1062,7 +1062,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
 
        drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs);
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret) {
                DRM_ERROR("failed to attach connector and encoder\n");
                goto err_free_connector;
index eb3042c6d1b202bb27e2f1b55f6714d4f48b252a..3105965fc26034e921de4c86d39475180e70b03c 100644 (file)
@@ -792,7 +792,6 @@ err_config_video:
 
 int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
 {
-       u32 val;
        int ret;
 
        ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0);
@@ -801,11 +800,7 @@ int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio)
                return ret;
        }
 
-       val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       val |= SPDIF_FIFO_MID_RANGE(0xe0);
-       val |= SPDIF_JITTER_THRSH(0xe0);
-       val |= SPDIF_JITTER_AVG_WIN(7);
-       writel(val, dp->regs + SPDIF_CTRL_ADDR);
+       writel(0, dp->regs + SPDIF_CTRL_ADDR);
 
        /* clean the audio config and reset */
        writel(0, dp->regs + AUDIO_SRC_CNTL);
@@ -929,12 +924,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
 {
        u32 val;
 
-       val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       val |= SPDIF_FIFO_MID_RANGE(0xe0);
-       val |= SPDIF_JITTER_THRSH(0xe0);
-       val |= SPDIF_JITTER_AVG_WIN(7);
-       writel(val, dp->regs + SPDIF_CTRL_ADDR);
-
        writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL);
 
        val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4);
@@ -942,9 +931,6 @@ static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp)
        writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL);
 
        val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS;
-       val |= SPDIF_FIFO_MID_RANGE(0xe0);
-       val |= SPDIF_JITTER_THRSH(0xe0);
-       val |= SPDIF_JITTER_AVG_WIN(7);
        writel(val, dp->regs + SPDIF_CTRL_ADDR);
 
        clk_prepare_enable(dp->spdif_clk);
index d53d5a09547f13a8af2e40f222b6e648058830e9..662b6cb5d3f0241271d7ed62ea4aaa9829510659 100644 (file)
@@ -595,7 +595,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
        dsi->format = device->format;
        dsi->mode_flags = device->mode_flags;
        dsi->panel = of_drm_find_panel(device->dev.of_node);
-       if (dsi->panel)
+       if (!IS_ERR(dsi->panel))
                return drm_panel_attach(dsi->panel, &dsi->connector);
 
        return -EINVAL;
@@ -1129,7 +1129,7 @@ static int dw_mipi_dsi_register(struct drm_device *drm,
                           &dw_mipi_dsi_atomic_connector_funcs,
                           DRM_MODE_CONNECTOR_DSI);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index 88d0774c97bdc7a4f387fe79dd4de31112a38875..1c02b3e61299c800549519a9f065ac2691f665fc 100644 (file)
@@ -565,7 +565,7 @@ static int inno_hdmi_connector_get_modes(struct drm_connector *connector)
        if (edid) {
                hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
                hdmi->hdmi_data.sink_has_audio = drm_detect_monitor_audio(edid);
-               drm_mode_connector_update_edid_property(connector, edid);
+               drm_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }
@@ -634,7 +634,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
        drm_connector_init(drm, &hdmi->connector, &inno_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
 
-       drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+       drm_connector_attach_encoder(&hdmi->connector, encoder);
 
        return 0;
 }
index d4f4118b482ddd6f215eba1cb54d1f4dd5c83b3e..ea18cb2a76c0dc7c7e9052307dda301743a607c2 100644 (file)
 #include <drm/drm_atomic.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
 #include "rockchip_drm_psr.h"
 
-#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
-
-struct rockchip_drm_fb {
-       struct drm_framebuffer fb;
-       struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
-};
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
-                                              unsigned int plane)
-{
-       struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
-
-       if (plane >= ROCKCHIP_MAX_FB_BUFFER)
-               return NULL;
-
-       return rk_fb->obj[plane];
-}
-
-static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
-{
-       struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-       int i;
-
-       for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
-               drm_gem_object_put_unlocked(rockchip_fb->obj[i]);
-
-       drm_framebuffer_cleanup(fb);
-       kfree(rockchip_fb);
-}
-
-static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
-                                        struct drm_file *file_priv,
-                                        unsigned int *handle)
-{
-       struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-
-       return drm_gem_handle_create(file_priv,
-                                    rockchip_fb->obj[0], handle);
-}
-
 static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
                                 struct drm_file *file,
                                 unsigned int flags, unsigned int color,
@@ -75,46 +36,45 @@ static int rockchip_drm_fb_dirty(struct drm_framebuffer *fb,
 }
 
 static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
-       .destroy        = rockchip_drm_fb_destroy,
-       .create_handle  = rockchip_drm_fb_create_handle,
-       .dirty          = rockchip_drm_fb_dirty,
+       .destroy       = drm_gem_fb_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .dirty         = rockchip_drm_fb_dirty,
 };
 
-static struct rockchip_drm_fb *
+static struct drm_framebuffer *
 rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd,
                  struct drm_gem_object **obj, unsigned int num_planes)
 {
-       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_framebuffer *fb;
        int ret;
        int i;
 
-       rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
-       if (!rockchip_fb)
+       fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+       if (!fb)
                return ERR_PTR(-ENOMEM);
 
-       drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd);
+       drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
        for (i = 0; i < num_planes; i++)
-               rockchip_fb->obj[i] = obj[i];
+               fb->obj[i] = obj[i];
 
-       ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
-                                  &rockchip_drm_fb_funcs);
+       ret = drm_framebuffer_init(dev, fb, &rockchip_drm_fb_funcs);
        if (ret) {
                DRM_DEV_ERROR(dev->dev,
                              "Failed to initialize framebuffer: %d\n",
                              ret);
-               kfree(rockchip_fb);
+               kfree(fb);
                return ERR_PTR(ret);
        }
 
-       return rockchip_fb;
+       return fb;
 }
 
 static struct drm_framebuffer *
 rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                        const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_framebuffer *fb;
        struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
        struct drm_gem_object *obj;
        unsigned int hsub;
@@ -153,13 +113,13 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                objs[i] = obj;
        }
 
-       rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
-       if (IS_ERR(rockchip_fb)) {
-               ret = PTR_ERR(rockchip_fb);
+       fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+       if (IS_ERR(fb)) {
+               ret = PTR_ERR(fb);
                goto err_gem_object_unreference;
        }
 
-       return &rockchip_fb->fb;
+       return fb;
 
 err_gem_object_unreference:
        for (i--; i >= 0; i--)
@@ -242,13 +202,13 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
                              const struct drm_mode_fb_cmd2 *mode_cmd,
                              struct drm_gem_object *obj)
 {
-       struct rockchip_drm_fb *rockchip_fb;
+       struct drm_framebuffer *fb;
 
-       rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
-       if (IS_ERR(rockchip_fb))
-               return ERR_CAST(rockchip_fb);
+       fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+       if (IS_ERR(fb))
+               return ERR_CAST(fb);
 
-       return &rockchip_fb->fb;
+       return fb;
 }
 
 void rockchip_drm_mode_config_init(struct drm_device *dev)
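
Note: the rockchip framebuffer rework relies on the GEM objects living in the generic drm_framebuffer.obj[] array, which lets the shared GEM framebuffer helpers take over destroy and handle creation. A minimal sketch of the resulting boilerplate (illustrative):

    #include <drm/drm_gem_framebuffer_helper.h>

    /* Sketch: with the GEM objects stored in fb->obj[], the shared
     * helpers can replace driver-private destroy/create_handle code. */
    static const struct drm_framebuffer_funcs example_fb_funcs = {
            .destroy       = drm_gem_fb_destroy,
            .create_handle = drm_gem_fb_create_handle,
    };
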
index 2fe47f1ee98fe226e0c893d333023a678d1995a1..f1265cb1aee86b4c302329f5f909b6fa8b45a598 100644 (file)
@@ -22,7 +22,4 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
 void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
 
 void rockchip_drm_mode_config_init(struct drm_device *dev);
-
-struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
-                                              unsigned int plane);
 #endif /* _ROCKCHIP_DRM_FB_H */
index 2121345a61affb3d915b18c583ee86e4864abc64..1359e5c773e4fe5bb9d31b465073897c3c741739 100644 (file)
@@ -243,18 +243,6 @@ static enum vop_data_format vop_convert_format(uint32_t format)
        }
 }
 
-static bool is_yuv_support(uint32_t format)
-{
-       switch (format) {
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV24:
-               return true;
-       default:
-               return false;
-       }
-}
-
 static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
                                  uint32_t dst, bool is_horizontal,
                                  int vsu_mode, int *vskiplines)
@@ -298,7 +286,8 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
        uint16_t cbcr_ver_scl_mode = SCALE_NONE;
        int hsub = drm_format_horz_chroma_subsampling(pixel_format);
        int vsub = drm_format_vert_chroma_subsampling(pixel_format);
-       bool is_yuv = is_yuv_support(pixel_format);
+       const struct drm_format_info *info;
+       bool is_yuv = false;
        uint16_t cbcr_src_w = src_w / hsub;
        uint16_t cbcr_src_h = src_h / vsub;
        uint16_t vsu_mode;
@@ -306,6 +295,11 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
        uint32_t val;
        int vskiplines;
 
+       info = drm_format_info(pixel_format);
+
+       if (info->is_yuv)
+               is_yuv = true;
+
        if (dst_w > 3840) {
                DRM_DEV_ERROR(vop->dev, "Maximum dst width (3840) exceeded\n");
                return;
@@ -486,6 +480,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
        spin_unlock_irqrestore(&vop->irq_lock, flags);
 }
 
+static int vop_core_clks_enable(struct vop *vop)
+{
+       int ret;
+
+       ret = clk_enable(vop->hclk);
+       if (ret < 0)
+               return ret;
+
+       ret = clk_enable(vop->aclk);
+       if (ret < 0)
+               goto err_disable_hclk;
+
+       return 0;
+
+err_disable_hclk:
+       clk_disable(vop->hclk);
+       return ret;
+}
+
+static void vop_core_clks_disable(struct vop *vop)
+{
+       clk_disable(vop->aclk);
+       clk_disable(vop->hclk);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -497,17 +516,13 @@ static int vop_enable(struct drm_crtc *crtc)
                return ret;
        }
 
-       ret = clk_enable(vop->hclk);
+       ret = vop_core_clks_enable(vop);
        if (WARN_ON(ret < 0))
                goto err_put_pm_runtime;
 
        ret = clk_enable(vop->dclk);
        if (WARN_ON(ret < 0))
-               goto err_disable_hclk;
-
-       ret = clk_enable(vop->aclk);
-       if (WARN_ON(ret < 0))
-               goto err_disable_dclk;
+               goto err_disable_core;
 
        /*
         * Slave iommu shares power, irq and clock with vop.  It was associated
@@ -519,7 +534,7 @@ static int vop_enable(struct drm_crtc *crtc)
        if (ret) {
                DRM_DEV_ERROR(vop->dev,
                              "failed to attach dma mapping, %d\n", ret);
-               goto err_disable_aclk;
+               goto err_disable_dclk;
        }
 
        spin_lock(&vop->reg_lock);
@@ -552,18 +567,14 @@ static int vop_enable(struct drm_crtc *crtc)
 
        spin_unlock(&vop->reg_lock);
 
-       enable_irq(vop->irq);
-
        drm_crtc_vblank_on(crtc);
 
        return 0;
 
-err_disable_aclk:
-       clk_disable(vop->aclk);
 err_disable_dclk:
        clk_disable(vop->dclk);
-err_disable_hclk:
-       clk_disable(vop->hclk);
+err_disable_core:
+       vop_core_clks_disable(vop);
 err_put_pm_runtime:
        pm_runtime_put_sync(vop->dev);
        return ret;
@@ -599,8 +610,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
 
        vop_dsp_hold_valid_irq_disable(vop);
 
-       disable_irq(vop->irq);
-
        vop->is_enabled = false;
 
        /*
@@ -609,8 +618,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
        rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
 
        clk_disable(vop->dclk);
-       clk_disable(vop->aclk);
-       clk_disable(vop->hclk);
+       vop_core_clks_disable(vop);
        pm_runtime_put(vop->dev);
        mutex_unlock(&vop->vop_lock);
 
@@ -666,7 +674,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
         * Src.x1 can become odd after clipping, but the start point of a
         * yuv plane needs to be aligned to 2 pixels.
         */
-       if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+       if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
                DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
                return -EINVAL;
        }
@@ -728,7 +736,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                return;
        }
 
-       obj = rockchip_fb_get_gem_obj(fb, 0);
+       obj = fb->obj[0];
        rk_obj = to_rockchip_obj(obj);
 
        actual_w = drm_rect_width(src) >> 16;
@@ -753,12 +761,12 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        VOP_WIN_SET(vop, win, format, format);
        VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
        VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
-       if (is_yuv_support(fb->format->format)) {
+       if (fb->format->is_yuv) {
                int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
                int vsub = drm_format_vert_chroma_subsampling(fb->format->format);
                int bpp = fb->format->cpp[1];
 
-               uv_obj = rockchip_fb_get_gem_obj(fb, 1);
+               uv_obj = fb->obj[1];
                rk_uv_obj = to_rockchip_obj(uv_obj);
 
                offset = (src->x1 >> 16) * bpp / hsub;
@@ -1177,6 +1185,18 @@ static irqreturn_t vop_isr(int irq, void *data)
        uint32_t active_irqs;
        int ret = IRQ_NONE;
 
+       /*
+        * The irq is shared with the iommu. If the runtime-pm state of the
+        * vop-device is disabled, the irq has to be targeted at the iommu.
+        */
+       if (!pm_runtime_get_if_in_use(vop->dev))
+               return IRQ_NONE;
+
+       if (vop_core_clks_enable(vop)) {
+               DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
+               goto out;
+       }
+
        /*
         * interrupt register has interrupt status, enable and clear bits, we
         * must hold irq_lock to avoid a race with enable/disable_vblank().
@@ -1192,7 +1212,7 @@ static irqreturn_t vop_isr(int irq, void *data)
 
        /* This is expected for vop iommu irqs, since the irq is shared */
        if (!active_irqs)
-               return IRQ_NONE;
+               goto out_disable;
 
        if (active_irqs & DSP_HOLD_VALID_INTR) {
                complete(&vop->dsp_hold_completion);
@@ -1218,6 +1238,10 @@ static irqreturn_t vop_isr(int irq, void *data)
                DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
                              active_irqs);
 
+out_disable:
+       vop_core_clks_disable(vop);
+out:
+       pm_runtime_put(vop->dev);
        return ret;
 }
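
Note: the interrupt rework above drops the enable_irq()/disable_irq() dance in favour of runtime-pm awareness: the line is shared with the iommu, so the handler first checks whether the VOP is actually in use. A minimal sketch of the pattern for a shared irq (example_* is illustrative):

    #include <linux/interrupt.h>
    #include <linux/pm_runtime.h>

    /* Sketch: a handler on a shared line bails out with IRQ_NONE while
     * its device is runtime suspended, so the interrupt is attributed
     * to the other device sharing the line. */
    static irqreturn_t example_shared_isr(int irq, void *data)
    {
            struct device *dev = data;

            if (!pm_runtime_get_if_in_use(dev))
                    return IRQ_NONE; /* not ours: device is suspended */

            /* ... read and clear the hardware status here ... */

            pm_runtime_put(dev);
            return IRQ_HANDLED;
    }
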
 
@@ -1278,7 +1302,7 @@ static int vop_create_crtc(struct vop *vop)
        for (i = 0; i < vop_data->win_size; i++) {
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win_data = vop_win->data;
-               unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+               unsigned long possible_crtcs = drm_crtc_mask(crtc);
 
                if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
                        continue;
@@ -1596,9 +1620,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
        if (ret)
                goto err_disable_pm_runtime;
 
-       /* IRQ is initially disabled; it gets enabled in power_on */
-       disable_irq(vop->irq);
-
        return 0;
 
 err_disable_pm_runtime:
index 084acdd0019a8d3d873e7fd8c590ed6de91313be..fcb91041a666d606257e20319023903b9a7f7e3d 100644 (file)
@@ -331,16 +331,19 @@ static inline int scl_vop_cal_lb_mode(int width, bool is_yuv)
 {
        int lb_mode;
 
-       if (width > 2560)
-               lb_mode = LB_RGB_3840X2;
-       else if (width > 1920)
-               lb_mode = LB_RGB_2560X4;
-       else if (!is_yuv)
-               lb_mode = LB_RGB_1920X5;
-       else if (width > 1280)
-               lb_mode = LB_YUV_3840X5;
-       else
-               lb_mode = LB_YUV_2560X8;
+       if (is_yuv) {
+               if (width > 1280)
+                       lb_mode = LB_YUV_3840X5;
+               else
+                       lb_mode = LB_YUV_2560X8;
+       } else {
+               if (width > 2560)
+                       lb_mode = LB_RGB_3840X2;
+               else if (width > 1920)
+                       lb_mode = LB_RGB_2560X4;
+               else
+                       lb_mode = LB_RGB_1920X5;
+       }
 
        return lb_mode;
 }
index e67f4ea28c0e4122e4a4ce5ae48843e2f9b8a847..456bd9f13baefdc87865e9a7404e4caa727b0b4c 100644 (file)
@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
                of_property_read_u32(endpoint, "reg", &endpoint_id);
                ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
                                                  &lvds->panel, &lvds->bridge);
-               if (!ret)
+               if (!ret) {
+                       of_node_put(endpoint);
                        break;
+               }
        }
        if (!child_count) {
                DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
@@ -432,7 +434,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
                drm_connector_helper_add(connector,
                                         &rockchip_lvds_connector_helper_funcs);
 
-               ret = drm_mode_connector_attach_encoder(connector, encoder);
+               ret = drm_connector_attach_encoder(connector, encoder);
                if (ret < 0) {
                        DRM_DEV_ERROR(drm_dev->dev,
                                      "failed to attach encoder: %d\n", ret);
@@ -446,14 +448,12 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
                        goto err_free_connector;
                }
        } else {
-               lvds->bridge->encoder = encoder;
                ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
                if (ret) {
                        DRM_DEV_ERROR(drm_dev->dev,
                                      "failed to attach bridge: %d\n", ret);
                        goto err_free_encoder;
                }
-               encoder->bridge = lvds->bridge;
        }
 
        pm_runtime_enable(dev);
index 2db89bed52e80625bee50917652a41c2dc2a5f8d..7559a820bd435362412ecb4eb76f6bc26735ad95 100644 (file)
@@ -971,7 +971,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
        if (dma && dma->buflist) {
-               if (cmdbuf->dma_idx > dma->buf_count) {
+               if (cmdbuf->dma_idx >= dma->buf_count) {
                        DRM_ERROR
                            ("vertex buffer index %u out of range (0-%u)\n",
                             cmdbuf->dma_idx, dma->buf_count - 1);
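
Note: the savage fix above closes an off-by-one: valid buffer indices run from 0 to buf_count - 1, so the old '>' test let dma_idx == buf_count read one element past the end of buflist. The same check sketched generically (illustrative):

    /* Sketch: bounds-check a user-supplied index before indexing.
     * '>= count' rejects idx == count, which '>' would have allowed. */
    static void *example_lookup(void **list, unsigned int count,
                                unsigned int idx)
    {
            if (idx >= count)
                    return NULL;    /* out of range */

            return list[idx];
    }
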
index bd0377c0d2eeaff0df66ce76ede9aa32d0225455..7665883f81d4b7d6756510cf22688665817963e1 100644 (file)
@@ -20,7 +20,6 @@
 # OTHER DEALINGS IN THE SOFTWARE.
 #
 #
-ccflags-y := -Iinclude/drm
 gpu-sched-y := gpu_scheduler.o sched_fence.o
 
 obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
index 44d480768dfe2d202790ee4a3303e55d348af783..1b733229201edfd4578ae58fae4552b1b3a36f6a 100644 (file)
  *
  */
 
+/**
+ * DOC: Overview
+ *
+ * The GPU scheduler provides entities which allow userspace to push jobs
+ * into software queues which are then scheduled on a hardware run queue.
+ * The software queues are prioritised among themselves, and the scheduler
+ * selects entities from a run queue in FIFO order. The scheduler also
+ * handles dependencies between jobs. The driver is expected to provide
+ * callback functions for backend operations, such as submitting a job to
+ * the hardware run queue or returning the dependencies of a job.
+ *
+ * The organisation of the scheduler is the following:
+ *
+ * 1. Each hw run queue has one scheduler
+ * 2. Each scheduler has multiple run queues with different priorities
+ *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
+ * 3. Each scheduler run queue has a queue of entities to schedule
+ * 4. Entities themselves maintain a queue of jobs that will be scheduled on
+ *    the hardware.
+ *
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
+ */
+
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
@@ -39,14 +62,30 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
 
-/* Initialize a given run queue struct */
-static void drm_sched_rq_init(struct drm_sched_rq *rq)
+/**
+ * drm_sched_rq_init - initialize a given run queue struct
+ *
+ * @sched: scheduler instance the run queue belongs to
+ * @rq: scheduler run queue
+ *
+ * Initializes a scheduler runqueue.
+ */
+static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
+                             struct drm_sched_rq *rq)
 {
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
+       rq->sched = sched;
 }
 
+/**
+ * drm_sched_rq_add_entity - add an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Adds a scheduler entity to the run queue.
+ */
 static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                                    struct drm_sched_entity *entity)
 {
@@ -57,6 +96,14 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
        spin_unlock(&rq->lock);
 }
 
+/**
+ * drm_sched_rq_remove_entity - remove an entity
+ *
+ * @rq: scheduler run queue
+ * @entity: scheduler entity
+ *
+ * Removes a scheduler entity from the run queue.
+ */
 static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                       struct drm_sched_entity *entity)
 {
@@ -70,9 +117,9 @@ static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 }
 
 /**
- * Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity - Select an entity which could provide a job to run
  *
- * @rq         The run queue to check.
+ * @rq: scheduler run queue to check.
  *
  * Try to find a ready entity, returns NULL if none found.
  */
@@ -112,30 +159,33 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 }
 
 /**
- * Init a context entity used by scheduler when submit to HW ring.
+ * drm_sched_entity_init - Init a context entity used by scheduler when
+ * submit to HW ring.
  *
- * @sched      The pointer to the scheduler
- * @entity     The pointer to a valid drm_sched_entity
- * @rq         The run queue this entity belongs
- * @guilty      atomic_t set to 1 when a job on this queue
- *              is found to be guilty causing a timeout
+ * @entity: scheduler entity to init
+ * @rq_list: the list of run queue on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queue in rq_list
+ * @guilty: atomic_t set to 1 when a job on this queue
+ *          is found to be guilty causing a timeout
  *
- * return 0 if succeed. negative error code on failure
+ * Note: the rq_list should have at least one element to schedule
+ *       the entity
+ *
+ * Returns 0 on success or a negative error code on failure.
 */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-                         struct drm_sched_entity *entity,
-                         struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                         struct drm_sched_rq **rq_list,
+                         unsigned int num_rq_list,
                          atomic_t *guilty)
 {
-       if (!(sched && entity && rq))
+       if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
                return -EINVAL;
 
        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
-       entity->rq = rq;
-       entity->sched = sched;
+       entity->rq = rq_list[0];
        entity->guilty = guilty;
-       entity->fini_status = 0;
        entity->last_scheduled = NULL;
 
        spin_lock_init(&entity->rq_lock);
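
Note: after this change an entity is bound to run queues directly instead of carrying a scheduler pointer. A minimal sketch of the new single-queue initialisation, assuming an already initialised scheduler (illustrative):

    #include <drm/gpu_scheduler.h>

    /* Sketch: initialise an entity on one run queue of a scheduler.
     * entity->rq is taken from rq_list[0]. */
    static int example_entity_setup(struct drm_gpu_scheduler *sched,
                                    struct drm_sched_entity *entity)
    {
            struct drm_sched_rq *rq =
                    &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

            return drm_sched_entity_init(entity, &rq, 1, NULL);
    }
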
@@ -149,40 +199,27 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 EXPORT_SYMBOL(drm_sched_entity_init);
 
 /**
- * Query if entity is initialized
+ * drm_sched_entity_is_idle - Check if entity is idle
  *
- * @sched       Pointer to scheduler instance
- * @entity     The pointer to a valid scheduler entity
+ * @entity: scheduler entity
  *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
-                                           struct drm_sched_entity *entity)
-{
-       return entity->sched == sched &&
-               entity->rq != NULL;
-}
-
-/**
- * Check if entity is idle
- *
- * @entity     The pointer to a valid scheduler entity
- *
- * Return true if entity don't has any unscheduled jobs.
+ * Returns true if the entity does not have any unscheduled jobs.
  */
 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
        rmb();
-       if (spsc_queue_peek(&entity->job_queue) == NULL)
+
+       if (list_empty(&entity->list) ||
+           spsc_queue_peek(&entity->job_queue) == NULL)
                return true;
 
        return false;
 }
 
 /**
- * Check if entity is ready
+ * drm_sched_entity_is_ready - Check if entity is ready
  *
- * @entity     The pointer to a valid scheduler entity
+ * @entity: scheduler entity
  *
  * Return true if entity could provide a job.
  */
@@ -210,44 +247,69 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 
 
 /**
- * Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
  *
- * @sched       Pointer to scheduler instance
- * @entity     The pointer to a valid scheduler entity
+ * @entity: scheduler entity
+ * @timeout: time to wait, in jiffies, for the job queue to become empty.
  *
- * Splitting drm_sched_entity_fini() into two functions, The first one is does the waiting,
+ * Splitting drm_sched_entity_fini() into two functions: the first one does the waiting,
  * removes the entity from the runqueue and returns an error when the process was killed.
+ *
+ * Returns the time, in jiffies, remaining from the input timeout.
  */
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
-       if (!drm_sched_entity_is_initialized(sched, entity))
-               return;
+       struct drm_gpu_scheduler *sched;
+       struct task_struct *last_user;
+       long ret = timeout;
+
+       sched = entity->rq->sched;
        /**
         * The client will not queue more IBs during this fini, consume existing
         * queued IBs or discard them on SIGKILL
        */
-       if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-               entity->fini_status = -ERESTARTSYS;
-       else
-               entity->fini_status = wait_event_killable(sched->job_scheduled,
-                                       drm_sched_entity_is_idle(entity));
-       drm_sched_entity_set_rq(entity, NULL);
+       if (current->flags & PF_EXITING) {
+               if (timeout)
+                       ret = wait_event_timeout(
+                                       sched->job_scheduled,
+                                       drm_sched_entity_is_idle(entity),
+                                       timeout);
+       } else
+               wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity));
+
+
+       /* For a killed process, disable any further IB enqueueing right now */
+       last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+       if ((!last_user || last_user == current->group_leader) &&
+           (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+               drm_sched_rq_remove_entity(entity->rq, entity);
+
+       return ret;
 }
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
 
 /**
- * Destroy a context entity
+ * drm_sched_entity_fini - Destroy a context entity
+ *
+ * @entity: scheduler entity
  *
- * @sched       Pointer to scheduler instance
- * @entity     The pointer to a valid scheduler entity
+ * This should be called after drm_sched_entity_flush(). It goes over the
+ * entity and signals all jobs with an error code if the process was killed.
  *
- * The second one then goes over the entity and signals all jobs with an error code.
  */
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
-       if (entity->fini_status) {
+       struct drm_gpu_scheduler *sched;
+
+       sched = entity->rq->sched;
+       drm_sched_rq_remove_entity(entity->rq, entity);
+
+       /* Consumption of existing IBs wasn't completed. Forcefully
+        * remove them here.
+        */
+       if (spsc_queue_peek(&entity->job_queue)) {
                struct drm_sched_job *job;
                int r;
 
@@ -267,27 +329,44 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
                        struct drm_sched_fence *s_fence = job->s_fence;
                        drm_sched_fence_scheduled(s_fence);
                        dma_fence_set_error(&s_fence->finished, -ESRCH);
-                       r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
-                                                       drm_sched_entity_kill_jobs_cb);
-                       if (r == -ENOENT)
+
+                       /*
+                        * When the pipe is hung by an older entity, a new entity
+                        * might not even have had a chance to submit its first job
+                        * to HW, and so entity->last_scheduled will remain NULL
+                        */
+                       if (!entity->last_scheduled) {
                                drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
-                       else if (r)
-                               DRM_ERROR("fence add callback failed (%d)\n", r);
+                       } else {
+                               r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+                                                               drm_sched_entity_kill_jobs_cb);
+                               if (r == -ENOENT)
+                                       drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+                               else if (r)
+                                       DRM_ERROR("fence add callback failed (%d)\n", r);
+                       }
                }
        }
 
        dma_fence_put(entity->last_scheduled);
        entity->last_scheduled = NULL;
 }
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
 
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-                               struct drm_sched_entity *entity)
+/**
+ * drm_sched_entity_destroy - Destroy a context entity
+ *
+ * @entity: scheduler entity
+ *
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
+ */
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-       drm_sched_entity_do_release(sched, entity);
-       drm_sched_entity_cleanup(sched, entity);
+       drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+       drm_sched_entity_fini(entity);
 }
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
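
Note: the old do_release()/cleanup()/fini() trio thus becomes flush()/fini(), with destroy() as the convenience wrapper. A minimal teardown sketch under the new names (illustrative):

    #include <linux/jiffies.h>
    #include <drm/gpu_scheduler.h>

    /* Sketch: graceful entity teardown with the renamed API. */
    static void example_entity_teardown(struct drm_sched_entity *entity)
    {
            /* wait up to a second for queued jobs to drain ... */
            drm_sched_entity_flush(entity, msecs_to_jiffies(1000));
            /* ... then signal whatever is left with -ESRCH and free */
            drm_sched_entity_fini(entity);
    }

    /* Drivers without special needs can use the one-shot wrapper:
     *      drm_sched_entity_destroy(entity);
     */
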
 
 static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
@@ -295,7 +374,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
                container_of(cb, struct drm_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
-       drm_sched_wakeup(entity->sched);
+       drm_sched_wakeup(entity->rq->sched);
 }
 
 static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -306,29 +385,43 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb
        dma_fence_put(f);
 }
 
+/**
+ * drm_sched_entity_set_rq - Sets the run queue for an entity
+ *
+ * @entity: scheduler entity
+ * @rq: scheduler run queue
+ *
+ * Sets the run queue for an entity and removes the entity from the previous
+ * run queue in which was present.
+ */
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
                             struct drm_sched_rq *rq)
 {
        if (entity->rq == rq)
                return;
 
-       spin_lock(&entity->rq_lock);
-
-       if (entity->rq)
-               drm_sched_rq_remove_entity(entity->rq, entity);
+       BUG_ON(!rq);
 
+       spin_lock(&entity->rq_lock);
+       drm_sched_rq_remove_entity(entity->rq, entity);
        entity->rq = rq;
-       if (rq)
-               drm_sched_rq_add_entity(rq, entity);
-
+       drm_sched_rq_add_entity(rq, entity);
        spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_rq);
 
+/**
+ * drm_sched_dependency_optimized
+ *
+ * @fence: the dependency fence
+ * @entity: the entity which depends on the above fence
+ *
+ * Returns true if the dependency can be optimized and false otherwise
+ */
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_fence *s_fence;
 
        if (!fence || dma_fence_is_signaled(fence))
@@ -345,7 +438,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence * fence = entity->dependency;
        struct drm_sched_fence *s_fence;
 
@@ -390,7 +483,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 static struct drm_sched_job *
 drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-       struct drm_gpu_scheduler *sched = entity->sched;
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_job *sched_job = to_drm_sched_job(
                                                spsc_queue_peek(&entity->job_queue));
 
@@ -413,9 +506,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 }
 
 /**
- * Submit a job to the job queue
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
- * @sched_job          The pointer to job required to submit
+ * @sched_job: job to submit
+ * @entity: scheduler entity
  *
  * Note: To guarantee that the order of insertion to queue matches
  * the job's fence sequence number this function should be
@@ -431,12 +525,18 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
        trace_drm_sched_job(sched_job, entity);
 
+       WRITE_ONCE(entity->last_user, current->group_leader);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                spin_lock(&entity->rq_lock);
+               if (!entity->rq) {
+                       DRM_ERROR("Trying to push to a killed entity\n");
+                       spin_unlock(&entity->rq_lock);
+                       return;
+               }
                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);
                drm_sched_wakeup(sched);
@@ -506,6 +606,13 @@ static void drm_sched_job_timedout(struct work_struct *work)
        job->sched->ops->timedout_job(job);
 }
 
+/**
+ * drm_sched_hw_job_reset - stop the scheduler if it contains the bad job
+ *
+ * @sched: scheduler instance
+ * @bad: bad scheduler job
+ *
+ */
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 {
        struct drm_sched_job *s_job;
@@ -550,6 +657,12 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 }
 EXPORT_SYMBOL(drm_sched_hw_job_reset);
 
+/**
+ * drm_sched_job_recovery - recover jobs after a reset
+ *
+ * @sched: scheduler instance
+ *
+ */
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
        struct drm_sched_job *s_job, *tmp;
@@ -599,16 +712,24 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
 /**
- * Init a sched_job with basic field
+ * drm_sched_job_init - init a scheduler job
  *
- * Note: Refer to drm_sched_entity_push_job documentation
+ * @job: scheduler job to init
+ * @sched: scheduler instance
+ * @entity: scheduler entity to use
+ * @owner: job owner for debugging
+ *
+ * Refer to drm_sched_entity_push_job() documentation
  * for locking considerations.
+ *
+ * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-                      struct drm_gpu_scheduler *sched,
                       struct drm_sched_entity *entity,
                       void *owner)
 {
+       struct drm_gpu_scheduler *sched = entity->rq->sched;
+
        job->sched = sched;
        job->entity = entity;
        job->s_priority = entity->rq - sched->sched_rq;
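
Note: with the scheduler now derived from entity->rq, job submission drops the explicit scheduler argument. A minimal sketch of the new submit path (illustrative; 'owner' is whatever debugging cookie the driver uses):

    #include <drm/gpu_scheduler.h>

    /* Sketch: init a job against an entity and push it. */
    static int example_submit(struct drm_sched_job *job,
                              struct drm_sched_entity *entity,
                              void *owner)
    {
            int ret;

            ret = drm_sched_job_init(job, entity, owner); /* no sched arg */
            if (ret)
                    return ret;

            drm_sched_entity_push_job(job, entity);
            return 0;
    }
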
@@ -626,7 +747,11 @@ int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
- * Return ture if we can push more jobs to the hw.
+ * drm_sched_ready - is the scheduler ready
+ *
+ * @sched: scheduler instance
+ *
+ * Return true if we can push more jobs to the hw, otherwise false.
  */
 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 {
@@ -635,7 +760,10 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * Wake up the scheduler when it is ready
+ * drm_sched_wakeup - Wake up the scheduler when it is ready
+ *
+ * @sched: scheduler instance
+ *
  */
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 {
@@ -644,8 +772,12 @@ static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 }
 
 /**
- * Select next entity to process
-*/
+ * drm_sched_select_entity - Select next entity to process
+ *
+ * @sched: scheduler instance
+ *
+ * Returns the entity to process or NULL if none are found.
+ */
 static struct drm_sched_entity *
 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 {
@@ -665,6 +797,14 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
        return entity;
 }
 
+/**
+ * drm_sched_process_job - process a job
+ *
+ * @f: fence
+ * @cb: fence callbacks
+ *
+ * Called after job has finished execution.
+ */
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 {
        struct drm_sched_fence *s_fence =
@@ -680,6 +820,13 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+/**
+ * drm_sched_blocked - check if the scheduler is blocked
+ *
+ * @sched: scheduler instance
+ *
+ * Returns true if blocked, otherwise false.
+ */
 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 {
        if (kthread_should_park()) {
@@ -690,6 +837,13 @@ static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
        return false;
 }
 
+/**
+ * drm_sched_main - main scheduler thread
+ *
+ * @param: scheduler instance
+ *
+ * Returns 0.
+ */
 static int drm_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
@@ -744,15 +898,17 @@ static int drm_sched_main(void *param)
 }
 
 /**
- * Init a gpu scheduler instance
+ * drm_sched_init - Init a gpu scheduler instance
  *
- * @sched              The pointer to the scheduler
- * @ops                        The backend operations for this scheduler.
- * @hw_submissions     Number of hw submissions to do.
- * @name               Name used for debugging
+ * @sched: scheduler instance
+ * @ops: backend operations for this scheduler
+ * @hw_submission: number of hw submissions that can be in flight
+ * @hang_limit: number of times to allow a job to hang before dropping it
+ * @timeout: timeout value in jiffies for the scheduler
+ * @name: name used for debugging
  *
  * Return 0 on success, otherwise error code.
-*/
+ */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   unsigned hw_submission,
@@ -767,7 +923,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
        sched->timeout = timeout;
        sched->hang_limit = hang_limit;
        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
-               drm_sched_rq_init(&sched->sched_rq[i]);
+               drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
@@ -788,9 +944,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 EXPORT_SYMBOL(drm_sched_init);
 
 /**
- * Destroy a gpu scheduler
+ * drm_sched_fini - Destroy a gpu scheduler
+ *
+ * @sched: scheduler instance
  *
- * @sched      The pointer to the scheduler
+ * Tears down and cleans up the scheduler.
  */
 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
index df4461648e3fe496e37125005c1516dc55034da1..d8d2dff9ea2f79d17479426db774a5b8049d90e7 100644 (file)
@@ -81,11 +81,6 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
        return (const char *)fence->sched->name;
 }
 
-static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
-{
-       return true;
-}
-
 /**
  * drm_sched_fence_free - free up the fence memory
  *
@@ -134,18 +129,12 @@ static void drm_sched_fence_release_finished(struct dma_fence *f)
 const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
        .get_driver_name = drm_sched_fence_get_driver_name,
        .get_timeline_name = drm_sched_fence_get_timeline_name,
-       .enable_signaling = drm_sched_fence_enable_signaling,
-       .signaled = NULL,
-       .wait = dma_fence_default_wait,
        .release = drm_sched_fence_release_scheduled,
 };
 
 const struct dma_fence_ops drm_sched_fence_ops_finished = {
        .get_driver_name = drm_sched_fence_get_driver_name,
        .get_timeline_name = drm_sched_fence_get_timeline_name,
-       .enable_signaling = drm_sched_fence_enable_signaling,
-       .signaled = NULL,
-       .wait = dma_fence_default_wait,
        .release = drm_sched_fence_release_finished,
 };
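
Note: this cleanup leans on the dma-fence core making .enable_signaling, .signaled and .wait optional, with sane defaults. A minimal ops table now looks like this (example_* names are illustrative):

    #include <linux/dma-fence.h>

    static const char *example_driver_name(struct dma_fence *f)
    {
            return "example";
    }

    static const char *example_timeline_name(struct dma_fence *f)
    {
            return "example-timeline";
    }

    /* Sketch: only the two name callbacks are mandatory; the core
     * falls back to default signaling and wait behaviour. */
    static const struct dma_fence_ops example_fence_ops = {
            .get_driver_name   = example_driver_name,
            .get_timeline_name = example_timeline_name,
    };
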
 
@@ -172,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
                return NULL;
 
        fence->owner = owner;
-       fence->sched = entity->sched;
+       fence->sched = entity->rq->sched;
        spin_lock_init(&fence->lock);
 
        seq = atomic_inc_return(&entity->fence_seq);
index 54acc117550cfac5fe16e38e753b6067a46ca884..6b943ea1c57daf97143b13f278b6679b6c1d9798 100644 (file)
@@ -19,7 +19,9 @@ selftest(align64, igt_align64)
 selftest(evict, igt_evict)
 selftest(evict_range, igt_evict_range)
 selftest(bottomup, igt_bottomup)
+selftest(lowest, igt_lowest)
 selftest(topdown, igt_topdown)
+selftest(highest, igt_highest)
 selftest(color, igt_color)
 selftest(color_evict, igt_color_evict)
 selftest(color_evict_range, igt_color_evict_range)
index 933af1c253878acc64e05da0b6f73eee01fcc5c9..fbed2c90fd51ea4dcb5aab0ee8d6e5c7dadadfd6 100644 (file)
@@ -1825,6 +1825,77 @@ err:
        return ret;
 }
 
+static int __igt_once(unsigned int mode)
+{
+       struct drm_mm mm;
+       struct drm_mm_node rsvd_lo, rsvd_hi, node;
+       int err;
+
+       drm_mm_init(&mm, 0, 7);
+
+       memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+       rsvd_lo.start = 1;
+       rsvd_lo.size = 1;
+       err = drm_mm_reserve_node(&mm, &rsvd_lo);
+       if (err) {
+               pr_err("Could not reserve low node\n");
+               goto err;
+       }
+
+       memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+       rsvd_hi.start = 5;
+       rsvd_hi.size = 1;
+       err = drm_mm_reserve_node(&mm, &rsvd_hi);
+       if (err) {
+               pr_err("Could not reserve high node\n");
+               goto err_lo;
+       }
+
+       if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+               pr_err("Expected a hole after lo and high nodes!\n");
+               err = -EINVAL;
+               goto err_hi;
+       }
+
+       memset(&node, 0, sizeof(node));
+       err = drm_mm_insert_node_generic(&mm, &node,
+                                        2, 0, 0,
+                                        mode | DRM_MM_INSERT_ONCE);
+       if (!err) {
+               pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
+                      node.start);
+               err = -EINVAL;
+               goto err_node;
+       }
+
+       err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
+       if (err) {
+               pr_err("Could not insert the node into the available hole!\n");
+               err = -EINVAL;
+               goto err_hi;
+       }
+
+err_node:
+       drm_mm_remove_node(&node);
+err_hi:
+       drm_mm_remove_node(&rsvd_hi);
+err_lo:
+       drm_mm_remove_node(&rsvd_lo);
+err:
+       drm_mm_takedown(&mm);
+       return err;
+}
+
+static int igt_lowest(void *ignored)
+{
+       return __igt_once(DRM_MM_INSERT_LOW);
+}
+
+static int igt_highest(void *ignored)
+{
+       return __igt_once(DRM_MM_INSERT_HIGH);
+}
+
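
Note: the new selftests exercise DRM_MM_INSERT_ONCE, which limits the search to the single hole the chosen mode would try first instead of scanning every hole. Sketched usage (illustrative):

    #include <drm/drm_mm.h>

    /* Sketch: with ONCE, only the first hole the mode would pick is
     * tried; -ENOSPC means that specific hole could not fit the node,
     * even if another hole could have. */
    static int example_insert_lowest_only(struct drm_mm *mm,
                                          struct drm_mm_node *node, u64 size)
    {
            return drm_mm_insert_node_generic(mm, node, size, 0, 0,
                                              DRM_MM_INSERT_LOW |
                                              DRM_MM_INSERT_ONCE);
    }
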
 static void separate_adjacent_colors(const struct drm_mm_node *node,
                                     unsigned long color,
                                     u64 *start,
index 40df8887fc1765e3452f5490ccf9fc242f614cbc..fc66167b064177d15c23ca1d4060fdaa6c10385d 100644 (file)
@@ -675,7 +675,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
        if (ret < 0)
                goto err_cleanup;
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret < 0)
                goto err_backlight;
 
index df0a282b96152f6949cdb0d2db79a6260cc09c42..57b870e1e6964ae1739d81b5163f371dbf6daa92 100644 (file)
@@ -332,7 +332,7 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
 {
        DRM_DEBUG_DRIVER("\n");
 
-       drm_plane_helper_disable(drm_plane);
+       drm_plane_helper_disable(drm_plane, NULL);
        drm_plane_cleanup(drm_plane);
 }
 
index 90c46b49c931571aae7c8d04e014932fbdd884c6..832fc43960ee48bf3482ac297f40661805527ded 100644 (file)
@@ -224,7 +224,7 @@ static int sti_bind(struct device *dev)
 
        ret = sti_init(ddev);
        if (ret)
-               goto err_drm_dev_unref;
+               goto err_drm_dev_put;
 
        ret = component_bind_all(ddev->dev, ddev);
        if (ret)
@@ -248,8 +248,8 @@ err_register:
        drm_mode_config_cleanup(ddev);
 err_cleanup:
        sti_cleanup(ddev);
-err_drm_dev_unref:
-       drm_dev_unref(ddev);
+err_drm_dev_put:
+       drm_dev_put(ddev);
        return ret;
 }
 
@@ -259,7 +259,7 @@ static void sti_unbind(struct device *dev)
 
        drm_dev_unregister(ddev);
        sti_cleanup(ddev);
-       drm_dev_unref(ddev);
+       drm_dev_put(ddev);
 }
 
 static const struct component_master_ops sti_ops = {
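drm_dev_unref() was renamed to drm_dev_put() to match the kernel-wide get/put convention for reference-counted objects; this hunk and the stm and sun4i ones below are mechanically identical. The surrounding error handling is the usual goto unwind ladder, where each acquired resource adds one label and a failure releases everything acquired so far in reverse order. A self-contained sketch of the shape (the step names are invented for illustration):

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
        printf("%s\n", name);
        return fail ? -1 : 0;
    }

    static void undo(const char *name)
    {
        printf("undo %s\n", name);
    }

    static int bind(int fail_at)
    {
        int ret;

        ret = step("alloc", fail_at == 0); /* drm_dev_alloc() */
        if (ret)
            return ret;

        ret = step("init", fail_at == 1);
        if (ret)
            goto err_put;

        ret = step("register", fail_at == 2);
        if (ret)
            goto err_cleanup;

        return 0;

    err_cleanup:
        undo("init");
    err_put:
        undo("alloc"); /* drm_dev_put(): drops the last reference */
        return ret;
    }

    int main(void)
    {
        return bind(2) ? 1 : 0; /* teardown runs in reverse order */
    }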
index a5979cd25cc7d8264b9bc9edd462bbb1a31a8e53..b08376b7611b896fbcb8fd6b3e91fe44dd4b7015 100644 (file)
@@ -387,7 +387,9 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
 
        if (!dvo->panel) {
                dvo->panel = of_drm_find_panel(dvo->panel_node);
-               if (dvo->panel)
+               if (IS_ERR(dvo->panel))
+                       dvo->panel = NULL;
+               else
                        drm_panel_attach(dvo->panel, connector);
        }
 
@@ -484,7 +486,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
        drm_connector_helper_add(drm_connector,
                                 &sti_dvo_connector_helper_funcs);
 
-       err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+       err = drm_connector_attach_encoder(drm_connector, encoder);
        if (err) {
                DRM_ERROR("Failed to attach a connector to a encoder\n");
                goto err_sysfs;
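of_drm_find_panel() now returns an ERR_PTR-encoded errno instead of NULL on failure, which is why this hunk (and the sun6i DSI attach hunk further down) switches to IS_ERR(). A userspace model of the encoding, simplified from what include/linux/err.h does: error values occupy the last page of the address space, so a pointer in that range is an errno and anything else is a real object.

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *panel = ERR_PTR(-517); /* -EPROBE_DEFER */

        if (IS_ERR(panel))
            printf("no panel yet, error %ld\n", PTR_ERR(panel));
        return 0;
    }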
index 9b2c47051b51ed50ce5153f8a33aa15ac2178348..c32de6cbf06164cb9a95542dfcdc2173976249e6 100644 (file)
@@ -211,7 +211,11 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
        struct drm_info_node *node = s->private;
        struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
        struct drm_plane *drm_plane = &gdp->plane.drm_plane;
-       struct drm_crtc *crtc = drm_plane->crtc;
+       struct drm_crtc *crtc;
+
+       drm_modeset_lock(&drm_plane->mutex, NULL);
+       crtc = drm_plane->state->crtc;
+       drm_modeset_unlock(&drm_plane->mutex);
 
        seq_printf(s, "%s: (vaddr = 0x%p)",
                   sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -879,7 +883,7 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
 {
        DRM_DEBUG_DRIVER("\n");
 
-       drm_plane_helper_disable(drm_plane);
+       drm_plane_helper_disable(drm_plane, NULL);
        drm_plane_cleanup(drm_plane);
 }
 
index 67bbdb49fffc96fe54ebd44849b472db3ba248fc..49438337f70dc5e66ac2c12a5b41737dc17fd491 100644 (file)
@@ -709,7 +709,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
        drm_connector_helper_add(drm_connector,
                        &sti_hda_connector_helper_funcs);
 
-       err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+       err = drm_connector_attach_encoder(drm_connector, encoder);
        if (err) {
                DRM_ERROR("Failed to attach a connector to a encoder\n");
                goto err_sysfs;
index 58f4311025128d90acd5b617b388a8276acec973..34cdc4644435046711d388e41cd7ba71660a7fe1 100644 (file)
@@ -977,7 +977,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
        cec_notifier_set_phys_addr_from_edid(hdmi->notifier, edid);
 
        count = drm_add_edid_modes(connector, edid);
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
 
        kfree(edid);
        return count;
@@ -1290,7 +1290,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 
        hdmi->drm_connector = drm_connector;
 
-       err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+       err = drm_connector_attach_encoder(drm_connector, encoder);
        if (err) {
                DRM_ERROR("Failed to attach a connector to a encoder\n");
                goto err_sysfs;
index 106be8c4e58b9a186e664da52fd7afaf92df5d0b..03ac3b4a44692d529aec1b4f496d4648728e56c8 100644 (file)
@@ -1260,7 +1260,7 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
 {
        DRM_DEBUG_DRIVER("\n");
 
-       drm_plane_helper_disable(drm_plane);
+       drm_plane_helper_disable(drm_plane, NULL);
        drm_plane_cleanup(drm_plane);
 }
 
index 8698e08313e1ef4411b8568116582ffb8adfcf03..f2021b23554d316b5270563fbb45f4ddf939bef9 100644 (file)
@@ -148,16 +148,16 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
 
        ret = drv_load(ddev);
        if (ret)
-               goto err_unref;
+               goto err_put;
 
        ret = drm_dev_register(ddev, 0);
        if (ret)
-               goto err_unref;
+               goto err_put;
 
        return 0;
 
-err_unref:
-       drm_dev_unref(ddev);
+err_put:
+       drm_dev_put(ddev);
 
        return ret;
 }
@@ -170,7 +170,7 @@ static int stm_drm_platform_remove(struct platform_device *pdev)
 
        drm_dev_unregister(ddev);
        drv_unload(ddev);
-       drm_dev_unref(ddev);
+       drm_dev_put(ddev);
 
        return 0;
 }
index d997a6014d6c2a27342819ac5b2d91f7ab52f094..808d9fb627e97ab07562c17183ade0508abfe0b7 100644 (file)
@@ -457,6 +457,14 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
        int target_max = target + CLK_TOLERANCE_HZ;
        int result;
 
+       result = clk_round_rate(ldev->pixel_clk, target);
+
+       DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+
+       /* Filter modes according to the max frequency supported by the pads */
+       if (result > ldev->caps.pad_max_freq_hz)
+               return MODE_CLOCK_HIGH;
+
        /*
         * Accept all "preferred" modes:
         * - this is important for panels because panel clock tolerances are
@@ -468,10 +476,6 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
        if (mode->type & DRM_MODE_TYPE_PREFERRED)
                return MODE_OK;
 
-       result = clk_round_rate(ldev->pixel_clk, target);
-
-       DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
-
        /*
         * Filter modes according to the clock value, particularly useful for
         * hdmi modes that require precise pixel clocks.
@@ -991,11 +995,15 @@ static int ltdc_get_caps(struct drm_device *ddev)
                 * does not work on 2nd layer.
                 */
                ldev->caps.non_alpha_only_l1 = true;
+               ldev->caps.pad_max_freq_hz = 90000000;
+               if (ldev->caps.hw_version == HWVER_10200)
+                       ldev->caps.pad_max_freq_hz = 65000000;
                break;
        case HWVER_20101:
                ldev->caps.reg_ofs = REG_OFS_4;
                ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
                ldev->caps.non_alpha_only_l1 = false;
+               ldev->caps.pad_max_freq_hz = 150000000;
                break;
        default:
                return -ENODEV;
@@ -1074,8 +1082,11 @@ int ltdc_load(struct drm_device *ddev)
                }
        }
 
-       if (!IS_ERR(rstc))
+       if (!IS_ERR(rstc)) {
+               reset_control_assert(rstc);
+               usleep_range(10, 20);
                reset_control_deassert(rstc);
+       }
 
        /* Disable interrupts */
        reg_clear(ldev->regs, LTDC_IER,
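Three ltdc changes land together: each hardware version gains a pad_max_freq_hz cap (90 MHz on the 1.x IP, lowered to 65 MHz for HWVER_10200, and 150 MHz on HWVER_20101), the clk_round_rate() call moves above the preferred-mode early return so the pad cap filters every mode including preferred ones, and the probe path now pulses the reset line (assert, 10 to 20 µs, deassert) instead of only deasserting it. The reordered filter as a hedged sketch; the function and parameter names are illustrative:

    enum mode_status { MODE_OK, MODE_CLOCK_HIGH, MODE_CLOCK_RANGE };

    /* The pad limit is an electrical ceiling, so it must run before the
     * preferred-mode early accept; the precision check afterwards stays
     * best-effort for non-preferred modes. */
    static enum mode_status mode_valid(long rounded_hz, long pad_max_hz,
                                       int preferred,
                                       long target_min, long target_max)
    {
        if (rounded_hz > pad_max_hz)
            return MODE_CLOCK_HIGH;

        if (preferred)
            return MODE_OK; /* panel clock tolerances are large */

        if (rounded_hz < target_min || rounded_hz > target_max)
            return MODE_CLOCK_RANGE; /* HDMI wants precise pixel clocks */

        return MODE_OK;
    }

Under the sketch's logic, a 74.25 MHz mode on HWVER_10200 (65 MHz cap) is rejected as MODE_CLOCK_HIGH even when flagged preferred, which is exactly what the reordering buys.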
index 1e16d6afb0d2f6357f279fa3f5d05c650e6fc37e..d5afb89608671153681aca3983b0daacca528eda 100644 (file)
@@ -18,6 +18,7 @@ struct ltdc_caps {
        u32 bus_width;          /* bus width (32 or 64 bits) */
        const u32 *pix_fmt_hw;  /* supported pixel formats */
        bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
+       int pad_max_freq_hz;    /* max frequency supported by pad */
 };
 
 #define LTDC_MAX_LAYER 4
index 156a865c3e6d0bdf437ed0fc04a154dbd5d7dcac..c2c042287c19c26e899141e952bdf16d344c2904 100644 (file)
@@ -68,4 +68,11 @@ config DRM_SUN8I_MIXER
          graphics mixture and feed graphics to TCON, If M is
          selected the module will be called sun8i-mixer.
 
+config DRM_SUN8I_TCON_TOP
+       tristate
+       default DRM_SUN4I if DRM_SUN8I_MIXER!=n
+       help
+         TCON TOP is responsible for configuring the display pipeline for
+         HDMI, TVE and LCD.
+
 endif
index 2589f4acd5ae22ab6255e3a7baa35db2ac38c073..0eb38ac8e86e51112b5800a69b3e45956c54cf6f 100644 (file)
@@ -32,8 +32,12 @@ obj-$(CONFIG_DRM_SUN4I)              += sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i_tv.o
 obj-$(CONFIG_DRM_SUN4I)                += sun6i_drc.o
 
-obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I)                += sun4i-frontend.o
+endif
 obj-$(CONFIG_DRM_SUN4I_HDMI)   += sun4i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN6I_DSI)    += sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)        += sun8i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN8I_MIXER)  += sun8i-mixer.o
+obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o
index de0a76dfa1a26b33d4abf8873bbf65c0ec38e146..d7950b52a1fd996cdbbaba632dce92db3cfe3842 100644 (file)
@@ -86,12 +86,6 @@ static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
        }
 }
 
-static inline bool sun4i_backend_format_is_yuv(uint32_t format)
-{
-       return sun4i_backend_format_is_planar_yuv(format) ||
-               sun4i_backend_format_is_packed_yuv422(format);
-}
-
 static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
 {
        int i;
@@ -304,7 +298,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
                           SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
                           val);
 
-       if (sun4i_backend_format_is_yuv(fb->format->format))
+       if (fb->format->is_yuv)
                return sun4i_backend_update_yuv_format(backend, layer, plane);
 
        ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
@@ -384,7 +378,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
         */
        paddr -= PHYS_OFFSET;
 
-       if (sun4i_backend_format_is_yuv(fb->format->format))
+       if (fb->format->is_yuv)
                return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
 
        /* Write the 32 lower bits of the address (in bits) */
@@ -502,7 +496,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
                if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
                        num_alpha_planes++;
 
-               if (sun4i_backend_format_is_yuv(fb->format->format)) {
+               if (fb->format->is_yuv) {
                        DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
                        num_yuv_planes++;
                }
index 2d7c57406715a7cd7ea2a8e8466f4dffb7ea0f39..3eedf335a935c72b385657691d6c4290b7b9a954 100644 (file)
@@ -242,7 +242,7 @@ struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
 
        /* Set possible_crtcs to this crtc for overlay planes */
        for (i = 0; planes[i]; i++) {
-               uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
+               uint32_t possible_crtcs = drm_crtc_mask(&scrtc->crtc);
                struct drm_plane *plane = planes[i];
 
                if (plane->type == DRM_PLANE_TYPE_OVERLAY)
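drm_crtc_mask() computes exactly BIT(drm_crtc_index(crtc)), so this conversion, repeated in the LVDS and RGB encoder hunks below, is behavior-neutral. Modeled in isolation:

    #include <stdint.h>
    #include <stdio.h>

    struct crtc { unsigned int index; };

    /* the bit this CRTC occupies in an encoder's possible_crtcs mask */
    static inline uint32_t crtc_mask(const struct crtc *crtc)
    {
        return (uint32_t)1 << crtc->index;
    }

    int main(void)
    {
        struct crtc c = { .index = 2 };

        printf("0x%x\n", crtc_mask(&c)); /* 0x4 */
        return 0;
    }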
index 50d19605c38fb38ac90fe7778d1b2f6bb069ccd3..dd19d674055c625c20e4e78cfdc61b50fefa6743 100644 (file)
@@ -26,6 +26,7 @@
 #include "sun4i_frontend.h"
 #include "sun4i_framebuffer.h"
 #include "sun4i_tcon.h"
+#include "sun8i_tcon_top.h"
 
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
 
@@ -143,7 +144,7 @@ cleanup_mode_config:
        drm_mode_config_cleanup(drm);
        of_reserved_mem_device_release(dev);
 free_drm:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        return ret;
 }
 
@@ -156,7 +157,7 @@ static void sun4i_drv_unbind(struct device *dev)
        sun4i_framebuffer_free(drm);
        drm_mode_config_cleanup(drm);
        of_reserved_mem_device_release(dev);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops sun4i_drv_master_ops = {
@@ -197,6 +198,28 @@ static bool sun4i_drv_node_is_tcon(struct device_node *node)
        return !!of_match_node(sun4i_tcon_of_table, node);
 }
 
+static bool sun4i_drv_node_is_tcon_with_ch0(struct device_node *node)
+{
+       const struct of_device_id *match;
+
+       match = of_match_node(sun4i_tcon_of_table, node);
+       if (match) {
+               struct sun4i_tcon_quirks *quirks;
+
+               quirks = (struct sun4i_tcon_quirks *)match->data;
+
+               return quirks->has_channel_0;
+       }
+
+       return false;
+}
+
+static bool sun4i_drv_node_is_tcon_top(struct device_node *node)
+{
+       return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+               !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
 static int compare_of(struct device *dev, void *data)
 {
        DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n",
@@ -231,12 +254,69 @@ struct endpoint_list {
        DECLARE_KFIFO(fifo, struct device_node *, 16);
 };
 
+static void sun4i_drv_traverse_endpoints(struct endpoint_list *list,
+                                        struct device_node *node,
+                                        int port_id)
+{
+       struct device_node *ep, *remote, *port;
+
+       port = of_graph_get_port_by_id(node, port_id);
+       if (!port) {
+               DRM_DEBUG_DRIVER("No output to bind on port %d\n", port_id);
+               return;
+       }
+
+       for_each_available_child_of_node(port, ep) {
+               remote = of_graph_get_remote_port_parent(ep);
+               if (!remote) {
+                       DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+                       continue;
+               }
+
+               if (sun4i_drv_node_is_tcon(node)) {
+                       /*
+                        * TCON TOP is always probed before TCON. However, TCON
+                        * points back to TCON TOP when it is the source for HDMI.
+                        * We have to skip it here to prevent infinite looping
+                        * between TCON TOP and TCON.
+                        */
+                       if (sun4i_drv_node_is_tcon_top(remote)) {
+                               DRM_DEBUG_DRIVER("TCON output endpoint is TCON TOP... skipping\n");
+                               of_node_put(remote);
+                               continue;
+                       }
+
+                       /*
+                        * If the node is our TCON with channel 0, the first
+                        * port is used for panel or bridges, and will not be
+                        * part of the component framework.
+                        */
+                       if (sun4i_drv_node_is_tcon_with_ch0(node)) {
+                               struct of_endpoint endpoint;
+
+                               if (of_graph_parse_endpoint(ep, &endpoint)) {
+                                       DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+                                       of_node_put(remote);
+                                       continue;
+                               }
+
+                               if (!endpoint.id) {
+                                       DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+                                       of_node_put(remote);
+                                       continue;
+                               }
+                       }
+               }
+
+               kfifo_put(&list->fifo, remote);
+       }
+}
+
 static int sun4i_drv_add_endpoints(struct device *dev,
                                   struct endpoint_list *list,
                                   struct component_match **match,
                                   struct device_node *node)
 {
-       struct device_node *port, *ep, *remote;
        int count = 0;
 
        /*
@@ -272,41 +352,13 @@ static int sun4i_drv_add_endpoints(struct device *dev,
                count++;
        }
 
-       /* Inputs are listed first, then outputs */
-       port = of_graph_get_port_by_id(node, 1);
-       if (!port) {
-               DRM_DEBUG_DRIVER("No output to bind\n");
-               return count;
-       }
+       /* each node has at least one output */
+       sun4i_drv_traverse_endpoints(list, node, 1);
 
-       for_each_available_child_of_node(port, ep) {
-               remote = of_graph_get_remote_port_parent(ep);
-               if (!remote) {
-                       DRM_DEBUG_DRIVER("Error retrieving the output node\n");
-                       of_node_put(remote);
-                       continue;
-               }
-
-               /*
-                * If the node is our TCON, the first port is used for
-                * panel or bridges, and will not be part of the
-                * component framework.
-                */
-               if (sun4i_drv_node_is_tcon(node)) {
-                       struct of_endpoint endpoint;
-
-                       if (of_graph_parse_endpoint(ep, &endpoint)) {
-                               DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
-                               continue;
-                       }
-
-                       if (!endpoint.id) {
-                               DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
-                               continue;
-                       }
-               }
-
-               kfifo_put(&list->fifo, remote);
+       /* TCON TOP has a second and a third output */
+       if (sun4i_drv_node_is_tcon_top(node)) {
+               sun4i_drv_traverse_endpoints(list, node, 3);
+               sun4i_drv_traverse_endpoints(list, node, 5);
        }
 
        return count;
@@ -366,6 +418,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
        { .compatible = "allwinner,sun8i-a33-display-engine" },
        { .compatible = "allwinner,sun8i-a83t-display-engine" },
        { .compatible = "allwinner,sun8i-h3-display-engine" },
+       { .compatible = "allwinner,sun8i-r40-display-engine" },
        { .compatible = "allwinner,sun8i-v3s-display-engine" },
        { .compatible = "allwinner,sun9i-a80-display-engine" },
        { }
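The refactor pulls the per-port endpoint walk into sun4i_drv_traverse_endpoints() because TCON TOP, alone in the pipeline, has three output ports (ids 1, 3 and 5) that all need following; the TCON-specific skips (panel endpoints on channel-0 TCONs, and the back-reference to TCON TOP that would otherwise loop forever) move inside the helper. A toy model of the traversal shape, assuming nodes with numbered ports in place of the real of_graph accessors and kfifo:

    #include <stdio.h>

    struct node;

    struct port {
        int id;
        struct node *remote[4];
        int nremotes;
    };

    struct node {
        const char *name;
        struct port ports[4];
        int nports;
    };

    static void traverse(struct node *n, int port_id)
    {
        for (int p = 0; p < n->nports; p++) {
            if (n->ports[p].id != port_id)
                continue;
            for (int r = 0; r < n->ports[p].nremotes; r++)
                printf("queue %s\n", n->ports[p].remote[r]->name);
        }
    }

    static void add_endpoints(struct node *n, int is_tcon_top)
    {
        traverse(n, 1); /* every node has an output on port 1 */
        if (is_tcon_top) { /* only TCON TOP also has ports 3 and 5 */
            traverse(n, 3);
            traverse(n, 5);
        }
    }

    int main(void)
    {
        struct node tcon = { .name = "tcon" };
        struct node top = {
            .name = "tcon-top",
            .ports = { { .id = 1, .remote = { &tcon }, .nremotes = 1 },
                       { .id = 3 }, { .id = 5 } },
            .nports = 3,
        };

        add_endpoints(&top, 1); /* prints "queue tcon" */
        return 0;
    }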
index fa4bcd092eaf20f9f04faaaf49ca9339f134f385..061d2e0d9011ee88991b3f0fb1b4e2dd54925bee 100644 (file)
@@ -220,7 +220,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
        DRM_DEBUG_DRIVER("Monitor is %s monitor\n",
                         hdmi->hdmi_monitor ? "an HDMI" : "a DVI");
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        cec_s_phys_addr_from_edid(hdmi->cec_adap, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
@@ -623,7 +623,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
        ret = cec_register_adapter(hdmi->cec_adap, dev);
        if (ret < 0)
                goto err_cleanup_connector;
-       drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
+       drm_connector_attach_encoder(&hdmi->connector, &hdmi->encoder);
 
        return 0;
 
index be3f14d7746deee1b0183c113dc653e4c7e3629d..af7dcb6da351408391892dff43c9fc574291ae50 100644 (file)
@@ -136,7 +136,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
        }
 
        /* The LVDS encoder can only work with the TCON channel 0 */
-       lvds->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+       lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
 
        if (tcon->panel) {
                drm_connector_helper_add(&lvds->connector,
@@ -149,7 +149,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
                        goto err_cleanup_connector;
                }
 
-               drm_mode_connector_attach_encoder(&lvds->connector,
+               drm_connector_attach_encoder(&lvds->connector,
                                                  &lvds->encoder);
 
                ret = drm_panel_attach(tcon->panel, &lvds->connector);
index f2fa1f210509e1d9baecaf8140ff98b97e6e4635..bf068da6b12e11b7a9440fdca6ff03ca84ff2479 100644 (file)
@@ -202,7 +202,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
        }
 
        /* The RGB encoder can only work with the TCON channel 0 */
-       rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+       rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
 
        if (tcon->panel) {
                drm_connector_helper_add(&rgb->connector,
@@ -215,7 +215,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
                        goto err_cleanup_connector;
                }
 
-               drm_mode_connector_attach_encoder(&rgb->connector,
+               drm_connector_attach_encoder(&rgb->connector,
                                                  &rgb->encoder);
 
                ret = drm_panel_attach(tcon->panel, &rgb->connector);
index 08747fc3ee713d6ba796b946103334b302a48758..3fb084f802e298948d2115995dec660d1b1d22b0 100644 (file)
@@ -17,7 +17,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
-#include <drm/drm_panel.h>
 
 #include <uapi/drm/drm_mode.h>
 
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
                                     const struct drm_display_mode *mode)
 {
-       struct drm_panel *panel = tcon->panel;
-       struct drm_connector *connector = panel->connector;
-       struct drm_display_info display_info = connector->display_info;
        unsigned int bp, hsync, vsync;
        u8 clk_delay;
        u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
 
-       /*
-        * On A20 and similar SoCs, the only way to achieve Positive Edge
-        * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-        * By default TCON works in Negative Edge(Falling Edge),
-        * this is why phase is set to 0 in that case.
-        * Unfortunately there's no way to logically invert dclk through
-        * IO_POL register.
-        * The only acceptable way to work, triple checked with scope,
-        * is using clock phase set to 0° for Negative Edge and set to 240°
-        * for Positive Edge.
-        * On A33 and similar SoCs there would be a 90° phase option,
-        * but it divides also dclk by 2.
-        * Following code is a way to avoid quirks all around TCON
-        * and DOTCLOCK drivers.
-        */
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-               clk_set_phase(tcon->dclk, 240);
-
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-               clk_set_phase(tcon->dclk, 0);
-
        regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
                           SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
                           val);
@@ -791,12 +766,14 @@ static int sun4i_tcon_init_regmap(struct device *dev,
  */
 static struct sunxi_engine *
 sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
-                               struct device_node *node)
+                               struct device_node *node,
+                               u32 port_id)
 {
        struct device_node *port, *ep, *remote;
        struct sunxi_engine *engine = ERR_PTR(-EINVAL);
+       u32 reg = 0;
 
-       port = of_graph_get_port_by_id(node, 0);
+       port = of_graph_get_port_by_id(node, port_id);
        if (!port)
                return ERR_PTR(-EINVAL);
 
@@ -826,8 +803,21 @@ sun4i_tcon_find_engine_traverse(struct sun4i_drv *drv,
                if (remote == engine->node)
                        goto out_put_remote;
 
+       /*
+        * According to the device tree binding, input ports have even id
+        * numbers and output ports have odd ids. Since a component with
+        * more than one input and one output (TCON TOP) exists, the
+        * correct remote input id has to be calculated by subtracting 1
+        * from the remote output id. If this can't be done for some
+        * reason, 0 is used as the input port id.
+        */
+       of_node_put(port);
+       port = of_graph_get_remote_port(ep);
+       if (!of_property_read_u32(port, "reg", &reg) && reg > 0)
+               reg -= 1;
+
        /* keep looking through upstream ports */
-       engine = sun4i_tcon_find_engine_traverse(drv, remote);
+       engine = sun4i_tcon_find_engine_traverse(drv, remote, reg);
 
 out_put_remote:
        of_node_put(remote);
@@ -950,7 +940,7 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
 
        /* Fallback to old method by traversing input endpoints */
        of_node_put(port);
-       return sun4i_tcon_find_engine_traverse(drv, node);
+       return sun4i_tcon_find_engine_traverse(drv, node, 0);
 }
 
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -1092,23 +1082,25 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                goto err_free_dotclock;
        }
 
-       /*
-        * If we have an LVDS panel connected to the TCON, we should
-        * just probe the LVDS connector. Otherwise, just probe RGB as
-        * we used to.
-        */
-       remote = of_graph_get_remote_node(dev->of_node, 1, 0);
-       if (of_device_is_compatible(remote, "panel-lvds"))
-               if (can_lvds)
-                       ret = sun4i_lvds_init(drm, tcon);
+       if (tcon->quirks->has_channel_0) {
+               /*
+                * If we have an LVDS panel connected to the TCON, we should
+                * just probe the LVDS connector. Otherwise, just probe RGB as
+                * we used to.
+                */
+               remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+               if (of_device_is_compatible(remote, "panel-lvds"))
+                       if (can_lvds)
+                               ret = sun4i_lvds_init(drm, tcon);
+                       else
+                               ret = -EINVAL;
                else
-                       ret = -EINVAL;
-       else
-               ret = sun4i_rgb_init(drm, tcon);
-       of_node_put(remote);
+                       ret = sun4i_rgb_init(drm, tcon);
+               of_node_put(remote);
 
-       if (ret < 0)
-               goto err_free_dotclock;
+               if (ret < 0)
+                       goto err_free_dotclock;
+       }
 
        if (tcon->quirks->needs_de_be_mux) {
                /*
@@ -1162,13 +1154,19 @@ static const struct component_ops sun4i_tcon_ops = {
 static int sun4i_tcon_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
+       const struct sun4i_tcon_quirks *quirks;
        struct drm_bridge *bridge;
        struct drm_panel *panel;
        int ret;
 
-       ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
-       if (ret == -EPROBE_DEFER)
-               return ret;
+       quirks = of_device_get_match_data(&pdev->dev);
+
+       /* panels and bridges are present only on TCONs with channel 0 */
+       if (quirks->has_channel_0) {
+               ret = drm_of_find_panel_or_bridge(node, 1, 0, &panel, &bridge);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        return component_add(&pdev->dev, &sun4i_tcon_ops);
 }
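Two related sun4i_tcon changes: probe and bind now look for panels and bridges only on TCONs that have channel 0, and the upstream traversal gained a port id parameter because TCON TOP has several input ports to continue from. The binding convention quoted in the new comment (even ids are inputs, odd ids are outputs) makes the continuation port cheap to derive; a tiny sketch, with hypothetical names:

    /* the input port facing a remote output with id "reg" is reg - 1;
     * fall back to port 0 when the property is missing or zero */
    static unsigned int remote_input_id(int have_reg, unsigned int reg)
    {
        return (have_reg && reg > 0) ? reg - 1 : 0;
    }

For example, an engine reached through a remote output port with reg = 1 is entered through that component's input port 0.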
index b070d522ed8da9c761e0512643036cecd0ee888a..1a838d2082110679324824e4a235b45691e5556a 100644 (file)
@@ -623,7 +623,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
        }
        tv->connector.interlace_allowed = true;
 
-       drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+       drm_connector_attach_encoder(&tv->connector, &tv->encoder);
 
        return 0;
 
index bfbf761f0c1dd899531c149fce4d6828e56a9579..e3b34a3455460fbab688148f6d19c2ed0607f4f4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
+#include <linux/slab.h>
 
 #include <linux/phy/phy.h>
 
@@ -247,10 +248,8 @@ static u16 sun6i_dsi_crc_compute(u8 const *buffer, size_t len)
        return crc_ccitt(0xffff, buffer, len);
 }
 
-static u16 sun6i_dsi_crc_repeat_compute(u8 pd, size_t len)
+static u16 sun6i_dsi_crc_repeat(u8 pd, u8 *buffer, size_t len)
 {
-       u8 buffer[len];
-
        memset(buffer, pd, len);
 
        return sun6i_dsi_crc_compute(buffer, len);
@@ -274,11 +273,11 @@ static u32 sun6i_dsi_build_blk0_pkt(u8 vc, u16 wc)
                                        wc & 0xff, wc >> 8);
 }
 
-static u32 sun6i_dsi_build_blk1_pkt(u16 pd, size_t len)
+static u32 sun6i_dsi_build_blk1_pkt(u16 pd, u8 *buffer, size_t len)
 {
        u32 val = SUN6I_DSI_BLK_PD(pd);
 
-       return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat_compute(pd, len));
+       return val | SUN6I_DSI_BLK_PF(sun6i_dsi_crc_repeat(pd, buffer, len));
 }
 
 static void sun6i_dsi_inst_abort(struct sun6i_dsi *dsi)
@@ -452,6 +451,54 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
        struct mipi_dsi_device *device = dsi->device;
        unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
        u16 hbp, hfp, hsa, hblk, vblk;
+       size_t bytes;
+       u8 *buffer;
+
+       /* Do all timing calculations up front to allocate buffer space */
+
+       /*
+        * A sync period is composed of a blanking packet (4 bytes +
+        * payload + 2 bytes) and a sync event packet (4 bytes). Its
+        * minimal size is therefore 10 bytes
+        */
+#define HSA_PACKET_OVERHEAD    10
+       hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+                 (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+       /*
+        * The backporch is set using a blanking packet (4 bytes +
+        * payload + 2 bytes). Its minimal size is therefore 6 bytes
+        */
+#define HBP_PACKET_OVERHEAD    6
+       hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+                 (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+
+       /*
+        * The frontporch is set using a blanking packet (4 bytes +
+        * payload + 2 bytes). Its minimal size is therefore 6 bytes
+        */
+#define HFP_PACKET_OVERHEAD    6
+       hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+                 (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+
+       /*
+        * hblk seems to be the line + porches length.
+        */
+       hblk = mode->htotal * Bpp - hsa;
+
+       /*
+       /*
+        * And I'm not entirely sure what vblk is about. The driver in
+        * the Allwinner BSP uses a rather convoluted calculation
+        * there, but only for 4 lanes. However, using 0 (the !4 lanes
+        * case) even with a 4-lane screen seems to work...
+        */
+       vblk = 0;
+
+       /* How many bytes do we need to send all payloads? */
+       bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
+       buffer = kmalloc(bytes, GFP_KERNEL);
+       if (WARN_ON(!buffer))
+               return;
 
        regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
 
@@ -485,63 +532,37 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
                     SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
                     SUN6I_DSI_BASIC_SIZE1_VT(mode->vtotal));
 
-       /*
-        * A sync period is composed of a blanking packet (4 bytes +
-        * payload + 2 bytes) and a sync event packet (4 bytes). Its
-        * minimal size is therefore 10 bytes
-        */
-#define HSA_PACKET_OVERHEAD    10
-       hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
-                 (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+       /* sync */
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA0_REG,
                     sun6i_dsi_build_blk0_pkt(device->channel, hsa));
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HSA1_REG,
-                    sun6i_dsi_build_blk1_pkt(0, hsa));
+                    sun6i_dsi_build_blk1_pkt(0, buffer, hsa));
 
-       /*
-        * The backporch is set using a blanking packet (4 bytes +
-        * payload + 2 bytes). Its minimal size is therefore 6 bytes
-        */
-#define HBP_PACKET_OVERHEAD    6
-       hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
-                 (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
+       /* backporch */
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP0_REG,
                     sun6i_dsi_build_blk0_pkt(device->channel, hbp));
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HBP1_REG,
-                    sun6i_dsi_build_blk1_pkt(0, hbp));
+                    sun6i_dsi_build_blk1_pkt(0, buffer, hbp));
 
-       /*
-        * The frontporch is set using a blanking packet (4 bytes +
-        * payload + 2 bytes). Its minimal size is therefore 6 bytes
-        */
-#define HFP_PACKET_OVERHEAD    6
-       hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
-                 (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
+       /* frontporch */
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP0_REG,
                     sun6i_dsi_build_blk0_pkt(device->channel, hfp));
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HFP1_REG,
-                    sun6i_dsi_build_blk1_pkt(0, hfp));
+                    sun6i_dsi_build_blk1_pkt(0, buffer, hfp));
 
-       /*
-        * hblk seems to be the line + porches length.
-        */
-       hblk = mode->htotal * Bpp - hsa;
+       /* hblk */
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK0_REG,
                     sun6i_dsi_build_blk0_pkt(device->channel, hblk));
        regmap_write(dsi->regs, SUN6I_DSI_BLK_HBLK1_REG,
-                    sun6i_dsi_build_blk1_pkt(0, hblk));
+                    sun6i_dsi_build_blk1_pkt(0, buffer, hblk));
 
-       /*
-        * And I'm not entirely sure what vblk is about. The driver in
-        * Allwinner BSP is using a rather convoluted calculation
-        * there only for 4 lanes. However, using 0 (the !4 lanes
-        * case) even with a 4 lanes screen seems to work...
-        */
-       vblk = 0;
+       /* vblk */
        regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK0_REG,
                     sun6i_dsi_build_blk0_pkt(device->channel, vblk));
        regmap_write(dsi->regs, SUN6I_DSI_BLK_VBLK1_REG,
-                    sun6i_dsi_build_blk1_pkt(0, vblk));
+                    sun6i_dsi_build_blk1_pkt(0, buffer, vblk));
+
+       kfree(buffer);
 }
 
 static int sun6i_dsi_start(struct sun6i_dsi *dsi,
@@ -812,8 +833,8 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
 
        dsi->device = device;
        dsi->panel = of_drm_find_panel(device->dev.of_node);
-       if (!dsi->panel)
-               return -EINVAL;
+       if (IS_ERR(dsi->panel))
+               return PTR_ERR(dsi->panel);
 
        dev_info(host->dev, "Attached device %s\n", device->name);
 
@@ -920,7 +941,7 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
                goto err_cleanup_connector;
        }
 
-       drm_mode_connector_attach_encoder(&dsi->connector, &dsi->encoder);
+       drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
        drm_panel_attach(dsi->panel, &dsi->connector);
 
        return 0;
@@ -1040,7 +1061,7 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int sun6i_dsi_runtime_resume(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
 {
        struct sun6i_dsi *dsi = dev_get_drvdata(dev);
 
@@ -1069,7 +1090,7 @@ static int sun6i_dsi_runtime_resume(struct device *dev)
        return 0;
 }
 
-static int sun6i_dsi_runtime_suspend(struct device *dev)
+static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
 {
        struct sun6i_dsi *dsi = dev_get_drvdata(dev);
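The sun6i_dsi rework removes a variable-length array: sun6i_dsi_crc_repeat_compute() built a u8 buffer[len] on the stack, with len up to a full line's worth of bytes, as part of the tree-wide VLA removal. All five payload sizes are now computed before any register write, a single buffer of the maximum size is allocated once, and every blk1 packet reuses it. A userspace sketch of the pattern, assuming the standard reflected CRC-CCITT (polynomial 0x8408, init 0xffff) that lib/crc-ccitt.c implements:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static uint16_t crc_ccitt_buf(uint16_t crc, const uint8_t *buf, size_t len)
    {
        while (len--) {
            crc ^= *buf++;
            for (int i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
        }
        return crc;
    }

    /* CRC over 'len' repeats of 'pd', using caller-provided scratch
     * space instead of the old on-stack VLA */
    static uint16_t crc_repeat(uint8_t pd, uint8_t *buf, size_t len)
    {
        memset(buf, pd, len);
        return crc_ccitt_buf(0xffff, buf, len);
    }

    static size_t max2(size_t a, size_t b) { return a > b ? a : b; }

    int main(void)
    {
        /* illustrative sizes; the driver derives them from the mode */
        size_t hsa = 10, hbp = 6, hfp = 6, hblk = 4000, vblk = 0;
        size_t bytes = max2(max2(hsa, hbp), max2(max2(hfp, hblk), vblk));
        uint8_t *buffer = malloc(bytes); /* one allocation serves all */

        if (!buffer)
            return 1;

        crc_repeat(0, buffer, hsa);
        crc_repeat(0, buffer, hbp);
        /* ... hfp, hblk and vblk likewise ... */

        free(buffer);
        return 0;
    }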
 
index 9f40a44b456b209b8c35631486631624d6502d7e..31875b636434a12337b597f6358fac37861c8303 100644 (file)
@@ -12,6 +12,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "sun8i_dw_hdmi.h"
+#include "sun8i_tcon_top.h"
 
 static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
                                           struct drm_display_mode *mode,
@@ -41,6 +42,44 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
+static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
+{
+       return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
+               !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+static u32 sun8i_dw_hdmi_find_possible_crtcs(struct drm_device *drm,
+                                            struct device_node *node)
+{
+       struct device_node *port, *ep, *remote, *remote_port;
+       u32 crtcs = 0;
+
+       remote = of_graph_get_remote_node(node, 0, -1);
+       if (!remote)
+               return 0;
+
+       if (sun8i_dw_hdmi_node_is_tcon_top(remote)) {
+               port = of_graph_get_port_by_id(remote, 4);
+               if (!port)
+                       goto crtcs_exit;
+
+               for_each_child_of_node(port, ep) {
+                       remote_port = of_graph_get_remote_port(ep);
+                       if (remote_port) {
+                               crtcs |= drm_of_crtc_port_mask(drm, remote_port);
+                               of_node_put(remote_port);
+                       }
+               }
+       } else {
+               crtcs = drm_of_find_possible_crtcs(drm, node);
+       }
+
+crtcs_exit:
+       of_node_put(remote);
+
+       return crtcs;
+}
+
 static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
                              void *data)
 {
@@ -63,7 +102,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
        hdmi->dev = &pdev->dev;
        encoder = &hdmi->encoder;
 
-       encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
+       encoder->possible_crtcs =
+               sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
        /*
         * If we failed to find the CRTC(s) which this encoder is
         * supposed to be connected to, it's because the CRTC has
@@ -181,7 +221,7 @@ static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
 
-struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
        .probe  = sun8i_dw_hdmi_probe,
        .remove = sun8i_dw_hdmi_remove,
        .driver = {
index 79154f0f674af07bf2ff64ba84ff96bfe9a19062..aadbe0a10b0c6b6dbd5da6b6538526994897abec 100644 (file)
@@ -98,7 +98,8 @@
 #define SUN8I_HDMI_PHY_PLL_CFG1_LDO2_EN                BIT(29)
 #define SUN8I_HDMI_PHY_PLL_CFG1_LDO1_EN                BIT(28)
 #define SUN8I_HDMI_PHY_PLL_CFG1_HV_IS_33       BIT(27)
-#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL       BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK   BIT(26)
+#define SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT 26
 #define SUN8I_HDMI_PHY_PLL_CFG1_PLLEN          BIT(25)
 #define SUN8I_HDMI_PHY_PLL_CFG1_LDO_VSET(x)    ((x) << 22)
 #define SUN8I_HDMI_PHY_PLL_CFG1_UNKNOWN(x)     ((x) << 20)
@@ -147,6 +148,7 @@ struct sun8i_hdmi_phy;
 
 struct sun8i_hdmi_phy_variant {
        bool has_phy_clk;
+       bool has_second_pll;
        void (*phy_init)(struct sun8i_hdmi_phy *phy);
        void (*phy_disable)(struct dw_hdmi *hdmi,
                            struct sun8i_hdmi_phy *phy);
@@ -160,6 +162,7 @@ struct sun8i_hdmi_phy {
        struct clk                      *clk_mod;
        struct clk                      *clk_phy;
        struct clk                      *clk_pll0;
+       struct clk                      *clk_pll1;
        unsigned int                    rcal;
        struct regmap                   *regs;
        struct reset_control            *rst_phy;
@@ -188,6 +191,7 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
 void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
 const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
 
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev);
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+                        bool second_parent);
 
 #endif /* _SUN8I_DW_HDMI_H_ */
index 5a52fc489a9d5f7c3cae789f2e3042f18a844e5c..82502b351aec8b6a9983b0fa8f88e6bb3106be15 100644 (file)
@@ -183,7 +183,13 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
        regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
                           SUN8I_HDMI_PHY_ANA_CFG1_TXEN_MASK, 0);
 
-       regmap_write(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, pll_cfg1_init);
+       /*
+        * NOTE: We have to be careful not to overwrite PHY parent
+        * clock selection bit and clock divider.
+        */
+       regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+                          (u32)~SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+                          pll_cfg1_init);
        regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG2_REG,
                           (u32)~SUN8I_HDMI_PHY_PLL_CFG2_PREDIV_MSK,
                           pll_cfg2_init);
@@ -352,6 +358,10 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
                           SUN8I_HDMI_PHY_ANA_CFG3_SCLEN |
                           SUN8I_HDMI_PHY_ANA_CFG3_SDAEN);
 
+       /* reset PHY PLL clock parent */
+       regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+                          SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK, 0);
+
        /* set HW control of CEC pins */
        regmap_write(phy->regs, SUN8I_HDMI_PHY_CEC_REG, 0);
 
@@ -386,6 +396,14 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
        .name           = "phy"
 };
 
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+       .has_phy_clk = true,
+       .has_second_pll = true,
+       .phy_init = &sun8i_hdmi_phy_init_h3,
+       .phy_disable = &sun8i_hdmi_phy_disable_h3,
+       .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
 static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
        .phy_init = &sun8i_hdmi_phy_init_a83t,
        .phy_disable = &sun8i_hdmi_phy_disable_a83t,
@@ -400,6 +418,10 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
 };
 
 static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
+       {
+               .compatible = "allwinner,sun50i-a64-hdmi-phy",
+               .data = &sun50i_a64_hdmi_phy,
+       },
        {
                .compatible = "allwinner,sun8i-a83t-hdmi-phy",
                .data = &sun8i_a83t_hdmi_phy,
@@ -472,18 +494,30 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
                        goto err_put_clk_mod;
                }
 
-               ret = sun8i_phy_clk_create(phy, dev);
+               if (phy->variant->has_second_pll) {
+                       phy->clk_pll1 = of_clk_get_by_name(node, "pll-1");
+                       if (IS_ERR(phy->clk_pll1)) {
+                               dev_err(dev, "Could not get pll-1 clock\n");
+                               ret = PTR_ERR(phy->clk_pll1);
+                               goto err_put_clk_pll0;
+                       }
+               }
+
+               ret = sun8i_phy_clk_create(phy, dev,
+                                          phy->variant->has_second_pll);
                if (ret) {
                        dev_err(dev, "Couldn't create the PHY clock\n");
-                       goto err_put_clk_pll0;
+                       goto err_put_clk_pll1;
                }
+
+               clk_prepare_enable(phy->clk_phy);
        }
 
        phy->rst_phy = of_reset_control_get_shared(node, "phy");
        if (IS_ERR(phy->rst_phy)) {
                dev_err(dev, "Could not get phy reset control\n");
                ret = PTR_ERR(phy->rst_phy);
-               goto err_put_clk_pll0;
+               goto err_disable_clk_phy;
        }
 
        ret = reset_control_deassert(phy->rst_phy);
@@ -514,9 +548,12 @@ err_deassert_rst_phy:
        reset_control_assert(phy->rst_phy);
 err_put_rst_phy:
        reset_control_put(phy->rst_phy);
+err_disable_clk_phy:
+       clk_disable_unprepare(phy->clk_phy);
+err_put_clk_pll1:
+       clk_put(phy->clk_pll1);
 err_put_clk_pll0:
-       if (phy->variant->has_phy_clk)
-               clk_put(phy->clk_pll0);
+       clk_put(phy->clk_pll0);
 err_put_clk_mod:
        clk_put(phy->clk_mod);
 err_put_clk_bus:
@@ -531,13 +568,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
 
        clk_disable_unprepare(phy->clk_mod);
        clk_disable_unprepare(phy->clk_bus);
+       clk_disable_unprepare(phy->clk_phy);
 
        reset_control_assert(phy->rst_phy);
 
        reset_control_put(phy->rst_phy);
 
-       if (phy->variant->has_phy_clk)
-               clk_put(phy->clk_pll0);
+       clk_put(phy->clk_pll0);
+       clk_put(phy->clk_pll1);
        clk_put(phy->clk_mod);
        clk_put(phy->clk_bus);
 }
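Since CKIN_SEL in PLL_CFG1 now doubles as the parent mux of the PHY clock (see the clk hunks that follow), the config path must not clobber it: the whole-register regmap_write() becomes a regmap_update_bits() whose mask excludes the mux bit, and phy_init resets the mux to parent 0 before handing the clock over. Why the masked write matters, modeled with an arbitrary init value:

    #include <stdint.h>
    #include <stdio.h>

    #define CKIN_SEL (1u << 26) /* PLL clock-parent mux bit */

    static uint32_t pll_cfg1; /* stands in for the mmio register */

    /* regmap_update_bits()-alike: read-modify-write that only touches
     * the bits selected by mask */
    static void update_bits(uint32_t mask, uint32_t val)
    {
        pll_cfg1 = (pll_cfg1 & ~mask) | (val & mask);
    }

    int main(void)
    {
        pll_cfg1 = CKIN_SEL; /* clock framework picked the second PLL */

        /* a plain full-register write here would silently flip the
         * mux back; masking CKIN_SEL out preserves the selection */
        update_bits(~CKIN_SEL, 0x12345678);

        printf("mux still set: %d\n", !!(pll_cfg1 & CKIN_SEL)); /* 1 */
        return 0;
    }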
index faea449812f8391396fbc23e27d7738a5d04516d..a4d31fe3abff1ab149358be9e0ab6c9c6ec0372b 100644 (file)
@@ -22,35 +22,45 @@ static int sun8i_phy_clk_determine_rate(struct clk_hw *hw,
 {
        unsigned long rate = req->rate;
        unsigned long best_rate = 0;
+       struct clk_hw *best_parent = NULL;
        struct clk_hw *parent;
        int best_div = 1;
-       int i;
+       int i, p;
 
-       parent = clk_hw_get_parent(hw);
-
-       for (i = 1; i <= 16; i++) {
-               unsigned long ideal = rate * i;
-               unsigned long rounded;
-
-               rounded = clk_hw_round_rate(parent, ideal);
+       for (p = 0; p < clk_hw_get_num_parents(hw); p++) {
+               parent = clk_hw_get_parent_by_index(hw, p);
+               if (!parent)
+                       continue;
 
-               if (rounded == ideal) {
-                       best_rate = rounded;
-                       best_div = i;
-                       break;
+               for (i = 1; i <= 16; i++) {
+                       unsigned long ideal = rate * i;
+                       unsigned long rounded;
+
+                       rounded = clk_hw_round_rate(parent, ideal);
+
+                       if (rounded == ideal) {
+                               best_rate = rounded;
+                               best_div = i;
+                               best_parent = parent;
+                               break;
+                       }
+
+                       if (!best_rate ||
+                           abs(rate - rounded / i) <
+                           abs(rate - best_rate / best_div)) {
+                               best_rate = rounded;
+                               best_div = i;
+                               best_parent = parent;
+                       }
                }
 
-               if (!best_rate ||
-                   abs(rate - rounded / i) <
-                   abs(rate - best_rate / best_div)) {
-                       best_rate = rounded;
-                       best_div = i;
-               }
+               if (best_rate / best_div == rate)
+                       break;
        }
 
        req->rate = best_rate / best_div;
        req->best_parent_rate = best_rate;
-       req->best_parent_hw = parent;
+       req->best_parent_hw = best_parent;
 
        return 0;
 }
@@ -95,22 +105,58 @@ static int sun8i_phy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static u8 sun8i_phy_clk_get_parent(struct clk_hw *hw)
+{
+       struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+       u32 reg;
+
+       regmap_read(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG, &reg);
+       reg = (reg & SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK) >>
+             SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT;
+
+       return reg;
+}
+
+static int sun8i_phy_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct sun8i_phy_clk *priv = hw_to_phy_clk(hw);
+
+       if (index > 1)
+               return -EINVAL;
+
+       regmap_update_bits(priv->phy->regs, SUN8I_HDMI_PHY_PLL_CFG1_REG,
+                          SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_MSK,
+                          index << SUN8I_HDMI_PHY_PLL_CFG1_CKIN_SEL_SHIFT);
+
+       return 0;
+}
+
 static const struct clk_ops sun8i_phy_clk_ops = {
        .determine_rate = sun8i_phy_clk_determine_rate,
        .recalc_rate    = sun8i_phy_clk_recalc_rate,
        .set_rate       = sun8i_phy_clk_set_rate,
+
+       .get_parent     = sun8i_phy_clk_get_parent,
+       .set_parent     = sun8i_phy_clk_set_parent,
 };
 
-int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
+int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
+                        bool second_parent)
 {
        struct clk_init_data init;
        struct sun8i_phy_clk *priv;
-       const char *parents[1];
+       const char *parents[2];
 
        parents[0] = __clk_get_name(phy->clk_pll0);
        if (!parents[0])
                return -ENODEV;
 
+       if (second_parent) {
+               parents[1] = __clk_get_name(phy->clk_pll1);
+               if (!parents[1])
+                       return -ENODEV;
+       }
+
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
@@ -118,7 +164,7 @@ int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev)
        init.name = "hdmi-phy-clk";
        init.ops = &sun8i_phy_clk_ops;
        init.parent_names = parents;
-       init.num_parents = 1;
+       init.num_parents = second_parent ? 2 : 1;
        init.flags = CLK_SET_RATE_PARENT;
 
        priv->phy = phy;
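With a second parent PLL available, determine_rate can no longer assume a single parent: the old divider scan (1 through 16) is now repeated for every registered parent, keeping whichever parent and divider pair lands closest to the target, and the new get_parent/set_parent ops expose CKIN_SEL as an ordinary clock mux. The search, condensed into a sketch with illustrative names and a toy round_rate callback:

    #include <stdio.h>
    #include <stdlib.h>

    struct choice { int parent; int div; long rate; };

    /* round_rate() models clk_hw_round_rate() snapping an ideal rate to
     * what the given parent can actually generate */
    static struct choice pick_rate(long target, int nparents,
                                   long (*round_rate)(int parent, long ideal))
    {
        struct choice best = { .parent = -1, .div = 1, .rate = 0 };

        for (int p = 0; p < nparents; p++) {
            for (int i = 1; i <= 16; i++) {
                long rounded = round_rate(p, target * i);

                if (!best.rate ||
                    labs(target - rounded / i) <
                    labs(target - best.rate / best.div)) {
                    best.rate = rounded;
                    best.div = i;
                    best.parent = p;
                }
                if (rounded == target * i)
                    break; /* exact match on this parent */
            }
            if (best.rate / best.div == target)
                break; /* exact overall: stop scanning parents */
        }
        return best;
    }

    static long toy_round(int parent, long ideal)
    {
        long base = parent ? 297 : 300; /* what each PLL can produce */

        return ideal / base * base;
    }

    int main(void)
    {
        struct choice c = pick_rate(297, 2, toy_round);

        printf("parent %d div %d rate %ld\n", c.parent, c.div, c.rate);
        return 0; /* parent 1 div 1 rate 297 */
    }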
index 126899d6f0d3a5942a00d2d4ed6ce26331520665..fc3713608f78d9742bb0364f7d81f2e3db10bd55 100644 (file)
@@ -21,8 +21,9 @@
 
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
-#include <linux/reset.h>
 #include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
 
 #include "sun4i_drv.h"
 #include "sun8i_mixer.h"
@@ -322,6 +323,42 @@ static struct regmap_config sun8i_mixer_regmap_config = {
        .max_register   = 0xbfffc, /* guessed */
 };
 
+static int sun8i_mixer_of_get_id(struct device_node *node)
+{
+       struct device_node *port, *ep;
+       int ret = -EINVAL;
+
+       /* output is port 1 */
+       port = of_graph_get_port_by_id(node, 1);
+       if (!port)
+               return -EINVAL;
+
+       /* try to find downstream endpoint */
+       for_each_available_child_of_node(port, ep) {
+               struct device_node *remote;
+               u32 reg;
+
+               remote = of_graph_get_remote_endpoint(ep);
+               if (!remote)
+                       continue;
+
+               ret = of_property_read_u32(remote, "reg", &reg);
+               if (!ret) {
+                       of_node_put(remote);
+                       of_node_put(ep);
+                       of_node_put(port);
+
+                       return reg;
+               }
+
+               of_node_put(remote);
+       }
+
+       of_node_put(port);
+
+       return ret;
+}
+
 static int sun8i_mixer_bind(struct device *dev, struct device *master,
                              void *data)
 {
@@ -353,8 +390,16 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
        dev_set_drvdata(dev, mixer);
        mixer->engine.ops = &sun8i_engine_ops;
        mixer->engine.node = dev->of_node;
-       /* The ID of the mixer currently doesn't matter */
-       mixer->engine.id = -1;
+
+       /*
+        * While this function can fail, we shouldn't do anything
+        * if that happens. Some early DE2 DT entries don't provide
+        * a mixer id but work nevertheless, because matching between
+        * TCON and mixer is done by comparing node pointers (the old
+        * way) instead of comparing ids. If this function fails and
+        * an id is needed, binding will fail during id matching anyway.
+        */
+       mixer->engine.id = sun8i_mixer_of_get_id(dev->of_node);
 
        mixer->cfg = of_device_get_match_data(dev);
        if (!mixer->cfg)
@@ -432,14 +477,14 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
        regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
                     SUN8I_MIXER_BLEND_COLOR_BLACK);
 
-       /* Fixed zpos for now */
-       regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE, 0x43210);
-
        plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
        for (i = 0; i < plane_cnt; i++)
                regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
                             SUN8I_MIXER_BLEND_MODE_DEF);
 
+       regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+                          SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+
        return 0;
 
 err_disable_bus_clk:
@@ -500,6 +545,22 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
        .vi_num         = 1,
 };
 
+static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
+       .ccsc           = 0,
+       .mod_rate       = 297000000,
+       .scaler_mask    = 0xf,
+       .ui_num         = 3,
+       .vi_num         = 1,
+};
+
+static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
+       .ccsc           = 1,
+       .mod_rate       = 297000000,
+       .scaler_mask    = 0x3,
+       .ui_num         = 1,
+       .vi_num         = 1,
+};
+
 static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
        .vi_num = 2,
        .ui_num = 1,
@@ -521,6 +582,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
                .compatible = "allwinner,sun8i-h3-de2-mixer-0",
                .data = &sun8i_h3_mixer0_cfg,
        },
+       {
+               .compatible = "allwinner,sun8i-r40-de2-mixer-0",
+               .data = &sun8i_r40_mixer0_cfg,
+       },
+       {
+               .compatible = "allwinner,sun8i-r40-de2-mixer-1",
+               .data = &sun8i_r40_mixer1_cfg,
+       },
        {
                .compatible = "allwinner,sun8i-v3s-de2-mixer",
                .data = &sun8i_v3s_mixer_cfg,
index f34e70c42adf4707cee223d4bd8d9b1437f05a45..406c42e752d75c5c96faf1dd079c71539b3c4d20 100644 (file)
@@ -44,6 +44,7 @@
 #define SUN8I_MIXER_BLEND_CK_MIN(x)            (0x10e0 + 0x04 * (x))
 #define SUN8I_MIXER_BLEND_OUTCTL               0x10fc
 
+#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK      GENMASK(12, 8)
 #define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe)    BIT(8 + pipe)
 #define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
 /* colors are always in AARRGGBB format */
@@ -51,6 +52,9 @@
 /* The following numbers are some still unknown magic numbers */
 #define SUN8I_MIXER_BLEND_MODE_DEF             0x03010301
 
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(n)    (0xf << ((n) << 2))
+#define SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(n)  ((n) << 2)
+
 #define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED    BIT(1)
 
 #define SUN8I_MIXER_FBFMT_ARGB8888     0
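The header additions prepare for configurable plane zpos: BLEND_ROUTE packs one 4-bit channel selector per blender pipe, which is what the new ROUTE_PIPE_MSK/SHIFT macros address, and the bind hunk above stops writing the fixed 0x43210 routing (it also masks all pipes off at init via the new PIPE_CTL_EN_MSK). A sketch of the nibble addressing, which reproduces the old fixed value:

    #include <stdint.h>
    #include <stdio.h>

    #define ROUTE_PIPE_MSK(n)   (0xfu << ((n) << 2)) /* nibble n */
    #define ROUTE_PIPE_SHIFT(n) ((n) << 2)

    /* select which channel feeds blender pipe 'pipe' */
    static uint32_t route(uint32_t reg, unsigned int pipe, unsigned int chan)
    {
        reg &= ~ROUTE_PIPE_MSK(pipe);
        reg |= chan << ROUTE_PIPE_SHIFT(pipe);
        return reg;
    }

    int main(void)
    {
        uint32_t reg = 0;

        /* the old static routing: channel n on pipe n */
        for (unsigned int p = 0; p < 5; p++)
            reg = route(reg, p, p);
        printf("0x%x\n", reg); /* 0x43210 */
        return 0;
    }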
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
new file mode 100644 (file)
index 0000000..55fe398
--- /dev/null
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#include <drm/drmP.h>
+
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include "sun8i_tcon_top.h"
+
+static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
+{
+       return !!of_match_node(sun8i_tcon_top_of_table, node);
+}
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon)
+{
+       struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+       unsigned long flags;
+       u32 val;
+
+       if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+               dev_err(dev, "Device is not TCON TOP!\n");
+               return -EINVAL;
+       }
+
+       if (tcon < 2 || tcon > 3) {
+               dev_err(dev, "TCON index must be 2 or 3!\n");
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+       val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+       val &= ~TCON_TOP_HDMI_SRC_MSK;
+       val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1);
+       writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG);
+
+       spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_set_hdmi_src);
+
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon)
+{
+       struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+       unsigned long flags;
+       u32 reg;
+
+       if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
+               dev_err(dev, "Device is not TCON TOP!\n");
+               return -EINVAL;
+       }
+
+       if (mixer > 1) {
+               dev_err(dev, "Mixer index is too high!\n");
+               return -EINVAL;
+       }
+
+       if (tcon > 3) {
+               dev_err(dev, "TCON index is too high!\n");
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&tcon_top->reg_lock, flags);
+
+       reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+       if (mixer == 0) {
+               reg &= ~TCON_TOP_PORT_DE0_MSK;
+               reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon);
+       } else {
+               reg &= ~TCON_TOP_PORT_DE1_MSK;
+               reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon);
+       }
+       writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG);
+
+       spin_unlock_irqrestore(&tcon_top->reg_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(sun8i_tcon_top_de_config);
+
+static struct clk_hw *sun8i_tcon_top_register_gate(struct device *dev,
+                                                  const char *parent,
+                                                  void __iomem *regs,
+                                                  spinlock_t *lock,
+                                                  u8 bit, int name_index)
+{
+       const char *clk_name, *parent_name;
+       int ret, index;
+
+       index = of_property_match_string(dev->of_node, "clock-names", parent);
+       if (index < 0)
+               return ERR_PTR(index);
+
+       parent_name = of_clk_get_parent_name(dev->of_node, index);
+
+       ret = of_property_read_string_index(dev->of_node,
+                                           "clock-output-names", name_index,
+                                           &clk_name);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return clk_hw_register_gate(dev, clk_name, parent_name,
+                                   CLK_SET_RATE_PARENT,
+                                   regs + TCON_TOP_GATE_SRC_REG,
+                                   bit, 0, lock);
+}
+
+static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
+                              void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct clk_hw_onecell_data *clk_data;
+       struct sun8i_tcon_top *tcon_top;
+       struct resource *res;
+       void __iomem *regs;
+       int ret, i;
+
+       tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
+       if (!tcon_top)
+               return -ENOMEM;
+
+       clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
+                               sizeof(*clk_data->hws) * CLK_NUM,
+                               GFP_KERNEL);
+       if (!clk_data)
+               return -ENOMEM;
+       tcon_top->clk_data = clk_data;
+
+       spin_lock_init(&tcon_top->reg_lock);
+
+       tcon_top->rst = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(tcon_top->rst)) {
+               dev_err(dev, "Couldn't get our reset line\n");
+               return PTR_ERR(tcon_top->rst);
+       }
+
+       tcon_top->bus = devm_clk_get(dev, "bus");
+       if (IS_ERR(tcon_top->bus)) {
+               dev_err(dev, "Couldn't get the bus clock\n");
+               return PTR_ERR(tcon_top->bus);
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       regs = devm_ioremap_resource(dev, res);
+       tcon_top->regs = regs;
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+
+       ret = reset_control_deassert(tcon_top->rst);
+       if (ret) {
+               dev_err(dev, "Could not deassert ctrl reset control\n");
+               return ret;
+       }
+
+       ret = clk_prepare_enable(tcon_top->bus);
+       if (ret) {
+               dev_err(dev, "Could not enable bus clock\n");
+               goto err_assert_reset;
+       }
+
+       /*
+        * TCON TOP has two muxes that select the parent clock for each
+        * TCON TV channel clock. The parent can be either the TCON TV or
+        * the TVE clock. For now this is left fixed at TCON TV, since the
+        * TVE driver for R40 is not yet implemented. Once it is, the graph
+        * needs to be traversed to determine whether TVE is active on each
+        * TCON TV and, if so, the mux switched to the TVE clock parent.
+        */
+       clk_data->hws[CLK_TCON_TOP_TV0] =
+               sun8i_tcon_top_register_gate(dev, "tcon-tv0", regs,
+                                            &tcon_top->reg_lock,
+                                            TCON_TOP_TCON_TV0_GATE, 0);
+
+       clk_data->hws[CLK_TCON_TOP_TV1] =
+               sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+                                            &tcon_top->reg_lock,
+                                            TCON_TOP_TCON_TV1_GATE, 1);
+
+       clk_data->hws[CLK_TCON_TOP_DSI] =
+               sun8i_tcon_top_register_gate(dev, "dsi", regs,
+                                            &tcon_top->reg_lock,
+                                            TCON_TOP_TCON_DSI_GATE, 2);
+
+       for (i = 0; i < CLK_NUM; i++)
+               if (IS_ERR(clk_data->hws[i])) {
+                       ret = PTR_ERR(clk_data->hws[i]);
+                       goto err_unregister_gates;
+               }
+
+       clk_data->num = CLK_NUM;
+
+       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+                                    clk_data);
+       if (ret)
+               goto err_unregister_gates;
+
+       dev_set_drvdata(dev, tcon_top);
+
+       return 0;
+
+err_unregister_gates:
+       for (i = 0; i < CLK_NUM; i++)
+               if (clk_data->hws[i])
+                       clk_hw_unregister_gate(clk_data->hws[i]);
+       clk_disable_unprepare(tcon_top->bus);
+err_assert_reset:
+       reset_control_assert(tcon_top->rst);
+
+       return ret;
+}
+
+static void sun8i_tcon_top_unbind(struct device *dev, struct device *master,
+                                 void *data)
+{
+       struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
+       struct clk_hw_onecell_data *clk_data = tcon_top->clk_data;
+       int i;
+
+       of_clk_del_provider(dev->of_node);
+       for (i = 0; i < CLK_NUM; i++)
+               clk_hw_unregister_gate(clk_data->hws[i]);
+
+       clk_disable_unprepare(tcon_top->bus);
+       reset_control_assert(tcon_top->rst);
+}
+
+static const struct component_ops sun8i_tcon_top_ops = {
+       .bind   = sun8i_tcon_top_bind,
+       .unbind = sun8i_tcon_top_unbind,
+};
+
+static int sun8i_tcon_top_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &sun8i_tcon_top_ops);
+}
+
+static int sun8i_tcon_top_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &sun8i_tcon_top_ops);
+
+       return 0;
+}
+
+/* sun4i_drv uses this list to check if a device node is a TCON TOP */
+const struct of_device_id sun8i_tcon_top_of_table[] = {
+       { .compatible = "allwinner,sun8i-r40-tcon-top" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
+EXPORT_SYMBOL(sun8i_tcon_top_of_table);
+
+static struct platform_driver sun8i_tcon_top_platform_driver = {
+       .probe          = sun8i_tcon_top_probe,
+       .remove         = sun8i_tcon_top_remove,
+       .driver         = {
+               .name           = "sun8i-tcon-top",
+               .of_match_table = sun8i_tcon_top_of_table,
+       },
+};
+module_platform_driver(sun8i_tcon_top_platform_driver);
+
+MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@siol.net>");
+MODULE_DESCRIPTION("Allwinner R40 TCON TOP driver");
+MODULE_LICENSE("GPL");
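
A note on the gate registration above: the parent clock is resolved through its clock-names entry, while the gate's own name is taken from clock-output-names at name_index. A sketch of the DT properties this pairing assumes (the output names below are illustrative, not taken from this patch):

/*
 *   clock-names        = "bus", "tcon-tv0", "tcon-tv1", "dsi";
 *   clock-output-names = "tcon-top-tv0", "tcon-top-tv1", "tcon-top-dsi";
 *
 * "tcon-tv0" is matched with of_property_match_string() to find the
 * parent, and name_index 0 selects "tcon-top-tv0" as the gate name.
 */
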
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.h b/drivers/gpu/drm/sun4i/sun8i_tcon_top.h
new file mode 100644 (file)
index 0000000..0390584
--- /dev/null
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _SUN8I_TCON_TOP_H_
+#define _SUN8I_TCON_TOP_H_
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define TCON_TOP_TCON_TV_SETUP_REG     0x00
+
+#define TCON_TOP_PORT_SEL_REG          0x1C
+#define TCON_TOP_PORT_DE0_MSK                  GENMASK(1, 0)
+#define TCON_TOP_PORT_DE1_MSK                  GENMASK(5, 4)
+
+#define TCON_TOP_GATE_SRC_REG          0x20
+#define TCON_TOP_HDMI_SRC_MSK                  GENMASK(29, 28)
+#define TCON_TOP_TCON_TV1_GATE                 24
+#define TCON_TOP_TCON_TV0_GATE                 20
+#define TCON_TOP_TCON_DSI_GATE                 16
+
+#define CLK_NUM                                        3
+
+struct sun8i_tcon_top {
+       struct clk                      *bus;
+       struct clk_hw_onecell_data      *clk_data;
+       void __iomem                    *regs;
+       struct reset_control            *rst;
+
+       /*
+        * The spinlock synchronizes access to the gate register, which
+        * packs several independently controlled clock gate bits.
+        */
+       spinlock_t                      reg_lock;
+};
+
+extern const struct of_device_id sun8i_tcon_top_of_table[];
+
+int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon);
+int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon);
+
+#endif /* _SUN8I_TCON_TOP_H_ */
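
A minimal usage sketch of the two exported helpers, assuming a caller that has already resolved the TCON TOP device (the function and the routing choices are hypothetical, not part of this patch):

static int example_route_pipeline(struct device *tcon_top_dev)
{
	int ret;

	/* Route mixer 0 to TCON 2 through the DE0 port mux. */
	ret = sun8i_tcon_top_de_config(tcon_top_dev, 0, 2);
	if (ret)
		return ret;

	/* Feed the HDMI encoder from TCON TV0, i.e. TCON index 2. */
	return sun8i_tcon_top_set_hdmi_src(tcon_top_dev, 2);
}
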
index 9a540330cb79808f74c7e5c944f77953ab4b1ccd..28c15c6ef1efbea5fc92e4d11c7bdcc4b03b2a73 100644 (file)
@@ -27,7 +27,8 @@
 #include "sun8i_ui_scaler.h"
 
 static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
-                                 int overlay, bool enable)
+                                 int overlay, bool enable, unsigned int zpos,
+                                 unsigned int old_zpos)
 {
        u32 val;
 
@@ -43,18 +44,36 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
                           SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
                           SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
 
-       if (enable)
-               val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
-       else
-               val = 0;
+       if (!enable || zpos != old_zpos) {
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_PIPE_CTL,
+                                  SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+                                  0);
 
-       regmap_update_bits(mixer->engine.regs,
-                          SUN8I_MIXER_BLEND_PIPE_CTL,
-                          SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_ROUTE,
+                                  SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+                                  0);
+       }
+
+       if (enable) {
+               val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+               val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_ROUTE,
+                                  SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+                                  val);
+       }
 }
 
 static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
-                                      int overlay, struct drm_plane *plane)
+                                      int overlay, struct drm_plane *plane,
+                                      unsigned int zpos)
 {
        struct drm_plane_state *state = plane->state;
        u32 src_w, src_h, dst_w, dst_h;
@@ -137,10 +156,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
                         state->dst.x1, state->dst.y1);
        DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
        regmap_write(mixer->engine.regs,
-                    SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+                    SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
                     SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
        regmap_write(mixer->engine.regs,
-                    SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+                    SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
                     outsize);
 
        return 0;
@@ -236,30 +255,35 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
                                          struct drm_plane_state *old_state)
 {
        struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+       unsigned int old_zpos = old_state->normalized_zpos;
        struct sun8i_mixer *mixer = layer->mixer;
 
-       sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false);
+       sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+                             old_zpos);
 }
 
 static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
                                         struct drm_plane_state *old_state)
 {
        struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
+       unsigned int zpos = plane->state->normalized_zpos;
+       unsigned int old_zpos = old_state->normalized_zpos;
        struct sun8i_mixer *mixer = layer->mixer;
 
        if (!plane->state->visible) {
                sun8i_ui_layer_enable(mixer, layer->channel,
-                                     layer->overlay, false);
+                                     layer->overlay, false, 0, old_zpos);
                return;
        }
 
        sun8i_ui_layer_update_coord(mixer, layer->channel,
-                                   layer->overlay, plane);
+                                   layer->overlay, plane, zpos);
        sun8i_ui_layer_update_formats(mixer, layer->channel,
                                      layer->overlay, plane);
        sun8i_ui_layer_update_buffer(mixer, layer->channel,
                                     layer->overlay, plane);
-       sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay, true);
+       sun8i_ui_layer_enable(mixer, layer->channel, layer->overlay,
+                             true, zpos, old_zpos);
 }
 
 static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
@@ -307,6 +331,7 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
        enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
        int channel = mixer->cfg->vi_num + index;
        struct sun8i_ui_layer *layer;
+       unsigned int plane_cnt;
        int ret;
 
        layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -327,8 +352,10 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
                return ERR_PTR(ret);
        }
 
-       /* fixed zpos for now */
-       ret = drm_plane_create_zpos_immutable_property(&layer->plane, channel);
+       plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+       ret = drm_plane_create_zpos_property(&layer->plane, channel,
+                                            0, plane_cnt - 1);
        if (ret) {
                dev_err(drm->dev, "Couldn't add zpos property\n");
                return ERR_PTR(ret);
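
The core of the UI layer change, condensed: blender pipes now follow a plane's normalized zpos instead of its fixed channel. A sketch of the re-routing when a plane moves between pipes (the same register accesses as the driver code above, trimmed to the essentials):

static void example_move_pipe(struct sun8i_mixer *mixer, int channel,
			      unsigned int zpos, unsigned int old_zpos)
{
	/* Release the pipe the plane occupied so far. */
	regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
			   SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos), 0);
	regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE,
			   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos), 0);

	/* Claim the new pipe and route this channel into it. */
	regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
			   SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos),
			   SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos));
	regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE,
			   SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
			   channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos));
}
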
index 5877f8ef5895fd19a70edea0c4f7ed89cd4ed0d9..f4fe97813f943f61b061c91604ad542b9446c4f8 100644 (file)
@@ -21,7 +21,8 @@
 #include "sun8i_vi_scaler.h"
 
 static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
-                                 int overlay, bool enable)
+                                 int overlay, bool enable, unsigned int zpos,
+                                 unsigned int old_zpos)
 {
        u32 val;
 
@@ -37,18 +38,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
                           SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
                           SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
 
-       if (enable)
-               val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel);
-       else
-               val = 0;
+       if (!enable || zpos != old_zpos) {
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_PIPE_CTL,
+                                  SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
+                                  0);
 
-       regmap_update_bits(mixer->engine.regs,
-                          SUN8I_MIXER_BLEND_PIPE_CTL,
-                          SUN8I_MIXER_BLEND_PIPE_CTL_EN(channel), val);
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_ROUTE,
+                                  SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
+                                  0);
+       }
+
+       if (enable) {
+               val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+
+               val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+
+               regmap_update_bits(mixer->engine.regs,
+                                  SUN8I_MIXER_BLEND_ROUTE,
+                                  SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
+                                  val);
+       }
 }
 
 static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
-                                      int overlay, struct drm_plane *plane)
+                                      int overlay, struct drm_plane *plane,
+                                      unsigned int zpos)
 {
        struct drm_plane_state *state = plane->state;
        const struct drm_format_info *format = state->fb->format;
@@ -130,10 +149,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
                         state->dst.x1, state->dst.y1);
        DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
        regmap_write(mixer->engine.regs,
-                    SUN8I_MIXER_BLEND_ATTR_COORD(channel),
+                    SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
                     SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
        regmap_write(mixer->engine.regs,
-                    SUN8I_MIXER_BLEND_ATTR_INSIZE(channel),
+                    SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
                     outsize);
 
        return 0;
@@ -264,30 +283,35 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
                                          struct drm_plane_state *old_state)
 {
        struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+       unsigned int old_zpos = old_state->normalized_zpos;
        struct sun8i_mixer *mixer = layer->mixer;
 
-       sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false);
+       sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, false, 0,
+                             old_zpos);
 }
 
 static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
                                         struct drm_plane_state *old_state)
 {
        struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
+       unsigned int zpos = plane->state->normalized_zpos;
+       unsigned int old_zpos = old_state->normalized_zpos;
        struct sun8i_mixer *mixer = layer->mixer;
 
        if (!plane->state->visible) {
                sun8i_vi_layer_enable(mixer, layer->channel,
-                                     layer->overlay, false);
+                                     layer->overlay, false, 0, old_zpos);
                return;
        }
 
        sun8i_vi_layer_update_coord(mixer, layer->channel,
-                                   layer->overlay, plane);
+                                   layer->overlay, plane, zpos);
        sun8i_vi_layer_update_formats(mixer, layer->channel,
                                      layer->overlay, plane);
        sun8i_vi_layer_update_buffer(mixer, layer->channel,
                                     layer->overlay, plane);
-       sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay, true);
+       sun8i_vi_layer_enable(mixer, layer->channel, layer->overlay,
+                             true, zpos, old_zpos);
 }
 
 static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
@@ -351,6 +375,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
                                               int index)
 {
        struct sun8i_vi_layer *layer;
+       unsigned int plane_cnt;
        int ret;
 
        layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
@@ -368,8 +393,10 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
                return ERR_PTR(ret);
        }
 
-       /* fixed zpos for now */
-       ret = drm_plane_create_zpos_immutable_property(&layer->plane, index);
+       plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+
+       ret = drm_plane_create_zpos_property(&layer->plane, index,
+                                            0, plane_cnt - 1);
        if (ret) {
                dev_err(drm->dev, "Couldn't add zpos property\n");
                return ERR_PTR(ret);
index 776c1513e582827aae0764e52e376df92e884ab6..a2bd5876c633515950f23be6cdff6f0f49306312 100644 (file)
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
                 * unaligned offset is malformed and causes command stream
                 * corruption during buffer address relocation.
                 */
-               if (offset & 3 || offset >= obj->gem.size) {
+               if (offset & 3 || offset > obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
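
The relaxed bound above is deliberate: with the strict comparison, an offset equal to obj->gem.size is now accepted, while unaligned or truly out-of-range offsets still fail. Worked example for a 0x1000-byte buffer:

/*
 *   offset 0x0ffc: accepted (aligned, in range)
 *   offset 0x1000: accepted now, rejected by the old '>=' check
 *   offset 0x1004: rejected (out of range)
 *   offset 0x0ffd: rejected (unaligned, offset & 3 != 0)
 */
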
index 87c5d89bc9baf3cd09012f5a2385510b645ebe1b..ee6ca8fa1c6554d89b5f0ff0c8cb35f8bb09a5ab 100644 (file)
@@ -1052,7 +1052,7 @@ static int tegra_dsi_init(struct host1x_client *client)
                drm_encoder_helper_add(&dsi->output.encoder,
                                       &tegra_dsi_encoder_helper_funcs);
 
-               drm_mode_connector_attach_encoder(&dsi->output.connector,
+               drm_connector_attach_encoder(&dsi->output.connector,
                                                  &dsi->output.encoder);
                drm_connector_register(&dsi->output.connector);
 
@@ -1411,6 +1411,9 @@ static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
                struct tegra_output *output = &dsi->output;
 
                output->panel = of_drm_find_panel(device->dev.of_node);
+               if (IS_ERR(output->panel))
+                       output->panel = NULL;
+
                if (output->panel && output->connector.dev) {
                        drm_panel_attach(output->panel, &output->connector);
                        drm_helper_hpd_irq_event(output->connector.dev);
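
Both tegra hunks here adapt to the same API change, assumed from context: of_drm_find_panel() now returns an ERR_PTR() on failure instead of NULL. The conversion pattern for a caller that treats a missing panel as non-fatal, in isolation:

static struct drm_panel *example_find_panel(struct device_node *np)
{
	struct drm_panel *panel = of_drm_find_panel(np);

	/* A missing panel is not an error for this caller; map to NULL. */
	if (IS_ERR(panel))
		return NULL;

	return panel;
}
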
index 00a5c9f322543d8fa6a16ec8515dc07bf9e7146d..4f80100ff5f34b7ca997fb5a31a8c3c6788e59cf 100644 (file)
@@ -582,18 +582,6 @@ static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
        return 0;
 }
 
-static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
-                                        unsigned long page)
-{
-       return NULL;
-}
-
-static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
-                                         unsigned long page,
-                                         void *addr)
-{
-}
-
 static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
 {
        return NULL;
@@ -634,8 +622,6 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
        .release = tegra_gem_prime_release,
        .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
        .end_cpu_access = tegra_gem_prime_end_cpu_access,
-       .map_atomic = tegra_gem_prime_kmap_atomic,
-       .unmap_atomic = tegra_gem_prime_kunmap_atomic,
        .map = tegra_gem_prime_kmap,
        .unmap = tegra_gem_prime_kunmap,
        .mmap = tegra_gem_prime_mmap,
index 784739a9f497d530e7425a34bd958956033302bb..0082468f703c8c12ec89b142142b61b84b22fe89 100644 (file)
@@ -1488,7 +1488,7 @@ static int tegra_hdmi_init(struct host1x_client *client)
        drm_encoder_helper_add(&hdmi->output.encoder,
                               &tegra_hdmi_encoder_helper_funcs);
 
-       drm_mode_connector_attach_encoder(&hdmi->output.connector,
+       drm_connector_attach_encoder(&hdmi->output.connector,
                                          &hdmi->output.encoder);
        drm_connector_register(&hdmi->output.connector);
 
index ffe34bd0bb9d1f1a694299c1162767604a02c8bd..c662efc7e4139323d4c7d40b447b3466e2c5ce97 100644 (file)
@@ -37,7 +37,7 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
                edid = drm_get_edid(connector, output->ddc);
 
        cec_notifier_set_phys_addr_from_edid(output->notifier, edid);
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
 
        if (edid) {
                err = drm_add_edid_modes(connector, edid);
@@ -110,8 +110,8 @@ int tegra_output_probe(struct tegra_output *output)
        panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
        if (panel) {
                output->panel = of_drm_find_panel(panel);
-               if (!output->panel)
-                       return -EPROBE_DEFER;
+               if (IS_ERR(output->panel))
+                       return PTR_ERR(output->panel);
 
                of_node_put(panel);
        }
index 78ec5193741dd51a6d8de1d7aeefd5ec8c14cb91..28a78d3120bce31448123eaa9dad1c5ac6264e57 100644 (file)
@@ -289,7 +289,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
        drm_encoder_helper_add(&output->encoder,
                               &tegra_rgb_encoder_helper_funcs);
 
-       drm_mode_connector_attach_encoder(&output->connector,
+       drm_connector_attach_encoder(&output->connector,
                                          &output->encoder);
        drm_connector_register(&output->connector);
 
index 7d2a955fc5152e3cb9dcace93fc877dc15b43e87..d7fe9f15def1dbf426b4f23bb7fe46cfa64248af 100644 (file)
@@ -2622,7 +2622,7 @@ static int tegra_sor_init(struct host1x_client *client)
                         encoder, NULL);
        drm_encoder_helper_add(&sor->output.encoder, helpers);
 
-       drm_mode_connector_attach_encoder(&sor->output.connector,
+       drm_connector_attach_encoder(&sor->output.connector,
                                          &sor->output.encoder);
        drm_connector_register(&sor->output.connector);
 
index b8a5e4ed22e6d84ce5637e9b42db93bbbc54b13a..0fb300d41a09c02508e70e89678695e09a8ff0f9 100644 (file)
@@ -378,7 +378,7 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
        if (!priv->external_connector &&
            ((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
                dev_err(dev, "no encoders/connectors found\n");
-               ret = -ENXIO;
+               ret = -EPROBE_DEFER;
                goto init_failed;
        }
 
index d651bdd6597e6f5a3429300c3d2e2ba68bc66707..b4eaf9bc87f8e171246f4585dbfd470c114468f4 100644 (file)
@@ -103,12 +103,11 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
                                                    struct drm_encoder *encoder)
 {
        struct drm_connector *connector;
-       int i;
 
-       list_for_each_entry(connector, &ddev->mode_config.connector_list, head)
-               for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
-                       if (connector->encoder_ids[i] == encoder->base.id)
-                               return connector;
+       list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
+               if (drm_connector_has_possible_encoder(connector, encoder))
+                       return connector;
+       }
 
        dev_err(ddev->dev, "No connector found for %s encoder (id %d)\n",
                encoder->name, encoder->base.id);
index d616d64a672542d3dbd6a8a44d79007ea8979d59..a1acab39d87f49385c51edddbaeae87e9745c956 100644 (file)
@@ -223,7 +223,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret)
                goto fail;
 
index c45cabb38db0114da1d407e3f54e6b0866967a43..daebf1aa6b0a841dc28d8b681e591b5445a116c1 100644 (file)
@@ -173,7 +173,7 @@ static int tfp410_connector_get_modes(struct drm_connector *connector)
 
        edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
 
        if (edid) {
                ret = drm_add_edid_modes(connector, edid);
@@ -240,7 +240,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret)
                goto fail;
 
index 4592a5e3f20bf2f9420b6b4cdc355bbdd3fb2b6f..16f4b5c91f1b604b16fffde48aeaca845b5c1eee 100644 (file)
@@ -20,6 +20,17 @@ config TINYDRM_ILI9225
 
          If M is selected the module will be called ili9225.
 
+config TINYDRM_ILI9341
+       tristate "DRM support for ILI9341 display panels"
+       depends on DRM_TINYDRM && SPI
+       depends on BACKLIGHT_CLASS_DEVICE
+       select TINYDRM_MIPI_DBI
+       help
+         DRM driver for the following Ilitek ILI9341 panels:
+         * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+
+         If M is selected the module will be called ili9341.
+
 config TINYDRM_MI0283QT
        tristate "DRM support for MI0283QT"
        depends on DRM_TINYDRM && SPI
index 49a111929724a93f16f90953950b770b59ec8743..14d99080665a4a8d84ebfd4eb022c22902cc5f8d 100644 (file)
@@ -5,6 +5,7 @@ obj-$(CONFIG_TINYDRM_MIPI_DBI)          += mipi-dbi.o
 
 # Displays
 obj-$(CONFIG_TINYDRM_ILI9225)          += ili9225.o
+obj-$(CONFIG_TINYDRM_ILI9341)          += ili9341.o
 obj-$(CONFIG_TINYDRM_MI0283QT)         += mi0283qt.o
 obj-$(CONFIG_TINYDRM_REPAPER)          += repaper.o
 obj-$(CONFIG_TINYDRM_ST7586)           += st7586.o
index 24a33bf862fa161203893f429a09a089490b8e9f..19c7f70adfa5b7f29545108292c82cb83c6039f8 100644 (file)
@@ -204,7 +204,7 @@ static int tinydrm_register(struct tinydrm_device *tdev)
        if (ret)
                return ret;
 
-       ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs);
+       ret = drm_fbdev_generic_setup(drm, 0);
        if (ret)
                DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
 
@@ -214,7 +214,6 @@ static int tinydrm_register(struct tinydrm_device *tdev)
 static void tinydrm_unregister(struct tinydrm_device *tdev)
 {
        drm_atomic_helper_shutdown(tdev->drm);
-       drm_fb_cma_fbdev_fini(tdev->drm);
        drm_dev_unregister(tdev->drm);
 }
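
The switch above trades the CMA-specific fbdev setup for the generic fbdev client, which tears itself down with the device, so no matching fini call or .lastclose hook is needed (the driver hunks below drop those hooks). A minimal sketch of the resulting registration pattern:

static int example_register(struct drm_device *drm)
{
	int ret = drm_dev_register(drm, 0);

	if (ret)
		return ret;

	/* Failure only costs the emulated fbdev, so log and carry on. */
	ret = drm_fbdev_generic_setup(drm, 0);
	if (ret)
		DRM_ERROR("Failed to initialize fbdev: %d\n", ret);

	return 0;
}
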
 
index 841c69aba0590455ddc5d5ce887f15d2a67d26c2..455fefe012f5912e825b4296e8c96b0129b814ea 100644 (file)
@@ -368,7 +368,6 @@ static struct drm_driver ili9225_driver = {
                                  DRIVER_ATOMIC,
        .fops                   = &ili9225_fops,
        TINYDRM_GEM_DRIVER_OPS,
-       .lastclose              = drm_fb_helper_lastclose,
        .name                   = "ili9225",
        .desc                   = "Ilitek ILI9225",
        .date                   = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
new file mode 100644 (file)
index 0000000..6701037
--- /dev/null
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9341 panels
+ *
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ *
+ * Based on mi0283qt.c:
+ * Copyright 2016 Noralf Trønnes
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define ILI9341_FRMCTR1                0xb1
+#define ILI9341_DISCTRL                0xb6
+#define ILI9341_ETMOD          0xb7
+
+#define ILI9341_PWCTRL1                0xc0
+#define ILI9341_PWCTRL2                0xc1
+#define ILI9341_VMCTRL1                0xc5
+#define ILI9341_VMCTRL2                0xc7
+#define ILI9341_PWCTRLA                0xcb
+#define ILI9341_PWCTRLB                0xcf
+
+#define ILI9341_PGAMCTRL       0xe0
+#define ILI9341_NGAMCTRL       0xe1
+#define ILI9341_DTCTRLA                0xe8
+#define ILI9341_DTCTRLB                0xea
+#define ILI9341_PWRSEQ         0xed
+
+#define ILI9341_EN3GAM         0xf2
+#define ILI9341_PUMPCTRL       0xf7
+
+#define ILI9341_MADCTL_BGR     BIT(3)
+#define ILI9341_MADCTL_MV      BIT(5)
+#define ILI9341_MADCTL_MX      BIT(6)
+#define ILI9341_MADCTL_MY      BIT(7)
+
+static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
+                            struct drm_crtc_state *crtc_state,
+                            struct drm_plane_state *plane_state)
+{
+       struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+       struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+       u8 addr_mode;
+       int ret;
+
+       DRM_DEBUG_KMS("\n");
+
+       ret = mipi_dbi_poweron_conditional_reset(mipi);
+       if (ret < 0)
+               return;
+       if (ret == 1)
+               goto out_enable;
+
+       mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+
+       mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+       mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+       mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+       mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+       mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
+       mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+
+       /* Power Control */
+       mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
+       mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+       /* VCOM */
+       mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
+       mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+
+       /* Pixel Format */
+       mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+       /* Frame Rate */
+       mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+
+       /* Gamma */
+       mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
+       mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+       mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+                        0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
+                        0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
+       mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+                        0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
+                        0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
+
+       /* DDRAM */
+       mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+
+       /* Display */
+       mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+       mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+       msleep(100);
+
+       mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+       msleep(100);
+
+out_enable:
+       switch (mipi->rotation) {
+       default:
+               addr_mode = ILI9341_MADCTL_MX;
+               break;
+       case 90:
+               addr_mode = ILI9341_MADCTL_MV;
+               break;
+       case 180:
+               addr_mode = ILI9341_MADCTL_MY;
+               break;
+       case 270:
+               addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
+                           ILI9341_MADCTL_MX;
+               break;
+       }
+       addr_mode |= ILI9341_MADCTL_BGR;
+       mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+       mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
+       .enable = yx240qv29_enable,
+       .disable = mipi_dbi_pipe_disable,
+       .update = tinydrm_display_pipe_update,
+       .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx240qv29_mode = {
+       TINYDRM_MODE(240, 320, 37, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
+
+static struct drm_driver ili9341_driver = {
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+       .fops                   = &ili9341_fops,
+       TINYDRM_GEM_DRIVER_OPS,
+       .debugfs_init           = mipi_dbi_debugfs_init,
+       .name                   = "ili9341",
+       .desc                   = "Ilitek ILI9341",
+       .date                   = "20180514",
+       .major                  = 1,
+       .minor                  = 0,
+};
+
+static const struct of_device_id ili9341_of_match[] = {
+       { .compatible = "adafruit,yx240qv29" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ili9341_of_match);
+
+static const struct spi_device_id ili9341_id[] = {
+       { "yx240qv29", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, ili9341_id);
+
+static int ili9341_probe(struct spi_device *spi)
+{
+       struct device *dev = &spi->dev;
+       struct mipi_dbi *mipi;
+       struct gpio_desc *dc;
+       u32 rotation = 0;
+       int ret;
+
+       mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+       if (!mipi)
+               return -ENOMEM;
+
+       mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(mipi->reset)) {
+               DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+               return PTR_ERR(mipi->reset);
+       }
+
+       dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
+       if (IS_ERR(dc)) {
+               DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+               return PTR_ERR(dc);
+       }
+
+       mipi->backlight = devm_of_find_backlight(dev);
+       if (IS_ERR(mipi->backlight))
+               return PTR_ERR(mipi->backlight);
+
+       device_property_read_u32(dev, "rotation", &rotation);
+
+       ret = mipi_dbi_spi_init(spi, mipi, dc);
+       if (ret)
+               return ret;
+
+       ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
+                           &ili9341_driver, &yx240qv29_mode, rotation);
+       if (ret)
+               return ret;
+
+       spi_set_drvdata(spi, mipi);
+
+       return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+       struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+       tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver ili9341_spi_driver = {
+       .driver = {
+               .name = "ili9341",
+               .of_match_table = ili9341_of_match,
+       },
+       .id_table = ili9341_id,
+       .probe = ili9341_probe,
+       .shutdown = ili9341_shutdown,
+};
+module_spi_driver(ili9341_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9341 DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
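
As a worked example of the rotation handling in yx240qv29_enable(): the optional rotation property maps onto MADCTL bits as follows, with BGR always OR'd in afterwards:

/*
 *   rotation =   0 (default) -> MX
 *   rotation =  90           -> MV
 *   rotation = 180           -> MY
 *   rotation = 270           -> MV | MY | MX
 */
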
index 015d03f2acba87baa0f158424cbade44f7ade977..d7bb4c5e6657a723e9ae03f2cd91e043c942b847 100644 (file)
@@ -154,7 +154,6 @@ static struct drm_driver mi0283qt_driver = {
                                  DRIVER_ATOMIC,
        .fops                   = &mi0283qt_fops,
        TINYDRM_GEM_DRIVER_OPS,
-       .lastclose              = drm_fb_helper_lastclose,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "mi0283qt",
        .desc                   = "Multi-Inno MI0283QT",
index 4d1fb31a781ff45df1984c9df78353719f37cf62..cb3441e51d5f03f4c3e1f6b82cb091ef64af7637 100644 (file)
@@ -260,6 +260,8 @@ static const struct drm_framebuffer_funcs mipi_dbi_fb_funcs = {
 /**
  * mipi_dbi_enable_flush - MIPI DBI enable helper
  * @mipi: MIPI DBI structure
+ * @crtc_state: CRTC state
+ * @plane_state: Plane state
  *
  * This function sets &mipi_dbi->enabled, flushes the whole framebuffer and
  * enables the backlight. Drivers can use this in their
index 5c29e3803ecba2a417f7a5d5e68c68f63738cd82..2fcbc3067d71b237d3a8a49ffd496d25367ac000 100644 (file)
@@ -304,7 +304,6 @@ static struct drm_driver st7586_driver = {
                                  DRIVER_ATOMIC,
        .fops                   = &st7586_fops,
        TINYDRM_GEM_DRIVER_OPS,
-       .lastclose              = drm_fb_helper_lastclose,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "st7586",
        .desc                   = "Sitronix ST7586",
index 6c7b15c9da4fc3eeac3b8d5976301c397eb85908..3081bc57c1166dc6849d24ea2abfab47d07e780b 100644 (file)
@@ -120,7 +120,6 @@ static struct drm_driver st7735r_driver = {
                                  DRIVER_ATOMIC,
        .fops                   = &st7735r_fops,
        TINYDRM_GEM_DRIVER_OPS,
-       .lastclose              = drm_fb_helper_lastclose,
        .debugfs_init           = mipi_dbi_debugfs_init,
        .name                   = "st7735r",
        .desc                   = "Sitronix ST7735R",
index 5d8688e522d1a6cd4db1a0401048e6ff4088686b..7c484729f9b21ad9f7bcf99b810261e8b5d8f68b 100644 (file)
@@ -287,12 +287,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (ret) {
                if (bdev->driver->move_notify) {
-                       struct ttm_mem_reg tmp_mem = *mem;
-                       *mem = bo->mem;
-                       bo->mem = tmp_mem;
+                       swap(*mem, bo->mem);
                        bdev->driver->move_notify(bo, false, mem);
-                       bo->mem = *mem;
-                       *mem = tmp_mem;
+                       swap(*mem, bo->mem);
                }
 
                goto out_err;
@@ -590,12 +587,18 @@ static void ttm_bo_release(struct kref *kref)
        kref_put(&bo->list_kref, ttm_bo_release_list);
 }
 
+void ttm_bo_put(struct ttm_buffer_object *bo)
+{
+       kref_put(&bo->kref, ttm_bo_release);
+}
+EXPORT_SYMBOL(ttm_bo_put);
+
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo = *p_bo;
 
        *p_bo = NULL;
-       kref_put(&bo->kref, ttm_bo_release);
+       ttm_bo_put(bo);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -1201,7 +1204,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
                if (!resv)
                        ttm_bo_unreserve(bo);
 
-               ttm_bo_unref(&bo);
+               ttm_bo_put(bo);
                return ret;
        }
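
The new reference API in a nutshell: ttm_bo_put() is a plain kref drop, pairing with ttm_bo_get() (used further down in this series), whereas the legacy ttm_bo_unref() also clears the caller's pointer. A sketch of the intended pattern:

static void example_borrow(struct ttm_buffer_object *bo)
{
	ttm_bo_get(bo);		/* pin the object for the duration of use */

	/* ... use bo ... */

	ttm_bo_put(bo);		/* may free bo if this was the last ref */
}
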
 
index f2c167702eef57bdc3b624ec5d2d7075b0e0b313..046a6dda690a268df98159e5a8014b6826a739c9 100644 (file)
@@ -463,7 +463,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
        struct ttm_transfer_obj *fbo;
 
        fbo = container_of(bo, struct ttm_transfer_obj, base);
-       ttm_bo_unref(&fbo->bo);
+       ttm_bo_put(fbo->bo);
        kfree(fbo);
 }
 
@@ -492,8 +492,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        if (!fbo)
                return -ENOMEM;
 
+       ttm_bo_get(bo);
        fbo->base = *bo;
-       fbo->bo = ttm_bo_reference(bo);
+       fbo->bo = bo;
 
        /**
         * Fix up members that we shouldn't copy directly:
@@ -730,7 +731,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                        bo->ttm = NULL;
 
                ttm_bo_unreserve(ghost_obj);
-               ttm_bo_unref(&ghost_obj);
+               ttm_bo_put(ghost_obj);
        }
 
        *old_mem = *new_mem;
@@ -786,7 +787,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                        bo->ttm = NULL;
 
                ttm_bo_unreserve(ghost_obj);
-               ttm_bo_unref(&ghost_obj);
+               ttm_bo_put(ghost_obj);
 
        } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
 
@@ -851,7 +852,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
        bo->ttm = NULL;
 
        ttm_bo_unreserve(ghost);
-       ttm_bo_unref(&ghost);
+       ttm_bo_put(ghost);
 
        return 0;
 }
index c7ece7613a6aa4c0e5d64bcd7054b8ad34c37e4f..6fe91c1b692d6fd547fcfff08b1633c229e77b0a 100644 (file)
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
 {
-       int ret = 0;
+       vm_fault_t ret = 0;
+       int err = 0;
 
        if (likely(!bo->moving))
                goto out_unlock;
@@ -67,20 +68,20 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;
 
-               ttm_bo_reference(bo);
+               ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
                ttm_bo_unreserve(bo);
-               ttm_bo_unref(&bo);
+               ttm_bo_put(bo);
                goto out_unlock;
        }
 
        /*
         * Ordinary wait.
         */
-       ret = dma_fence_wait(bo->moving, true);
-       if (unlikely(ret != 0)) {
-               ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+       err = dma_fence_wait(bo->moving, true);
+       if (unlikely(err != 0)) {
+               ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                + page_offset;
 }
 
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
-       int ret;
+       int err;
        int i;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
@@ -129,17 +131,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
-       ret = ttm_bo_reserve(bo, true, true, NULL);
-       if (unlikely(ret != 0)) {
-               if (ret != -EBUSY)
+       err = ttm_bo_reserve(bo, true, true, NULL);
+       if (unlikely(err != 0)) {
+               if (err != -EBUSY)
                        return VM_FAULT_NOPAGE;
 
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
-                               ttm_bo_reference(bo);
+                               ttm_bo_get(bo);
                                up_read(&vmf->vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
-                               ttm_bo_unref(&bo);
+                               ttm_bo_put(bo);
                        }
 
                        return VM_FAULT_RETRY;
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
        }
 
        if (bdev->driver->fault_reserve_notify) {
-               ret = bdev->driver->fault_reserve_notify(bo);
-               switch (ret) {
+               err = bdev->driver->fault_reserve_notify(bo);
+               switch (err) {
                case 0:
                        break;
                case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                goto out_unlock;
        }
 
-       ret = ttm_mem_io_lock(man, true);
-       if (unlikely(ret != 0)) {
+       err = ttm_mem_io_lock(man, true);
+       if (unlikely(err != 0)) {
                ret = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
-       ret = ttm_mem_io_reserve_vm(bo);
-       if (unlikely(ret != 0)) {
+       err = ttm_mem_io_reserve_vm(bo);
+       if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                }
 
                if (vma->vm_flags & VM_MIXEDMAP)
-                       ret = vm_insert_mixed(&cvma, address,
+                       ret = vmf_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
-                       ret = vm_insert_pfn(&cvma, address, pfn);
+                       ret = vmf_insert_pfn(&cvma, address, pfn);
 
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
                 */
 
-               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+               if (unlikely(ret == VM_FAULT_NOPAGE && i > 0))
                        break;
-               else if (unlikely(ret != 0)) {
-                       ret =
-                           (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+               else if (unlikely(ret & VM_FAULT_ERROR))
                        goto out_io_unlock;
-               }
 
                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
@@ -303,14 +302,14 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
 
-       (void)ttm_bo_reference(bo);
+       ttm_bo_get(bo);
 }
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
-       ttm_bo_unref(&bo);
+       ttm_bo_put(bo);
        vma->vm_private_data = NULL;
 }
 
@@ -462,7 +461,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 out_unref:
-       ttm_bo_unref(&bo);
+       ttm_bo_put(bo);
        return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mmap);
@@ -472,8 +471,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
        if (vma->vm_pgoff != 0)
                return -EACCES;
 
+       ttm_bo_get(bo);
+
        vma->vm_ops = &ttm_bo_vm_ops;
-       vma->vm_private_data = ttm_bo_reference(bo);
+       vma->vm_private_data = bo;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
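
The conversion pattern used throughout the fault path above, in isolation: errno values stay in an int while fault codes live in a vm_fault_t, so an errno can never leak into the fault return. Illustrative sketch:

static vm_fault_t example_wait_fault(struct dma_fence *moving)
{
	int err = dma_fence_wait(moving, true);	/* errno domain */

	if (unlikely(err))
		return err == -ERESTARTSYS ? VM_FAULT_NOPAGE :
					     VM_FAULT_SIGBUS;

	return 0;				/* vm_fault_t domain */
}
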
index 6e2d1300b457b973e3e6f1e5f072659ed665aaf1..f841accc2c0064a3edd865423a10818480477f39 100644 (file)
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
 
 static struct ttm_pool_manager *_manager;
 
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < numpages; i++)
-               unmap_page_from_agp(page++);
-#endif
-       return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               unmap_page_from_agp(pages[i]);
-#endif
-       return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               map_page_into_agp(pages[i]);
-#endif
-       return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               map_page_into_agp(pages[i]);
-#endif
-       return 0;
-}
-#endif
-
 /**
  * Select the right pool or requested caching state and ttm flags. */
 static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
        unsigned int i, pages_nr = (1 << order);
 
        if (order == 0) {
-               if (set_pages_array_wb(pages, npages))
+               if (ttm_set_pages_array_wb(pages, npages))
                        pr_err("Failed to set %d pages to wb!\n", npages);
        }
 
        for (i = 0; i < npages; ++i) {
                if (order > 0) {
-                       if (set_pages_wb(pages[i], pages_nr))
+                       if (ttm_set_pages_wb(pages[i], pages_nr))
                                pr_err("Failed to set %d pages to wb!\n", pages_nr);
                }
                __free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
-               r = set_pages_array_uc(pages, cpages);
+               r = ttm_set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to uc!\n", cpages);
                break;
        case tt_wc:
-               r = set_pages_array_wc(pages, cpages);
+               r = ttm_set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("Failed to set %d pages to wc!\n", cpages);
                break;
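
The new drm/ttm/ttm_set_memory.h header is not part of this hunk; presumably it centralizes the per-arch fallbacks deleted above, along the lines of this sketch reconstructed from the removed code:

static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef CONFIG_X86
	return set_pages_array_wb(pages, addrinarray);
#else
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
#endif
}
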
index 3f14c1cc078912b65340a58b661d0b96e441718f..507be7ac11655235fe83e50404f5470a01a385a6 100644 (file)
 #include <linux/kthread.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
        .default_attrs = ttm_pool_attrs,
 };
 
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               unmap_page_from_agp(pages[i]);
-#endif
-       return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               map_page_into_agp(pages[i]);
-#endif
-       return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-       int i;
-
-       for (i = 0; i < addrinarray; i++)
-               map_page_into_agp(pages[i]);
-#endif
-       return 0;
-}
-#endif /* for !CONFIG_X86 */
-
 static int ttm_set_pages_caching(struct dma_pool *pool,
                                 struct page **pages, unsigned cpages)
 {
        int r = 0;
        /* Set page caching */
        if (pool->type & IS_UC) {
-               r = set_pages_array_uc(pages, cpages);
+               r = ttm_set_pages_array_uc(pages, cpages);
                if (r)
                        pr_err("%s: Failed to set %d pages to uc!\n",
                               pool->dev_name, cpages);
        }
        if (pool->type & IS_WC) {
-               r = set_pages_array_wc(pages, cpages);
+               r = ttm_set_pages_array_wc(pages, cpages);
                if (r)
                        pr_err("%s: Failed to set %d pages to wc!\n",
                               pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
 {
        struct page *page = d_page->p;
-       unsigned i, num_pages;
+       unsigned num_pages;
 
        /* Don't set WB on WB page pool. */
        if (!(pool->type & IS_CACHED)) {
                num_pages = pool->size / PAGE_SIZE;
-               for (i = 0; i < num_pages; ++i, ++page) {
-                       if (set_pages_array_wb(&page, 1)) {
-                               pr_err("%s: Failed to set %d pages to wb!\n",
-                                      pool->dev_name, 1);
-                       }
-               }
+               if (ttm_set_pages_wb(page, num_pages))
+                       pr_err("%s: Failed to set %d pages to wb!\n",
+                              pool->dev_name, num_pages);
        }
 
        list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
 
        /* Don't set WB on WB page pool. */
        if (npages && !(pool->type & IS_CACHED) &&
-           set_pages_array_wb(pages, npages))
+           ttm_set_pages_array_wb(pages, npages))
                pr_err("%s: Failed to set %d pages to wb!\n",
                       pool->dev_name, npages);
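
For reference, the drm/ttm/ttm_set_memory.h header included above is outside
this hunk; judging from the fallbacks deleted here, it centralizes the
per-arch page-attribute handling that each pool previously open-coded. A
sketch of the wrapper pattern, reconstructed from the removed code (x86
delegates to the set_pages_array_*() primitives, other arches fall back to
the AGP map/unmap helpers):

    #ifdef CONFIG_X86
    #include <asm/set_memory.h>

    static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
    {
            return set_pages_array_wb(pages, addrinarray);
    }
    #else
    static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
    {
    #if IS_ENABLED(CONFIG_AGP)
            int i;

            /* unmapping from the AGP aperture restores write-back */
            for (i = 0; i < addrinarray; i++)
                    unmap_page_from_agp(pages[i]);
    #endif
            return 0;
    }
    #endif

The wc and uc variants follow the same shape, mapping pages into the AGP
aperture on the non-x86 path.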
 
index a1e543972ca7be65a56bc7672f4d489a083dd2bb..e3a0691582ffdf0d85bc7c84ab424efc310e01be 100644 (file)
@@ -38,9 +38,7 @@
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
        return 0;
 }
 
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
-                                         enum ttm_caching_state c_old,
-                                         enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+                                  enum ttm_caching_state c_old,
+                                  enum ttm_caching_state c_new)
 {
        int ret = 0;
 
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */
 
-               ret = set_pages_wb(p, 1);
+               ret = ttm_set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }
 
        if (c_new == tt_wc)
-               ret = set_memory_wc((unsigned long) page_address(p), 1);
+               ret = ttm_set_pages_wc(p, 1);
        else if (c_new == tt_uncached)
-               ret = set_pages_uc(p, 1);
+               ret = ttm_set_pages_uc(p, 1);
 
        return ret;
 }
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
-                                         enum ttm_caching_state c_old,
-                                         enum ttm_caching_state c_new)
-{
-       return 0;
-}
-#endif /* CONFIG_X86 */
 
 /*
  * Change caching policy for the linear kernel map
index 09dc585aa46f8bd3ef9cc446f2c521b5966cf13f..68e88bed77ca795dbb3537599e76fdab86e62128 100644 (file)
@@ -99,7 +99,7 @@ static int udl_get_modes(struct drm_connector *connector)
                                        struct udl_drm_connector,
                                        connector);
 
-       drm_mode_connector_update_edid_property(connector, udl_connector->edid);
+       drm_connector_update_edid_property(connector, udl_connector->edid);
        if (udl_connector->edid)
                return drm_add_edid_modes(connector, udl_connector->edid);
        return 0;
@@ -200,7 +200,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
        drm_connector_helper_add(connector, &udl_connector_helper_funcs);
 
        drm_connector_register(connector);
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        connector->polled = DRM_CONNECTOR_POLL_HPD |
                DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
 
index 0a20695eb120e80bd448db9bfa540fcc42817c93..556f62662aa92b41fefb738c1036dbd2eef18432 100644 (file)
@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
 };
 
 static int udl_attach_dma_buf(struct dma_buf *dmabuf,
-                             struct device *dev,
                              struct dma_buf_attachment *attach)
 {
        struct udl_drm_dmabuf_attachment *udl_attach;
@@ -158,27 +157,12 @@ static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
        return NULL;
 }
 
-static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-                                   unsigned long page_num)
-{
-       /* TODO */
-
-       return NULL;
-}
-
 static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
                              unsigned long page_num, void *addr)
 {
        /* TODO */
 }
 
-static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-                                    unsigned long page_num,
-                                    void *addr)
-{
-       /* TODO */
-}
-
 static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
                           struct vm_area_struct *vma)
 {
@@ -193,9 +177,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
        .map_dma_buf            = udl_map_dma_buf,
        .unmap_dma_buf          = udl_unmap_dma_buf,
        .map                    = udl_dmabuf_kmap,
-       .map_atomic             = udl_dmabuf_kmap_atomic,
        .unmap                  = udl_dmabuf_kunmap,
-       .unmap_atomic           = udl_dmabuf_kunmap_atomic,
        .mmap                   = udl_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
 };
index 55c0cc3091981754d9449a3c7001211f13621ced..e9e9b1ff678ee0a81d0d4b100b816b19122c7f0c 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/usb.h>
 #include <drm/drm_gem.h>
+#include <linux/mm_types.h>
 
 #define DRIVER_NAME            "udl"
 #define DRIVER_DESC            "DisplayLink"
@@ -112,7 +113,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
                      struct drm_file *file,
                      const struct drm_mode_fb_cmd2 *mode_cmd);
 
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
                     const char *front, char **urb_buf_ptr,
                     u32 byte_offset, u32 device_byte_offset, u32 byte_width,
                     int *ident_ptr, int *sent_ptr);
@@ -136,7 +137,7 @@ void udl_gem_put_pages(struct udl_gem_object *obj);
 int udl_gem_vmap(struct udl_gem_object *obj);
 void udl_gem_vunmap(struct udl_gem_object *obj);
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int udl_gem_fault(struct vm_fault *vmf);
+vm_fault_t udl_gem_fault(struct vm_fault *vmf);
 
 int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
                      int width, int height);
index 2ebdc6d5a76e60a33d6a271ff158258a61b7908c..dbb62f6eb48a5fba107acd9981b3cfae45f8f6a4 100644 (file)
@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
        int bytes_identical = 0;
        struct urb *urb;
        int aligned_x;
-       int bpp = fb->base.format->cpp[0];
+       int log_bpp;
+
+       BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
+       log_bpp = __ffs(fb->base.format->cpp[0]);
 
        if (!fb->active_16)
                return 0;
@@ -125,19 +128,22 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 
        for (i = y; i < y + height ; i++) {
                const int line_offset = fb->base.pitches[0] * i;
-               const int byte_offset = line_offset + (x * bpp);
-               const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
-               if (udl_render_hline(dev, bpp, &urb,
+               const int byte_offset = line_offset + (x << log_bpp);
+               const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
+               if (udl_render_hline(dev, log_bpp, &urb,
                                     (char *) fb->obj->vmapping,
                                     &cmd, byte_offset, dev_byte_offset,
-                                    width * bpp,
+                                    width << log_bpp,
                                     &bytes_identical, &bytes_sent))
                        goto error;
        }
 
        if (cmd > (char *) urb->transfer_buffer) {
                /* Send partial buffer remaining before exiting */
-               int len = cmd - (char *) urb->transfer_buffer;
+               int len;
+               if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+                       *cmd++ = 0xAF;
+               len = cmd - (char *) urb->transfer_buffer;
                ret = udl_submit_urb(dev, urb, len);
                bytes_sent += len;
        } else
@@ -146,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 error:
        atomic_add(bytes_sent, &udl->bytes_sent);
        atomic_add(bytes_identical, &udl->bytes_identical);
-       atomic_add(width*height*bpp, &udl->bytes_rendered);
+       atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
        end_cycles = get_cycles();
        atomic_add(((unsigned int) ((end_cycles - start_cycles)
                    >> 10)), /* Kcycles */
@@ -172,7 +178,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 
        pos = (unsigned long)info->fix.smem_start + offset;
 
-       pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+       pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
                  pos, size);
 
        /* We don't want the framebuffer to be mapped encrypted */
@@ -218,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
 
                struct fb_deferred_io *fbdefio;
 
-               fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+               fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
                if (fbdefio) {
                        fbdefio->delay = DL_DEFIO_WRITE_DELAY;
@@ -230,7 +236,7 @@ static int udl_fb_open(struct fb_info *info, int user)
        }
 #endif
 
-       pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+       pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
                  info->node, user, info, ufbdev->fb_count);
 
        return 0;
@@ -255,7 +261,7 @@ static int udl_fb_release(struct fb_info *info, int user)
        }
 #endif
 
-       pr_warn("released /dev/fb%d user=%d count=%d\n",
+       pr_debug("released /dev/fb%d user=%d count=%d\n",
                info->node, user, ufbdev->fb_count);
 
        return 0;
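
The bpp to log_bpp conversion in this series relies on the supported pixel
sizes (2 and 4 bytes) being powers of two, so every multiply and divide by
bpp becomes a shift by log2(bpp), which is what the BUG_ON(!is_power_of_2())
plus __ffs() pair above computes. A standalone demo of the identity, with a
local helper standing in for __ffs():

    #include <assert.h>
    #include <stdio.h>

    static int log2_u32(unsigned int v)
    {
            int log = 0;

            assert(v && !(v & (v - 1)));    /* powers of two only */
            while (v >>= 1)
                    log++;
            return log;
    }

    int main(void)
    {
            unsigned int bpp = 4, x = 123;
            int log_bpp = log2_u32(bpp);                /* 2 */

            printf("%u %u\n", x * bpp, x << log_bpp);   /* 492 492 */
            printf("%u %u\n", x / bpp, x >> log_bpp);   /* 30 30 */
            return 0;
    }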
index 9a15cce22ccee8ef2e1d08d6857024e8fd547192..d5a23295dd80c1a9c1f2cc202c4c93048fc163ef 100644 (file)
@@ -100,13 +100,12 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return ret;
 }
 
-int udl_gem_fault(struct vm_fault *vmf)
+vm_fault_t udl_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
        struct page *page;
        unsigned int page_offset;
-       int ret = 0;
 
        page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
@@ -114,17 +113,7 @@ int udl_gem_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        page = obj->pages[page_offset];
-       ret = vm_insert_page(vma, vmf->address, page);
-       switch (ret) {
-       case -EAGAIN:
-       case 0:
-       case -ERESTARTSYS:
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
+       return vmf_insert_page(vma, vmf->address, page);
 }
 
 int udl_gem_get_pages(struct udl_gem_object *obj)
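
vmf_insert_page() returns a vm_fault_t directly, folding in the errno
translation that the deleted switch performed by hand. The mapping it has to
implement, reconstructed from the removed code as a sketch (not the actual mm
implementation):

    static vm_fault_t errno_to_vm_fault(int err)
    {
            switch (err) {
            case 0:
            case -EAGAIN:
            case -ERESTARTSYS:
                    return VM_FAULT_NOPAGE;
            case -ENOMEM:
                    return VM_FAULT_OOM;
            default:
                    return VM_FAULT_SIGBUS;
            }
    }

The v3d hunk further down makes the same conversion with vmf_insert_mixed().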
index d518de8f496bc75524d246dff580fa7a2a7bcf36..f455f095a14685d234569eaed56121d6269393b1 100644 (file)
@@ -170,25 +170,19 @@ static void udl_free_urb_list(struct drm_device *dev)
        struct list_head *node;
        struct urb_node *unode;
        struct urb *urb;
-       int ret;
-       unsigned long flags;
 
        DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
 
        /* keep waiting and freeing, until we've got 'em all */
        while (count--) {
+               down(&udl->urbs.limit_sem);
 
-               /* Getting interrupted means a leak, but ok at shutdown*/
-               ret = down_interruptible(&udl->urbs.limit_sem);
-               if (ret)
-                       break;
-
-               spin_lock_irqsave(&udl->urbs.lock, flags);
+               spin_lock_irq(&udl->urbs.lock);
 
                node = udl->urbs.list.next; /* have reserved one with sem */
                list_del_init(node);
 
-               spin_unlock_irqrestore(&udl->urbs.lock, flags);
+               spin_unlock_irq(&udl->urbs.lock);
 
                unode = list_entry(node, struct urb_node, entry);
                urb = unode->urb;
@@ -205,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
 static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
 {
        struct udl_device *udl = dev->dev_private;
-       int i = 0;
        struct urb *urb;
        struct urb_node *unode;
        char *buf;
+       size_t wanted_size = count * size;
 
        spin_lock_init(&udl->urbs.lock);
 
+retry:
        udl->urbs.size = size;
        INIT_LIST_HEAD(&udl->urbs.list);
 
-       while (i < count) {
+       sema_init(&udl->urbs.limit_sem, 0);
+       udl->urbs.count = 0;
+       udl->urbs.available = 0;
+
+       while (udl->urbs.count * size < wanted_size) {
                unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
                if (!unode)
                        break;
@@ -231,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
                }
                unode->urb = urb;
 
-               buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+               buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
                                         &urb->transfer_dma);
                if (!buf) {
                        kfree(unode);
                        usb_free_urb(urb);
+                       if (size > PAGE_SIZE) {
+                               size /= 2;
+                               udl_free_urb_list(dev);
+                               goto retry;
+                       }
                        break;
                }
 
@@ -246,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
 
                list_add_tail(&unode->entry, &udl->urbs.list);
 
-               i++;
+               up(&udl->urbs.limit_sem);
+               udl->urbs.count++;
+               udl->urbs.available++;
        }
 
-       sema_init(&udl->urbs.limit_sem, i);
-       udl->urbs.count = i;
-       udl->urbs.available = i;
-
-       DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+       DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
 
-       return i;
+       return udl->urbs.count;
 }
 
 struct urb *udl_get_urb(struct drm_device *dev)
@@ -265,7 +267,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
        struct list_head *entry;
        struct urb_node *unode;
        struct urb *urb = NULL;
-       unsigned long flags;
 
        /* Wait for an in-flight buffer to complete and get re-queued */
        ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
@@ -276,14 +277,14 @@ struct urb *udl_get_urb(struct drm_device *dev)
                goto error;
        }
 
-       spin_lock_irqsave(&udl->urbs.lock, flags);
+       spin_lock_irq(&udl->urbs.lock);
 
        BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
        entry = udl->urbs.list.next;
        list_del_init(entry);
        udl->urbs.available--;
 
-       spin_unlock_irqrestore(&udl->urbs.lock, flags);
+       spin_unlock_irq(&udl->urbs.lock);
 
        unode = list_entry(entry, struct urb_node, entry);
        urb = unode->urb;
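
The reworked udl_alloc_urb_list() keeps the total transfer capacity fixed
(wanted_size = count * size) and, when a large coherent buffer cannot be
allocated, halves the per-URB size and rebuilds the whole list. A standalone
demo of that strategy, with malloc() standing in for usb_alloc_coherent() and
an artificial 16 KiB allocation limit:

    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_PAGE_SIZE 4096UL

    static void *demo_alloc(size_t size)
    {
            return size > 16384 ? NULL : malloc(size);  /* big allocs "fail" */
    }

    int main(void)
    {
            size_t size = 65536, wanted_size = 4 * size, count;

    retry:
            count = 0;
            while (count * size < wanted_size) {
                    void *buf = demo_alloc(size);

                    if (!buf) {
                            if (size > DEMO_PAGE_SIZE) {
                                    size /= 2;      /* halve and start over */
                                    goto retry;
                            }
                            break;
                    }
                    free(buf);      /* the driver queues the URB instead */
                    count++;
            }
            printf("allocated %zu buffers of %zu bytes\n", count, size);
            return 0;
    }

With the limit above this settles on 16 buffers of 16384 bytes.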
index 5bcae76497959bf3a14ba483df525eb052dd6300..7e37765cf5acd1ec0d2d74798b308c1a4be0bc5c 100644 (file)
@@ -243,7 +243,7 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
 
        memcpy(buf, udl->mode_buf, udl->mode_buf_len);
        retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
-       DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+       DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
        return retval;
 }
 
@@ -366,7 +366,6 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
 {
        struct udl_framebuffer *ufb = to_udl_fb(fb);
        struct drm_device *dev = crtc->dev;
-       unsigned long flags;
 
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        if (old_fb) {
@@ -377,10 +376,10 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
 
        udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
 
-       spin_lock_irqsave(&dev->event_lock, flags);
+       spin_lock_irq(&dev->event_lock);
        if (event)
                drm_crtc_send_vblank_event(crtc, event);
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       spin_unlock_irq(&dev->event_lock);
        crtc->primary->fb = fb;
 
        return 0;
index 0c87b1ac6b68f0d41cfd01851a14b9a092455f4f..ce87661e544f7a4cc37bb6a03d89104d3ea97e72 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fb.h>
-#include <linux/prefetch.h>
 #include <asm/unaligned.h>
 
 #include <drm/drmP.h>
@@ -51,9 +50,6 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
        int start = width;
        int end = width;
 
-       prefetch((void *) front);
-       prefetch((void *) back);
-
        for (j = 0; j < width; j++) {
                if (back[j] != front[j]) {
                        start = j;
@@ -83,12 +79,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
                ((pixel >> 8) & 0xf800));
 }
 
-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
+static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
 {
-       u16 pixel_val16 = 0;
-       if (bpp == 2)
+       u16 pixel_val16;
+       if (log_bpp == 1)
                pixel_val16 = *(const uint16_t *)pixel;
-       else if (bpp == 4)
+       else
                pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
        return pixel_val16;
 }
@@ -125,8 +121,9 @@ static void udl_compress_hline16(
        const u8 *const pixel_end,
        uint32_t *device_address_ptr,
        uint8_t **command_buffer_ptr,
-       const uint8_t *const cmd_buffer_end, int bpp)
+       const uint8_t *const cmd_buffer_end, int log_bpp)
 {
+       const int bpp = 1 << log_bpp;
        const u8 *pixel = *pixel_start_ptr;
        uint32_t dev_addr  = *device_address_ptr;
        uint8_t *cmd = *command_buffer_ptr;
@@ -139,8 +136,6 @@ static void udl_compress_hline16(
                const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
                uint16_t pixel_val16;
 
-               prefetchw((void *) cmd); /* pull in one cache line at least */
-
                *cmd++ = 0xaf;
                *cmd++ = 0x6b;
                *cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
@@ -153,12 +148,11 @@ static void udl_compress_hline16(
                raw_pixels_count_byte = cmd++; /*  we'll know this later */
                raw_pixel_start = pixel;
 
-               cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
-                       min((int)(pixel_end - pixel) / bpp,
-                           (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+               cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
+                                       (unsigned long)(pixel_end - pixel) >> log_bpp,
+                                       (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
 
-               prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
-               pixel_val16 = get_pixel_val16(pixel, bpp);
+               pixel_val16 = get_pixel_val16(pixel, log_bpp);
 
                while (pixel < cmd_pixel_end) {
                        const u8 *const start = pixel;
@@ -170,7 +164,7 @@ static void udl_compress_hline16(
                        pixel += bpp;
 
                        while (pixel < cmd_pixel_end) {
-                               pixel_val16 = get_pixel_val16(pixel, bpp);
+                               pixel_val16 = get_pixel_val16(pixel, log_bpp);
                                if (pixel_val16 != repeating_pixel_val16)
                                        break;
                                pixel += bpp;
@@ -179,10 +173,10 @@ static void udl_compress_hline16(
                        if (unlikely(pixel > start + bpp)) {
                                /* go back and fill in raw pixel count */
                                *raw_pixels_count_byte = (((start -
-                                               raw_pixel_start) / bpp) + 1) & 0xFF;
+                                               raw_pixel_start) >> log_bpp) + 1) & 0xFF;
 
                                /* immediately after raw data is repeat byte */
-               *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
+                               *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
 
                                /* Then start another raw pixel span */
                                raw_pixel_start = pixel;
@@ -192,11 +186,14 @@ static void udl_compress_hline16(
 
                if (pixel > raw_pixel_start) {
                        /* finalize last RAW span */
-                       *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+                       *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
+               } else {
+                       /* undo unused byte */
+                       cmd--;
                }
 
-               *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
-               dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+               *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
+               dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
        }
 
        if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
@@ -219,19 +216,19 @@ static void udl_compress_hline16(
  * (that we can only write to, slowly, and can never read), and (optionally)
  * our shadow copy that tracks what's been sent to that hardware buffer.
  */
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
                     const char *front, char **urb_buf_ptr,
                     u32 byte_offset, u32 device_byte_offset,
                     u32 byte_width,
                     int *ident_ptr, int *sent_ptr)
 {
        const u8 *line_start, *line_end, *next_pixel;
-       u32 base16 = 0 + (device_byte_offset / bpp) * 2;
+       u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
        struct urb *urb = *urb_ptr;
        u8 *cmd = *urb_buf_ptr;
        u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
 
-       BUG_ON(!(bpp == 2 || bpp == 4));
+       BUG_ON(!(log_bpp == 1 || log_bpp == 2));
 
        line_start = (u8 *) (front + byte_offset);
        next_pixel = line_start;
@@ -241,7 +238,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
 
                udl_compress_hline16(&next_pixel,
                             line_end, &base16,
-                            (u8 **) &cmd, (u8 *) cmd_end, bpp);
+                            (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
 
                if (cmd >= cmd_end) {
                        int len = cmd - (u8 *) urb->transfer_buffer;
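
get_pixel_val16() feeds the compressor either a native 16 bpp pixel or a
32 bpp pixel packed down to 16 bits. A standalone demo of that packing; the
red mask matches the pixel32_to_be16() fragment visible above, and the green
and blue masks are the usual RGB565 companions (assumed here, since the rest
of that function sits outside the hunk):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t pixel32_to_16(uint32_t px)      /* XRGB8888 -> RGB565 */
    {
            return ((px >> 3) & 0x001f) |   /* 5 MSBs of blue  */
                   ((px >> 5) & 0x07e0) |   /* 6 MSBs of green */
                   ((px >> 8) & 0xf800);    /* 5 MSBs of red   */
    }

    int main(void)
    {
            printf("0x%04x\n", pixel32_to_16(0x00ff8040));  /* -> 0xfc08 */
            return 0;
    }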
index 7b1e2a549a71fb7d9e5cc1a80f4a4e5a51c0e109..54d96518a131672ae02223be468fc6e6ca71fa96 100644 (file)
@@ -227,37 +227,19 @@ v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 }
 
-int v3d_gem_fault(struct vm_fault *vmf)
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct v3d_bo *bo = to_v3d_bo(obj);
-       unsigned long pfn;
+       pfn_t pfn;
        pgoff_t pgoff;
-       int ret;
 
        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-       pfn = page_to_pfn(bo->pages[pgoff]);
-
-       ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
-       switch (ret) {
-       case -EAGAIN:
-       case 0:
-       case -ERESTARTSYS:
-       case -EINTR:
-       case -EBUSY:
-               /*
-                * EBUSY is ok: this just means that another thread
-                * already did the job.
-                */
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
+       pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+       return vmf_insert_mixed(vma, vmf->address, pfn);
 }
 
 int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
index cdb582043b4fc26cc63a64e04eadcd1bd559e752..2a85fa68ffea51042b4c08dd8dc3497153590197 100644 (file)
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv;
+       struct drm_sched_rq *rq;
        int i;
 
        v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
        v3d_priv->v3d = v3d;
 
        for (i = 0; i < V3D_MAX_QUEUES; i++) {
-               drm_sched_entity_init(&v3d->queue[i].sched,
-                                     &v3d_priv->sched_entity[i],
-                                     &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-                                     NULL);
+               rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+               drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
        }
 
        file->driver_priv = v3d_priv;
@@ -146,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-       struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv = file->driver_priv;
        enum v3d_queue q;
 
        for (q = 0; q < V3D_MAX_QUEUES; q++) {
-               drm_sched_entity_fini(&v3d->queue[q].sched,
-                                     &v3d_priv->sched_entity[q]);
+               drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
        }
 
        kfree(v3d_priv);
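
Background for the two hunks above: the DRM GPU scheduler entity API changed
this cycle. drm_sched_entity_init() now takes a list of candidate runqueues
and a count instead of a scheduler pointer, and drm_sched_entity_destroy()
replaces the old per-scheduler fini call. The resulting usage pattern, as a
sketch with a single normal-priority runqueue (entity and sched stand for the
driver's own objects):

    struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

    drm_sched_entity_init(&entity, &rq, 1, NULL);   /* one rq, no guilty ptr */
    /* ... submit jobs through the entity ... */
    drm_sched_entity_destroy(&entity);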
index a043ac3aae9824aa8c8938c5647895a0143f292a..e6fed696ad869e2d700a94c2e6f4a10d36bf7529 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (C) 2015-2018 Broadcom */
 
 #include <linux/reservation.h>
+#include <linux/mm_types.h>
 #include <drm/drmP.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem.h>
@@ -25,7 +26,6 @@ struct v3d_queue_state {
 
        u64 fence_context;
        u64 emit_seqno;
-       u64 finished_seqno;
 };
 
 struct v3d_dev {
@@ -85,6 +85,11 @@ struct v3d_dev {
         */
        struct mutex reset_lock;
 
+       /* Lock taken when creating and pushing the GPU scheduler
+        * jobs, to keep the sched-fence seqnos in order.
+        */
+       struct mutex sched_lock;
+
        struct {
                u32 num_allocated;
                u32 pages_allocated;
@@ -179,6 +184,8 @@ struct v3d_job {
 
        /* GPU virtual addresses of the start/end of the CL job. */
        u32 start, end;
+
+       u32 timedout_ctca, timedout_ctra;
 };
 
 struct v3d_exec_info {
@@ -248,7 +255,7 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
 int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
-int v3d_gem_fault(struct vm_fault *vmf);
+vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
 int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
 struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
 int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
index 087d49c8cb12ad9d7fcdc99a5a8f1f7496639cc8..50bfcf9a8a1ac57d536fe4883f2727e88b42cdf7 100644 (file)
@@ -35,24 +35,7 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
                return "v3d-render";
 }
 
-static bool v3d_fence_enable_signaling(struct dma_fence *fence)
-{
-       return true;
-}
-
-static bool v3d_fence_signaled(struct dma_fence *fence)
-{
-       struct v3d_fence *f = to_v3d_fence(fence);
-       struct v3d_dev *v3d = to_v3d_dev(f->dev);
-
-       return v3d->queue[f->queue].finished_seqno >= f->seqno;
-}
-
 const struct dma_fence_ops v3d_fence_ops = {
        .get_driver_name = v3d_fence_get_driver_name,
        .get_timeline_name = v3d_fence_get_timeline_name,
-       .enable_signaling = v3d_fence_enable_signaling,
-       .signaled = v3d_fence_signaled,
-       .wait = dma_fence_default_wait,
-       .release = dma_fence_free,
 };
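
This deletion relies on the dma-fence core now treating .enable_signaling,
.signaled, .wait and .release as optional: absent callbacks default to
"signaling always enabled", dma_fence_default_wait() and dma_fence_free(),
and a fence without a .signaled hook counts as signaled once
dma_fence_signal() has run, which is also why the per-queue finished_seqno
bookkeeping goes away. The minimal ops struct this permits, sketched with
hypothetical demo_*() name callbacks:

    static const struct dma_fence_ops demo_fence_ops = {
            /* only the two mandatory name callbacks remain */
            .get_driver_name = demo_get_driver_name,
            .get_timeline_name = demo_get_timeline_name,
    };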
index b513f9189cafd6cea830ad3312bbc6911e396a7a..5ce24098a5fdaa363e69bdde24e98322b152e2c1 100644 (file)
@@ -550,9 +550,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto fail;
 
+       mutex_lock(&v3d->sched_lock);
        if (exec->bin.start != exec->bin.end) {
                ret = drm_sched_job_init(&exec->bin.base,
-                                        &v3d->queue[V3D_BIN].sched,
                                         &v3d_priv->sched_entity[V3D_BIN],
                                         v3d_priv);
                if (ret)
@@ -567,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        }
 
        ret = drm_sched_job_init(&exec->render.base,
-                                &v3d->queue[V3D_RENDER].sched,
                                 &v3d_priv->sched_entity[V3D_RENDER],
                                 v3d_priv);
        if (ret)
@@ -576,6 +575,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        kref_get(&exec->refcount); /* put by scheduler job completion */
        drm_sched_entity_push_job(&exec->render.base,
                                  &v3d_priv->sched_entity[V3D_RENDER]);
+       mutex_unlock(&v3d->sched_lock);
 
        v3d_attach_object_fences(exec);
 
@@ -594,6 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        return 0;
 
 fail_unreserve:
+       mutex_unlock(&v3d->sched_lock);
        v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
 fail:
        v3d_exec_put(exec);
@@ -615,6 +616,7 @@ v3d_gem_init(struct drm_device *dev)
        spin_lock_init(&v3d->job_lock);
        mutex_init(&v3d->bo_lock);
        mutex_init(&v3d->reset_lock);
+       mutex_init(&v3d->sched_lock);
 
        /* Note: We don't allocate address 0.  Various bits of HW
         * treat 0 as special, such as the occlusion query counters
@@ -650,17 +652,14 @@ void
 v3d_gem_destroy(struct drm_device *dev)
 {
        struct v3d_dev *v3d = to_v3d_dev(dev);
-       enum v3d_queue q;
 
        v3d_sched_fini(v3d);
 
        /* Waiting for exec to finish would need to be done before
         * unregistering V3D.
         */
-       for (q = 0; q < V3D_MAX_QUEUES; q++) {
-               WARN_ON(v3d->queue[q].emit_seqno !=
-                       v3d->queue[q].finished_seqno);
-       }
+       WARN_ON(v3d->bin_job);
+       WARN_ON(v3d->render_job);
 
        drm_mm_takedown(&v3d->mm);
 
index 77e1fa046c10a2c3045959008b085ab50cedbd73..e07514eb11b511ddaacd9ddd0cf0c3e3fbbbb1a7 100644 (file)
@@ -87,15 +87,12 @@ v3d_irq(int irq, void *arg)
        }
 
        if (intsts & V3D_INT_FLDONE) {
-               v3d->queue[V3D_BIN].finished_seqno++;
                dma_fence_signal(v3d->bin_job->bin.done_fence);
                status = IRQ_HANDLED;
        }
 
        if (intsts & V3D_INT_FRDONE) {
-               v3d->queue[V3D_RENDER].finished_seqno++;
                dma_fence_signal(v3d->render_job->render.done_fence);
-
                status = IRQ_HANDLED;
        }
 
index fc13282dfc2f0b75dc3ea167180950b63f7c8a76..854046565989e12c5b2e822f82b8fae40c01e5b0 100644 (file)
 #define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n)
 #define V3D_CLE_CT0RA                                  0x00118
 #define V3D_CLE_CT1RA                                  0x0011c
+#define V3D_CLE_CTNRA(n) (V3D_CLE_CT0RA + 4 * n)
 #define V3D_CLE_CT0LC                                  0x00120
 #define V3D_CLE_CT1LC                                  0x00124
 #define V3D_CLE_CT0PC                                  0x00128
index b07bece9417dc6a3a592789dc0684ba930824f5b..a5501581d96b3a3c4293e4504895a01de3c1d0d2 100644 (file)
@@ -14,8 +14,8 @@
  * to the HW only when it has completed the last one, instead of
  * filling up the CT[01]Q FIFOs with jobs.  Similarly, we use
  * v3d_job_dependency() to manage the dependency between bin and
- * render, instead of having the clients submit jobs with using the
- * HW's semaphores to interlock between them.
+ * render, instead of having the clients submit jobs using the HW's
+ * semaphores to interlock between them.
  */
 
 #include <linux/kthread.h>
@@ -114,8 +114,8 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
        v3d_invalidate_caches(v3d);
 
        fence = v3d_fence_create(v3d, q);
-       if (!fence)
-               return fence;
+       if (IS_ERR(fence))
+               return NULL;
 
        if (job->done_fence)
                dma_fence_put(job->done_fence);
@@ -153,7 +153,25 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
        struct v3d_job *job = to_v3d_job(sched_job);
        struct v3d_exec_info *exec = job->exec;
        struct v3d_dev *v3d = exec->v3d;
+       enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
        enum v3d_queue q;
+       u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+       u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+       /* If the current address or return address have changed, then
+        * the GPU has probably made progress and we should delay the
+        * reset.  This could fail if the GPU got in an infinite loop
+        * in the CL, but that is pretty unlikely outside of an i-g-t
+        * testcase.
+        */
+       if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+               job->timedout_ctca = ctca;
+               job->timedout_ctra = ctra;
+
+               schedule_delayed_work(&job->base.work_tdr,
+                                     job->base.sched->timeout);
+               return;
+       }
 
        mutex_lock(&v3d->reset_lock);
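
The new check above resets only when two consecutive timeouts observe the
same CL current/return addresses; any movement re-arms the TDR timer instead.
The general shape of that progress test, as a compact sketch:

    #include <stdbool.h>
    #include <stdint.h>

    struct progress_check {
            uint32_t last_ca, last_ra;
    };

    static bool should_reset(struct progress_check *p, uint32_t ca, uint32_t ra)
    {
            if (p->last_ca != ca || p->last_ra != ra) {
                    p->last_ca = ca;        /* progress: remember and re-arm */
                    p->last_ra = ra;
                    return false;
            }
            return true;                    /* stuck at the same addresses */
    }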
 
index 4a3a868235f848698c3bcc062475a8d3e4518a69..b303703bc7f37e3f1b032096c0dc00a6d7326bb6 100644 (file)
@@ -19,6 +19,7 @@ vc4-y := \
        vc4_plane.o \
        vc4_render_cl.o \
        vc4_trace_points.o \
+       vc4_txp.o \
        vc4_v3d.o \
        vc4_validate.o \
        vc4_validate_shaders.o
index add9cc97a3b63baa1194ec82a497180b11d204b5..8dcce7182bb7c06e23187dea7852388bf9d350b3 100644 (file)
@@ -721,7 +721,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
        return dmabuf;
 }
 
-int vc4_fault(struct vm_fault *vmf)
+vm_fault_t vc4_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
index c8650bbcbcb3b34898320982137c40a1c19739e4..0e6a121858d13ec335ae0a1f19ad839123f9cbe6 100644 (file)
@@ -46,6 +46,8 @@ struct vc4_crtc_state {
        struct drm_crtc_state base;
        /* Dlist area for this CRTC configuration. */
        struct drm_mm_node mm;
+       bool feed_txp;
+       bool txp_armed;
 };
 
 static inline struct vc4_crtc_state *
@@ -324,10 +326,8 @@ static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc)
        return NULL;
 }
 
-static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static void vc4_crtc_config_pv(struct drm_crtc *crtc)
 {
-       struct drm_device *dev = crtc->dev;
-       struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
        struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
@@ -338,12 +338,6 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
        bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
                       vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
        u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
-       bool debug_dump_regs = false;
-
-       if (debug_dump_regs) {
-               DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
-               vc4_crtc_dump_regs(vc4_crtc);
-       }
 
        /* Reset the PV fifo. */
        CRTC_WRITE(PV_CONTROL, 0);
@@ -419,6 +413,49 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
                                 PV_CONTROL_CLK_SELECT) |
                   PV_CONTROL_FIFO_CLR |
                   PV_CONTROL_EN);
+}
+
+static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+       struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
+       bool debug_dump_regs = false;
+
+       if (debug_dump_regs) {
+               DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
+               vc4_crtc_dump_regs(vc4_crtc);
+       }
+
+       if (vc4_crtc->channel == 2) {
+               u32 dispctrl;
+               u32 dsp3_mux;
+
+               /*
+                * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
+                * FIFO X'.
+                * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
+                *
+                * DSP3 is connected to FIFO2 unless the transposer is
+                * enabled. In this case, FIFO 2 is directly accessed by the
+                * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
+                * route.
+                */
+               if (vc4_state->feed_txp)
+                       dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
+               else
+                       dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
+
+               dispctrl = HVS_READ(SCALER_DISPCTRL) &
+                          ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+               HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
+       }
+
+       if (!vc4_state->feed_txp)
+               vc4_crtc_config_pv(crtc);
 
        HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
                  SCALER_DISPBKGND_AUTOHS |
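
The DSP3 mux update above is a standard read-modify-write on a multiplexer
field: clear the field's mask, then OR in the selected FIFO (or 3 to disable
DSP3). A standalone demo of that VC4_SET_FIELD()-style update, using a
hypothetical two-bit field:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_FIELD_MASK  0x00000060u    /* bits 6:5, made up for the demo */
    #define DEMO_FIELD_SHIFT 5

    static uint32_t set_field(uint32_t reg, uint32_t val)
    {
            return (reg & ~DEMO_FIELD_MASK) |
                   ((val << DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK);
    }

    int main(void)
    {
            printf("0x%08x\n", set_field(0xffffffffu, 2)); /* -> 0xffffffdf */
            return 0;
    }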
@@ -499,6 +536,13 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
        }
 }
 
+void vc4_crtc_txp_armed(struct drm_crtc_state *state)
+{
+       struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+
+       vc4_state->txp_armed = true;
+}
+
 static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -514,8 +558,11 @@ static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
                spin_lock_irqsave(&dev->event_lock, flags);
-               vc4_crtc->event = crtc->state->event;
-               crtc->state->event = NULL;
+
+               if (!vc4_state->feed_txp || vc4_state->txp_armed) {
+                       vc4_crtc->event = crtc->state->event;
+                       crtc->state->event = NULL;
+               }
 
                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
                          vc4_state->mm.start);
@@ -533,8 +580,8 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
-       struct drm_crtc_state *state = crtc->state;
-       struct drm_display_mode *mode = &state->adjusted_mode;
+       struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 
        require_hvs_enabled(dev);
 
@@ -546,15 +593,21 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
 
        /* Turn on the scaler, which will wait for vstart to start
         * compositing.
+        * When feeding the transposer, we should operate in oneshot
+        * mode.
         */
        HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel),
                  VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) |
                  VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) |
-                 SCALER_DISPCTRLX_ENABLE);
+                 SCALER_DISPCTRLX_ENABLE |
+                 (vc4_state->feed_txp ? SCALER_DISPCTRLX_ONESHOT : 0));
 
-       /* Turn on the pixel valve, which will emit the vstart signal. */
-       CRTC_WRITE(PV_V_CONTROL,
-                  CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
+       /* When feeding the transposer block the pixelvalve is unneeded and
+        * should not be enabled.
+        */
+       if (!vc4_state->feed_txp)
+               CRTC_WRITE(PV_V_CONTROL,
+                          CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
 }
 
 static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
@@ -579,8 +632,10 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
        struct drm_plane *plane;
        unsigned long flags;
        const struct drm_plane_state *plane_state;
+       struct drm_connector *conn;
+       struct drm_connector_state *conn_state;
        u32 dlist_count = 0;
-       int ret;
+       int ret, i;
 
        /* The pixelvalve can only feed one encoder (and encoders are
         * 1:1 with connectors.)
@@ -600,6 +655,24 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
        if (ret)
                return ret;
 
+       for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+               if (conn_state->crtc != crtc)
+                       continue;
+
+               /* The writeback connector is implemented using the transposer
+                * block which is directly taking its data from the HVS FIFO.
+                */
+               if (conn->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) {
+                       state->no_vblank = true;
+                       vc4_state->feed_txp = true;
+               } else {
+                       state->no_vblank = false;
+                       vc4_state->feed_txp = false;
+               }
+
+               break;
+       }
+
        return 0;
 }
 
@@ -713,7 +786,8 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 
        spin_lock_irqsave(&dev->event_lock, flags);
        if (vc4_crtc->event &&
-           (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
+           (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) ||
+            vc4_state->feed_txp)) {
                drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
                vc4_crtc->event = NULL;
                drm_crtc_vblank_put(crtc);
@@ -721,6 +795,13 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc)
+{
+       crtc->t_vblank = ktime_get();
+       drm_crtc_handle_vblank(&crtc->base);
+       vc4_crtc_handle_page_flip(crtc);
+}
+
 static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
 {
        struct vc4_crtc *vc4_crtc = data;
@@ -728,10 +809,8 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
        irqreturn_t ret = IRQ_NONE;
 
        if (stat & PV_INT_VFP_START) {
-               vc4_crtc->t_vblank = ktime_get();
                CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
-               drm_crtc_handle_vblank(&vc4_crtc->base);
-               vc4_crtc_handle_page_flip(vc4_crtc);
+               vc4_crtc_handle_vblank(vc4_crtc);
                ret = IRQ_HANDLED;
        }
 
@@ -862,7 +941,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
         * is released.
         */
        drm_atomic_set_fb_for_plane(plane->state, fb);
-       plane->fb = fb;
 
        vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno,
                           vc4_async_page_flip_complete);
@@ -885,12 +963,15 @@ static int vc4_page_flip(struct drm_crtc *crtc,
 
 static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct vc4_crtc_state *vc4_state;
+       struct vc4_crtc_state *vc4_state, *old_vc4_state;
 
        vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
        if (!vc4_state)
                return NULL;
 
+       old_vc4_state = to_vc4_crtc_state(crtc->state);
+       vc4_state->feed_txp = old_vc4_state->feed_txp;
+
        __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
        return &vc4_state->base;
 }
@@ -988,9 +1069,17 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
        struct drm_encoder *encoder;
 
        drm_for_each_encoder(encoder, drm) {
-               struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+               struct vc4_encoder *vc4_encoder;
                int i;
 
+               /* HVS FIFO2 can feed the TXP IP. */
+               if (crtc_data->hvs_channel == 2 &&
+                   encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+                       encoder->possible_crtcs |= drm_crtc_mask(crtc);
+                       continue;
+               }
+
+               vc4_encoder = to_vc4_encoder(encoder);
                for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
                        if (vc4_encoder->type == encoder_types[i]) {
                                vc4_encoder->clock_select = i;
@@ -1057,7 +1146,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
        drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
                                  &vc4_crtc_funcs, NULL);
        drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
-       primary_plane->crtc = crtc;
        vc4_crtc->channel = vc4_crtc->data->hvs_channel;
        drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
        drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
@@ -1083,7 +1171,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
                if (IS_ERR(plane))
                        continue;
 
-               plane->possible_crtcs = 1 << drm_crtc_index(crtc);
+               plane->possible_crtcs = drm_crtc_mask(crtc);
        }
 
        /* Set up the legacy cursor after overlay initialization,
@@ -1092,8 +1180,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
         */
        cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
        if (!IS_ERR(cursor_plane)) {
-               cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
-               cursor_plane->crtc = crtc;
+               cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
                crtc->cursor = cursor_plane;
        }
 
@@ -1121,7 +1208,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 err_destroy_planes:
        list_for_each_entry_safe(destroy_plane, temp,
                                 &drm->mode_config.plane_list, head) {
-               if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
+               if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc))
                    destroy_plane->funcs->destroy(destroy_plane);
        }
 err:
index 5db06bdb5f27b28eca3bfee651d6ccf660c3e351..7a0003de71ab0bf74d07549dd2df985bb0afa493 100644 (file)
@@ -21,6 +21,7 @@ static const struct drm_info_list vc4_debugfs_list[] = {
        {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
        {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
        {"vec_regs", vc4_vec_debugfs_regs, 0},
+       {"txp_regs", vc4_txp_debugfs_regs, 0},
        {"hvs_regs", vc4_hvs_debugfs_regs, 0},
        {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
        {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
index 466d0a27b41510085a93b4119bb737d33b8af1a0..04270a14fcaaf491a3344f13ce169e7da42cd186 100644 (file)
@@ -288,7 +288,7 @@ static int vc4_drm_bind(struct device *dev)
 
        ret = vc4_bo_cache_init(drm);
        if (ret)
-               goto dev_unref;
+               goto dev_put;
 
        drm_mode_config_init(drm);
 
@@ -313,8 +313,8 @@ unbind_all:
 gem_destroy:
        vc4_gem_destroy(drm);
        vc4_bo_cache_destroy(drm);
-dev_unref:
-       drm_dev_unref(drm);
+dev_put:
+       drm_dev_put(drm);
        return ret;
 }
 
@@ -331,7 +331,7 @@ static void vc4_drm_unbind(struct device *dev)
 
        drm_atomic_private_obj_fini(&vc4->ctm_manager);
 
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops vc4_drm_ops = {
@@ -344,6 +344,7 @@ static struct platform_driver *const component_drivers[] = {
        &vc4_vec_driver,
        &vc4_dpi_driver,
        &vc4_dsi_driver,
+       &vc4_txp_driver,
        &vc4_hvs_driver,
        &vc4_crtc_driver,
        &vc4_v3d_driver,
index 554a4e810d5b04d57dcbc0eb35a28f046fe826d8..bd6ef1f318220556e067b158936f9d1f6447eedb 100644 (file)
@@ -6,6 +6,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/mm_types.h>
 #include <linux/reservation.h>
 #include <drm/drmP.h>
 #include <drm/drm_encoder.h>
@@ -72,6 +73,7 @@ struct vc4_dev {
        struct vc4_dpi *dpi;
        struct vc4_dsi *dsi1;
        struct vc4_vec *vec;
+       struct vc4_txp *txp;
 
        struct vc4_hang_state *hang_state;
 
@@ -674,7 +676,7 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
 int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
-int vc4_fault(struct vm_fault *vmf);
+vm_fault_t vc4_fault(struct vm_fault *vmf);
 int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
 struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
@@ -697,6 +699,8 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
                             bool in_vblank_irq, int *vpos, int *hpos,
                             ktime_t *stime, ktime_t *etime,
                             const struct drm_display_mode *mode);
+void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
+void vc4_crtc_txp_armed(struct drm_crtc_state *state);
 
 /* vc4_debugfs.c */
 int vc4_debugfs_init(struct drm_minor *minor);
@@ -744,6 +748,10 @@ int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
 extern struct platform_driver vc4_vec_driver;
 int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
 
+/* vc4_txp.c */
+extern struct platform_driver vc4_txp_driver;
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
+
 /* vc4_irq.c */
 irqreturn_t vc4_irq(int irq, void *arg);
 void vc4_irq_preinstall(struct drm_device *dev);
index 8aa8978351185209b2c76d147468ce6e18eb8155..0c607eb33d7e0515016e117717a8388aea5330d8 100644 (file)
@@ -814,7 +814,9 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
        struct vc4_dsi *dsi = vc4_encoder->dsi;
        struct device *dev = &dsi->pdev->dev;
 
+       drm_bridge_disable(dsi->bridge);
        vc4_dsi_ulps(dsi, true);
+       drm_bridge_post_disable(dsi->bridge);
 
        clk_disable_unprepare(dsi->pll_phy_clock);
        clk_disable_unprepare(dsi->escape_clock);
@@ -1089,21 +1091,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
        /* Display reset sequence timeout */
        DSI_PORT_WRITE(PR_TO_CNT, 100000);
 
-       if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
-               DSI_PORT_WRITE(DISP0_CTRL,
-                              VC4_SET_FIELD(dsi->divider,
-                                            DSI_DISP0_PIX_CLK_DIV) |
-                              VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
-                              VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
-                                            DSI_DISP0_LP_STOP_CTRL) |
-                              DSI_DISP0_ST_END |
-                              DSI_DISP0_ENABLE);
-       } else {
-               DSI_PORT_WRITE(DISP0_CTRL,
-                              DSI_DISP0_COMMAND_MODE |
-                              DSI_DISP0_ENABLE);
-       }
-
        /* Set up DISP1 for transferring long command payloads through
         * the pixfifo.
         */
@@ -1128,6 +1115,25 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
 
        vc4_dsi_ulps(dsi, false);
 
+       drm_bridge_pre_enable(dsi->bridge);
+
+       if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+               DSI_PORT_WRITE(DISP0_CTRL,
+                              VC4_SET_FIELD(dsi->divider,
+                                            DSI_DISP0_PIX_CLK_DIV) |
+                              VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
+                              VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
+                                            DSI_DISP0_LP_STOP_CTRL) |
+                              DSI_DISP0_ST_END |
+                              DSI_DISP0_ENABLE);
+       } else {
+               DSI_PORT_WRITE(DISP0_CTRL,
+                              DSI_DISP0_COMMAND_MODE |
+                              DSI_DISP0_ENABLE);
+       }
+
+       drm_bridge_enable(dsi->bridge);
+
        if (debug_dump_regs) {
                DRM_INFO("DSI regs after:\n");
                vc4_dsi_dump_regs(dsi);
@@ -1606,8 +1612,18 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
 
        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
                                          &panel, &dsi->bridge);
-       if (ret)
+       if (ret) {
+               /* If the bridge or panel pointed by dev->of_node is not
+                * enabled, just return 0 here so that we don't prevent the DRM
+                * dev from being registered. Of course that means the DSI
+                * encoder won't be exposed, but that's not a problem since
+                * nothing is connected to it.
+                */
+               if (ret == -ENODEV)
+                       return 0;
+
                return ret;
+       }
 
        if (panel) {
                dsi->bridge = devm_drm_panel_bridge_add(dev, panel,
@@ -1639,6 +1655,12 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
                dev_err(dev, "bridge attach failed: %d\n", ret);
                return ret;
        }
+       /* Disable the atomic helper calls into the bridge.  We
+        * manually call the bridge pre_enable / enable / etc. calls
+        * from our driver, since we need to sequence them within the
+        * encoder's enable/disable paths.
+        */
+       dsi->encoder->bridge = NULL;
 
        pm_runtime_enable(dev);
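
Clearing dsi->encoder->bridge above opts this encoder out of the atomic
helper's automatic bridge handling, letting the driver interleave the bridge
calls with its own register writes as the earlier hunks do. The resulting
ordering, sketched from those hunks:

    /* enable path */
    vc4_dsi_ulps(dsi, false);
    drm_bridge_pre_enable(dsi->bridge);
    /* ... program DISP0/DISP1 and enable the DSI host ... */
    drm_bridge_enable(dsi->bridge);

    /* disable path mirrors it around ULPS entry */
    drm_bridge_disable(dsi->bridge);
    vc4_dsi_ulps(dsi, true);
    drm_bridge_post_disable(dsi->bridge);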
 
@@ -1652,7 +1674,8 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
        struct vc4_dev *vc4 = to_vc4_dev(drm);
        struct vc4_dsi *dsi = dev_get_drvdata(dev);
 
-       pm_runtime_disable(dev);
+       if (dsi->bridge)
+               pm_runtime_disable(dev);
 
        vc4_dsi_encoder_destroy(dsi->encoder);
 
index dbf5a5a5d5f5712850e4aa1afe8b99e5e8a53523..580214e2158c7dcf39253f2148dea478ffe20e68 100644 (file)
@@ -33,11 +33,6 @@ static const char *vc4_fence_get_timeline_name(struct dma_fence *fence)
        return "vc4-v3d";
 }
 
-static bool vc4_fence_enable_signaling(struct dma_fence *fence)
-{
-       return true;
-}
-
 static bool vc4_fence_signaled(struct dma_fence *fence)
 {
        struct vc4_fence *f = to_vc4_fence(fence);
@@ -49,8 +44,5 @@ static bool vc4_fence_signaled(struct dma_fence *fence)
 const struct dma_fence_ops vc4_fence_ops = {
        .get_driver_name = vc4_fence_get_driver_name,
        .get_timeline_name = vc4_fence_get_timeline_name,
-       .enable_signaling = vc4_fence_enable_signaling,
        .signaled = vc4_fence_signaled,
-       .wait = dma_fence_default_wait,
-       .release = dma_fence_free,
 };
index b8d50533e2bb4ac195fa63d32a4c2c748c036e4f..fd5522fd179e56399c63ce819e0f2a3580bc9ec1 100644 (file)
@@ -285,7 +285,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
                        drm_rgb_quant_range_selectable(edid);
        }
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
 
@@ -329,7 +329,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
        connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return connector;
 }
index 8a411e5f87768d9c56d5d1207e37be4962d4bbe0..ca5aa7fba7694e35e0fe4d543a9b01aa0b897424 100644 (file)
@@ -153,18 +153,11 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
-       /* Make sure that drm_atomic_helper_wait_for_vblanks()
-        * actually waits for vblank.  If we're doing a full atomic
-        * modeset (as opposed to a vc4_update_plane() short circuit),
-        * then we need to wait for scanout to be done with our
-        * display lists before we free it and potentially reallocate
-        * and overwrite the dlist memory with a new modeset.
-        */
-       state->legacy_cursor_update = false;
+       drm_atomic_helper_fake_vblank(state);
 
        drm_atomic_helper_commit_hw_done(state);
 
-       drm_atomic_helper_wait_for_vblanks(dev, state);
+       drm_atomic_helper_wait_for_flip_done(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
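
drm_atomic_helper_fake_vblank() and drm_atomic_helper_wait_for_flip_done()
slot into the standard commit-tail shape; a sketch of the whole sequence with
these helpers (all from <drm/drm_atomic_helper.h>), not vc4's exact code:

	static void example_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, 0);
		drm_atomic_helper_commit_modeset_enables(dev, state);

		/* Sends fake vblank events where the state asked for them. */
		drm_atomic_helper_fake_vblank(state);

		drm_atomic_helper_commit_hw_done(state);

		/* Waits on per-CRTC flip_done instead of vblank counters. */
		drm_atomic_helper_wait_for_flip_done(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);
	}
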
 
index 1d34619eb3fe3f57402291e7dbd05c4a193c45e1..9d7a36f148cfe1d0fef1cc1d1a8fdb0aab3e0cca 100644 (file)
@@ -467,12 +467,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        struct drm_framebuffer *fb = state->fb;
        u32 ctl0_offset = vc4_state->dlist_count;
        const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+       u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
        int num_planes = drm_format_num_planes(format->drm);
        bool mix_plane_alpha;
        bool covers_screen;
        u32 scl0, scl1, pitch0;
        u32 lbm_size, tiling;
        unsigned long irqflags;
+       u32 hvs_format = format->hvs;
        int ret, i;
 
        ret = vc4_plane_setup_clipping_and_scaling(state);
@@ -512,7 +514,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                scl1 = vc4_get_scl_field(state, 0);
        }
 
-       switch (fb->modifier) {
+       switch (base_format_mod) {
        case DRM_FORMAT_MOD_LINEAR:
                tiling = SCALER_CTL0_TILING_LINEAR;
                pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
@@ -535,6 +537,49 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                break;
        }
 
+       case DRM_FORMAT_MOD_BROADCOM_SAND64:
+       case DRM_FORMAT_MOD_BROADCOM_SAND128:
+       case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+               uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+
+               /* Column-based NV12 or RGBA.
+                */
+               if (fb->format->num_planes > 1) {
+                       if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+                               DRM_DEBUG_KMS("SAND format only valid for NV12/21\n");
+                               return -EINVAL;
+                       }
+                       hvs_format = HVS_PIXEL_FORMAT_H264;
+               } else {
+                       if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+                               DRM_DEBUG_KMS("SAND256 format only valid for H.264\n");
+                               return -EINVAL;
+                       }
+               }
+
+               switch (base_format_mod) {
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+                       tiling = SCALER_CTL0_TILING_64B;
+                       break;
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+                       tiling = SCALER_CTL0_TILING_128B;
+                       break;
+               case DRM_FORMAT_MOD_BROADCOM_SAND256:
+                       tiling = SCALER_CTL0_TILING_256B_OR_T;
+                       break;
+               default:
+                       break;
+               }
+
+               if (param > SCALER_TILE_HEIGHT_MASK) {
+                       DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+                       return -EINVAL;
+               }
+
+               pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+               break;
+       }
+
        default:
                DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
                              (long long)fb->modifier);
@@ -544,8 +589,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
        /* Control word */
        vc4_dlist_write(vc4_state,
                        SCALER_CTL0_VALID |
+                       VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
                        (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
-                       (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+                       (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
                        VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
                        (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
                        VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
@@ -607,8 +653,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 
        /* Pitch word 1/2 */
        for (i = 1; i < num_planes; i++) {
-               vc4_dlist_write(vc4_state,
-                               VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+               if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+                       vc4_dlist_write(vc4_state,
+                                       VC4_SET_FIELD(fb->pitches[i],
+                                                     SCALER_SRC_PITCH));
+               } else {
+                       vc4_dlist_write(vc4_state, pitch0);
+               }
        }
 
        /* Colorspace conversion words */
@@ -810,18 +861,21 @@ static int vc4_prepare_fb(struct drm_plane *plane,
        struct dma_fence *fence;
        int ret;
 
-       if ((plane->state->fb == state->fb) || !state->fb)
+       if (!state->fb)
                return 0;
 
        bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
 
+       fence = reservation_object_get_excl_rcu(bo->resv);
+       drm_atomic_set_fence_for_plane(state, fence);
+
+       if (plane->state->fb == state->fb)
+               return 0;
+
        ret = vc4_bo_inc_usecnt(bo);
        if (ret)
                return ret;
 
-       fence = reservation_object_get_excl_rcu(bo->resv);
-       drm_atomic_set_fence_for_plane(state, fence);
-
        return 0;
 }
 
@@ -848,7 +902,7 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
 
 static void vc4_plane_destroy(struct drm_plane *plane)
 {
-       drm_plane_helper_disable(plane);
+       drm_plane_helper_disable(plane, NULL);
        drm_plane_cleanup(plane);
 }
 
@@ -866,13 +920,32 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
        case DRM_FORMAT_BGR565:
        case DRM_FORMAT_ARGB1555:
        case DRM_FORMAT_XRGB1555:
-               return true;
+               switch (fourcc_mod_broadcom_mod(modifier)) {
+               case DRM_FORMAT_MOD_LINEAR:
+               case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+                       return true;
+               default:
+                       return false;
+               }
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+               switch (fourcc_mod_broadcom_mod(modifier)) {
+               case DRM_FORMAT_MOD_LINEAR:
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+               case DRM_FORMAT_MOD_BROADCOM_SAND256:
+                       return true;
+               default:
+                       return false;
+               }
        case DRM_FORMAT_YUV422:
        case DRM_FORMAT_YVU422:
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
        default:
                return (modifier == DRM_FORMAT_MOD_LINEAR);
        }
@@ -900,6 +973,9 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
        unsigned i;
        static const uint64_t modifiers[] = {
                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+               DRM_FORMAT_MOD_BROADCOM_SAND128,
+               DRM_FORMAT_MOD_BROADCOM_SAND64,
+               DRM_FORMAT_MOD_BROADCOM_SAND256,
                DRM_FORMAT_MOD_LINEAR,
                DRM_FORMAT_MOD_INVALID
        };
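
The SAND cases above split fb->modifier into a base modifier and a parameter
(the column height in pixels). A sketch of building and unpacking such a
modifier with the drm_fourcc.h helpers, assuming the
DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT() macro from the same header:

	#include <linux/bug.h>
	#include <drm/drm_fourcc.h>

	static void example_sand_roundtrip(void)
	{
		/* A SAND128 buffer whose columns are 96 pixels tall. */
		u64 mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

		/* Recover the two halves the way vc4_plane_mode_set() does. */
		WARN_ON(fourcc_mod_broadcom_mod(mod) !=
			DRM_FORMAT_MOD_BROADCOM_SAND128);
		WARN_ON(fourcc_mod_broadcom_param(mod) != 96);
	}
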
index d1fb6fec46eb2c6ee016c59e238c368764ffe8b3..d6864fa4bd141d032a4cfb730841e16e8321d192 100644 (file)
@@ -1031,6 +1031,12 @@ enum hvs_pixel_format {
 #define SCALER_SRC_PITCH_MASK                  VC4_MASK(15, 0)
 #define SCALER_SRC_PITCH_SHIFT                 0
 
+/* PITCH0/1/2 fields for tiled (SAND). */
+#define SCALER_TILE_SKIP_0_MASK                        VC4_MASK(18, 16)
+#define SCALER_TILE_SKIP_0_SHIFT               16
+#define SCALER_TILE_HEIGHT_MASK                        VC4_MASK(15, 0)
+#define SCALER_TILE_HEIGHT_SHIFT               0
+
 /* PITCH0 fields for T-tiled. */
 #define SCALER_PITCH0_TILE_WIDTH_L_MASK                VC4_MASK(22, 16)
 #define SCALER_PITCH0_TILE_WIDTH_L_SHIFT       16
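
SCALER_TILE_HEIGHT and SCALER_TILE_SKIP_0 are packed with vc4's generic
VC4_MASK()/VC4_SET_FIELD() macros, where FIELD##_MASK bounds the value and
FIELD##_SHIFT positions it. A sketch of the idiom as vc4_plane_mode_set()
applies it to the new fields (the range check mirrors the driver's):

	static u32 example_sand_pitch0(u32 tile_height)
	{
		/* SCALER_TILE_HEIGHT_MASK is VC4_MASK(15, 0) == 0xffff. */
		if (WARN_ON(tile_height > SCALER_TILE_HEIGHT_MASK))
			return 0;

		return VC4_SET_FIELD(tile_height, SCALER_TILE_HEIGHT);
	}
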
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
new file mode 100644 (file)
index 0000000..6e23c50
--- /dev/null
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Broadcom
+ *
+ * Authors:
+ *     Eric Anholt <eric@anholt.net>
+ *     Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_writeback.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+/* Base address of the output.  Raster formats must be 4-byte aligned,
+ * T and LT must be 16-byte aligned or maybe utile-aligned (docs are
+ * inconsistent, but probably utile).
+ */
+#define TXP_DST_PTR            0x00
+
+/* Pitch in bytes for raster images, 16-byte aligned.  For tiled, it's
+ * the width in tiles.
+ */
+#define TXP_DST_PITCH          0x04
+/* For T-tiled images, DST_PITCH should be the number of tiles wide,
+ * shifted up.
+ */
+# define TXP_T_TILE_WIDTH_SHIFT                7
+/* For LT-tiled images, DST_PITCH should be the number of utiles wide,
+ * shifted up.
+ */
+# define TXP_LT_TILE_WIDTH_SHIFT       4
+
+/* Pre-rotation width/height of the image.  Must match HVS config.
+ *
+ * If TFORMAT is set, width is limited to 1920 for 32-bit formats and
+ * 3840 for 16-bit formats, and width/height must be tile- or
+ * utile-aligned as appropriate.  If
+ * transposing (rotating), width is limited to 1920.
+ *
+ * Height is limited to various numbers between 4088 and 4095.  I'd
+ * just use 4088 to be safe.
+ */
+#define TXP_DIM                        0x08
+# define TXP_HEIGHT_SHIFT              16
+# define TXP_HEIGHT_MASK               GENMASK(31, 16)
+# define TXP_WIDTH_SHIFT               0
+# define TXP_WIDTH_MASK                        GENMASK(15, 0)
+
+#define TXP_DST_CTRL           0x0c
+/* These bits are set to 0x54 */
+# define TXP_PILOT_SHIFT               24
+# define TXP_PILOT_MASK                        GENMASK(31, 24)
+/* Bits 22-23 are set to 0x01 */
+# define TXP_VERSION_SHIFT             22
+# define TXP_VERSION_MASK              GENMASK(23, 22)
+
+/* Powers down the internal memory. */
+# define TXP_POWERDOWN                 BIT(21)
+
+/* Enables storing the alpha component in 8888/4444, instead of
+ * filling with ~ALPHA_INVERT.
+ */
+# define TXP_ALPHA_ENABLE              BIT(20)
+
+/* 4 bits, each enables stores for a channel in each set of 4 bytes.
+ * Set to 0xf for normal operation.
+ */
+# define TXP_BYTE_ENABLE_SHIFT         16
+# define TXP_BYTE_ENABLE_MASK          GENMASK(19, 16)
+
+/* Debug: Generate VSTART again at EOF. */
+# define TXP_VSTART_AT_EOF             BIT(15)
+
+/* Debug: Terminate the current frame immediately.  Stops AXI
+ * writes.
+ */
+# define TXP_ABORT                     BIT(14)
+
+# define TXP_DITHER                    BIT(13)
+
+/* Inverts alpha if TXP_ALPHA_ENABLE, chooses fill value for
+ * !TXP_ALPHA_ENABLE.
+ */
+# define TXP_ALPHA_INVERT              BIT(12)
+
+/* Note: I've listed the channels here in high bit (in byte 3/2/1) to
+ * low bit (in byte 0) order.
+ */
+# define TXP_FORMAT_SHIFT              8
+# define TXP_FORMAT_MASK               GENMASK(11, 8)
+# define TXP_FORMAT_ABGR4444           0
+# define TXP_FORMAT_ARGB4444           1
+# define TXP_FORMAT_BGRA4444           2
+# define TXP_FORMAT_RGBA4444           3
+# define TXP_FORMAT_BGR565             6
+# define TXP_FORMAT_RGB565             7
+/* 888s are non-rotated, raster-only */
+# define TXP_FORMAT_BGR888             8
+# define TXP_FORMAT_RGB888             9
+# define TXP_FORMAT_ABGR8888           12
+# define TXP_FORMAT_ARGB8888           13
+# define TXP_FORMAT_BGRA8888           14
+# define TXP_FORMAT_RGBA8888           15
+
+/* If TFORMAT is set, generates LT instead of T format. */
+# define TXP_LINEAR_UTILE              BIT(7)
+
+/* Rotate output by 90 degrees. */
+# define TXP_TRANSPOSE                 BIT(6)
+
+/* Generate a tiled format for V3D. */
+# define TXP_TFORMAT                   BIT(5)
+
+/* Generates some undefined test mode output. */
+# define TXP_TEST_MODE                 BIT(4)
+
+/* Request odd field from HVS. */
+# define TXP_FIELD                     BIT(3)
+
+/* Raise interrupt when idle. */
+# define TXP_EI                                BIT(2)
+
+/* Set when generating a frame, clears when idle. */
+# define TXP_BUSY                      BIT(1)
+
+/* Starts a frame.  Self-clearing. */
+# define TXP_GO                                BIT(0)
+
+/* Number of lines received and committed to memory. */
+#define TXP_PROGRESS           0x10
+
+#define TXP_READ(offset) readl(txp->regs + (offset))
+#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset))
+
+struct vc4_txp {
+       struct platform_device *pdev;
+
+       struct drm_writeback_connector connector;
+
+       void __iomem *regs;
+};
+
+static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct vc4_txp, connector.encoder);
+}
+
+static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
+{
+       return container_of(conn, struct vc4_txp, connector.base);
+}
+
+#define TXP_REG(reg) { reg, #reg }
+static const struct {
+       u32 reg;
+       const char *name;
+} txp_regs[] = {
+       TXP_REG(TXP_DST_PTR),
+       TXP_REG(TXP_DST_PITCH),
+       TXP_REG(TXP_DIM),
+       TXP_REG(TXP_DST_CTRL),
+       TXP_REG(TXP_PROGRESS),
+};
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_txp *txp = vc4->txp;
+       int i;
+
+       if (!txp)
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
+               seq_printf(m, "%s (0x%04x): 0x%08x\n",
+                          txp_regs[i].name, txp_regs[i].reg,
+                          TXP_READ(txp_regs[i].reg));
+       }
+
+       return 0;
+}
+#endif
+
+static int vc4_txp_connector_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+
+       return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+                                   dev->mode_config.max_height);
+}
+
+static enum drm_mode_status
+vc4_txp_connector_mode_valid(struct drm_connector *connector,
+                            struct drm_display_mode *mode)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       int w = mode->hdisplay, h = mode->vdisplay;
+
+       if (w < mode_config->min_width || w > mode_config->max_width)
+               return MODE_BAD_HVALUE;
+
+       if (h < mode_config->min_height || h > mode_config->max_height)
+               return MODE_BAD_VVALUE;
+
+       return MODE_OK;
+}
+
+static const u32 drm_fmts[] = {
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_RGBX8888,
+       DRM_FORMAT_BGRX8888,
+       DRM_FORMAT_RGBA8888,
+       DRM_FORMAT_BGRA8888,
+};
+
+static const u32 txp_fmts[] = {
+       TXP_FORMAT_RGB888,
+       TXP_FORMAT_BGR888,
+       TXP_FORMAT_ARGB8888,
+       TXP_FORMAT_ABGR8888,
+       TXP_FORMAT_ARGB8888,
+       TXP_FORMAT_ABGR8888,
+       TXP_FORMAT_RGBA8888,
+       TXP_FORMAT_BGRA8888,
+       TXP_FORMAT_RGBA8888,
+       TXP_FORMAT_BGRA8888,
+};
+
+static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
+                                       struct drm_connector_state *conn_state)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_gem_cma_object *gem;
+       struct drm_framebuffer *fb;
+       int i;
+
+       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+               return 0;
+
+       crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
+                                                  conn_state->crtc);
+
+       fb = conn_state->writeback_job->fb;
+       if (fb->width != crtc_state->mode.hdisplay ||
+           fb->height != crtc_state->mode.vdisplay) {
+               DRM_DEBUG_KMS("Invalid framebuffer size %ux%u\n",
+                             fb->width, fb->height);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+               if (fb->format->format == drm_fmts[i])
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(drm_fmts))
+               return -EINVAL;
+
+       gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+       /* Pitch must be 16-byte aligned. */
+       if (fb->pitches[0] & GENMASK(3, 0))
+               return -EINVAL;
+
+       vc4_crtc_txp_armed(crtc_state);
+
+       return 0;
+}
+
+static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
+                                       struct drm_connector_state *conn_state)
+{
+       struct vc4_txp *txp = connector_to_vc4_txp(conn);
+       struct drm_gem_cma_object *gem;
+       struct drm_display_mode *mode;
+       struct drm_framebuffer *fb;
+       u32 ctrl;
+       int i;
+
+       if (WARN_ON(!conn_state->writeback_job ||
+                   !conn_state->writeback_job->fb))
+               return;
+
+       mode = &conn_state->crtc->state->adjusted_mode;
+       fb = conn_state->writeback_job->fb;
+
+       for (i = 0; i < ARRAY_SIZE(drm_fmts); i++) {
+               if (fb->format->format == drm_fmts[i])
+                       break;
+       }
+
+       if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
+               return;
+
+       ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+              VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
+              VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
+
+       if (fb->format->has_alpha)
+               ctrl |= TXP_ALPHA_ENABLE;
+
+       gem = drm_fb_cma_get_gem_obj(fb, 0);
+       TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+       TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
+       TXP_WRITE(TXP_DIM,
+                 VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
+                 VC4_SET_FIELD(mode->vdisplay, TXP_HEIGHT));
+
+       TXP_WRITE(TXP_DST_CTRL, ctrl);
+
+       drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+}
+
+static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
+       .get_modes = vc4_txp_connector_get_modes,
+       .mode_valid = vc4_txp_connector_mode_valid,
+       .atomic_check = vc4_txp_connector_atomic_check,
+       .atomic_commit = vc4_txp_connector_atomic_commit,
+};
+
+static enum drm_connector_status
+vc4_txp_connector_detect(struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+static void vc4_txp_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vc4_txp_connector_funcs = {
+       .detect = vc4_txp_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = vc4_txp_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void vc4_txp_encoder_disable(struct drm_encoder *encoder)
+{
+       struct vc4_txp *txp = encoder_to_vc4_txp(encoder);
+
+       if (TXP_READ(TXP_DST_CTRL) & TXP_BUSY) {
+               unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+               TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);
+
+               while (TXP_READ(TXP_DST_CTRL) & TXP_BUSY &&
+                      time_before(jiffies, timeout))
+                       ;
+
+               WARN_ON(TXP_READ(TXP_DST_CTRL) & TXP_BUSY);
+       }
+
+       TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
+}
+
+static const struct drm_encoder_helper_funcs vc4_txp_encoder_helper_funcs = {
+       .disable = vc4_txp_encoder_disable,
+};
+
+static irqreturn_t vc4_txp_interrupt(int irq, void *data)
+{
+       struct vc4_txp *txp = data;
+
+       TXP_WRITE(TXP_DST_CTRL, TXP_READ(TXP_DST_CTRL) & ~TXP_EI);
+       vc4_crtc_handle_vblank(to_vc4_crtc(txp->connector.base.state->crtc));
+       drm_writeback_signal_completion(&txp->connector, 0);
+
+       return IRQ_HANDLED;
+}
+
+static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
+       struct vc4_txp *txp;
+       int ret, irq;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       txp = devm_kzalloc(dev, sizeof(*txp), GFP_KERNEL);
+       if (!txp)
+               return -ENOMEM;
+
+       txp->pdev = pdev;
+
+       txp->regs = vc4_ioremap_regs(pdev, 0);
+       if (IS_ERR(txp->regs))
+               return PTR_ERR(txp->regs);
+
+       drm_connector_helper_add(&txp->connector.base,
+                                &vc4_txp_connector_helper_funcs);
+       ret = drm_writeback_connector_init(drm, &txp->connector,
+                                          &vc4_txp_connector_funcs,
+                                          &vc4_txp_encoder_helper_funcs,
+                                          drm_fmts, ARRAY_SIZE(drm_fmts));
+       if (ret)
+               return ret;
+
+       ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
+                              dev_name(dev), txp);
+       if (ret)
+               return ret;
+
+       dev_set_drvdata(dev, txp);
+       vc4->txp = txp;
+
+       return 0;
+}
+
+static void vc4_txp_unbind(struct device *dev, struct device *master,
+                          void *data)
+{
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
+       struct vc4_txp *txp = dev_get_drvdata(dev);
+
+       vc4_txp_connector_destroy(&txp->connector.base);
+
+       vc4->txp = NULL;
+}
+
+static const struct component_ops vc4_txp_ops = {
+       .bind   = vc4_txp_bind,
+       .unbind = vc4_txp_unbind,
+};
+
+static int vc4_txp_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &vc4_txp_ops);
+}
+
+static int vc4_txp_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &vc4_txp_ops);
+       return 0;
+}
+
+static const struct of_device_id vc4_txp_dt_match[] = {
+       { .compatible = "brcm,bcm2835-txp" },
+       { /* sentinel */ },
+};
+
+struct platform_driver vc4_txp_driver = {
+       .probe = vc4_txp_probe,
+       .remove = vc4_txp_remove,
+       .driver = {
+               .name = "vc4_txp",
+               .of_match_table = vc4_txp_dt_match,
+       },
+};
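
vc4_txp_encoder_disable() open-codes its one-second TXP_BUSY poll. The same
wait could be expressed with readl_poll_timeout() from <linux/iopoll.h>; a
sketch of that alternative (reusing the file's TXP_* macros), not what the
driver ships:

	#include <linux/iopoll.h>

	static void example_txp_stop(struct vc4_txp *txp)
	{
		u32 ctrl;

		TXP_WRITE(TXP_DST_CTRL, TXP_ABORT);

		/* Re-read every 1000us, up to 1s, until TXP_BUSY clears. */
		WARN_ON(readl_poll_timeout(txp->regs + TXP_DST_CTRL, ctrl,
					   !(ctrl & TXP_BUSY), 1000, 1000000));

		TXP_WRITE(TXP_DST_CTRL, TXP_POWERDOWN);
	}
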
index 3a9a302247a2f162fc77688bb25ae9ebf86523b5..8e7facb6514efdc7e0d90eadb7240e867cff4ba4 100644 (file)
@@ -404,7 +404,7 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
                                   VC4_VEC_TV_MODE_NTSC);
        vec->tv_mode = &vc4_vec_tv_modes[VC4_VEC_TV_MODE_NTSC];
 
-       drm_mode_connector_attach_encoder(connector, vec->encoder);
+       drm_connector_attach_encoder(connector, vec->encoder);
 
        return connector;
 }
index 2524ff116f00d922267213d1a4f8554172057400..0e5620f76ee09b43d3fbc2b03ad20e3797bd1d2a 100644 (file)
@@ -61,23 +61,22 @@ static void vgem_gem_free_object(struct drm_gem_object *obj)
        kfree(vgem_obj);
 }
 
-static int vgem_gem_fault(struct vm_fault *vmf)
+static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
        /* We don't use vmf->pgoff since that has the fake offset */
        unsigned long vaddr = vmf->address;
-       int ret;
+       vm_fault_t ret = VM_FAULT_SIGBUS;
        loff_t num_pages;
        pgoff_t page_offset;
        page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
 
        num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
 
-       if (page_offset > num_pages)
+       if (page_offset >= num_pages)
                return VM_FAULT_SIGBUS;
 
-       ret = -ENOENT;
        mutex_lock(&obj->pages_lock);
        if (obj->pages) {
                get_page(obj->pages[page_offset]);
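
vm_fault_t is a dedicated return type so fault handlers cannot leak raw
errnos into the VM. A sketch of the pattern under the new signature, using
vmf_insert_page() (which itself returns a vm_fault_t); example_lookup_page()
is a hypothetical helper, not part of vgem:

	static struct page *example_lookup_page(void *priv, pgoff_t pgoff);

	static vm_fault_t example_gem_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		/* As in vgem, vmf->pgoff carries the fake GEM offset. */
		pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
		struct page *page;

		page = example_lookup_page(vma->vm_private_data, pgoff);
		if (!page)
			return VM_FAULT_SIGBUS;

		/* Translates insertion errors into vm_fault_t codes. */
		return vmf_insert_page(vma, vmf->address, page);
	}
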
index a5edd86603d9ee3321a5cba885f753cc318ff338..25503b93359910f16caa1c64cbe14f7030c2a7c6 100644 (file)
@@ -28,6 +28,7 @@
 #include "virtgpu_drv.h"
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 
 #define XRES_MIN    32
 #define YRES_MIN    32
@@ -48,16 +49,6 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
 };
 
-static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
-       struct virtio_gpu_framebuffer *virtio_gpu_fb
-               = to_virtio_gpu_framebuffer(fb);
-
-       drm_gem_object_put_unlocked(virtio_gpu_fb->obj);
-       drm_framebuffer_cleanup(fb);
-       kfree(virtio_gpu_fb);
-}
-
 static int
 virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
                                     struct drm_file *file_priv,
@@ -71,20 +62,9 @@ virtio_gpu_framebuffer_surface_dirty(struct drm_framebuffer *fb,
        return virtio_gpu_surface_dirty(virtio_gpu_fb, clips, num_clips);
 }
 
-static int
-virtio_gpu_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                    struct drm_file *file_priv,
-                                    unsigned int *handle)
-{
-       struct virtio_gpu_framebuffer *virtio_gpu_fb =
-               to_virtio_gpu_framebuffer(fb);
-
-       return drm_gem_handle_create(file_priv, virtio_gpu_fb->obj, handle);
-}
-
 static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
-       .create_handle = virtio_gpu_framebuffer_create_handle,
-       .destroy = virtio_gpu_user_framebuffer_destroy,
+       .create_handle = drm_gem_fb_create_handle,
+       .destroy = drm_gem_fb_destroy,
        .dirty = virtio_gpu_framebuffer_surface_dirty,
 };
 
@@ -97,7 +77,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
        int ret;
        struct virtio_gpu_object *bo;
 
-       vgfb->obj = obj;
+       vgfb->base.obj[0] = obj;
 
        bo = gem_to_virtio_gpu_obj(obj);
 
@@ -105,7 +85,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
 
        ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);
        if (ret) {
-               vgfb->obj = NULL;
+               vgfb->base.obj[0] = NULL;
                return ret;
        }
 
@@ -302,8 +282,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
        drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                  &virtio_gpu_crtc_funcs, NULL);
        drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
-       primary->crtc = crtc;
-       cursor->crtc = crtc;
 
        drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
                           DRM_MODE_CONNECTOR_VIRTUAL);
@@ -314,7 +292,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
        drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
        encoder->possible_crtcs = 1 << index;
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
        drm_connector_register(connector);
        return 0;
 }
index d25c8ca224aa12c58fae5d58a7ac726685d1bacf..65605e207bbe88028c76a19fb2e4eb1e4d216401 100644 (file)
@@ -124,7 +124,6 @@ struct virtio_gpu_output {
 
 struct virtio_gpu_framebuffer {
        struct drm_framebuffer base;
-       struct drm_gem_object *obj;
        int x1, y1, x2, y2; /* dirty rect */
        spinlock_t dirty_lock;
        uint32_t hw_res_handle;
index 8af69ab58b89ffb27fe2ebb27fb2948c7d8b18f5..a121b1c79522fbf37dd79f51b0f2006bbfc517de 100644 (file)
@@ -46,7 +46,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
        int bpp = fb->base.format->cpp[0];
        int x2, y2;
        unsigned long flags;
-       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj);
+       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
 
        if ((width <= 0) ||
            (x + width > fb->base.width) ||
@@ -121,7 +121,7 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
                             unsigned int num_clips)
 {
        struct virtio_gpu_device *vgdev = vgfb->base.dev->dev_private;
-       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->obj);
+       struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
        struct drm_clip_rect norect;
        struct drm_clip_rect *clips_ptr;
        int left, right, top, bottom;
@@ -305,8 +305,8 @@ static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
 
        drm_fb_helper_unregister_fbi(&vgfbdev->helper);
 
-       if (vgfb->obj)
-               vgfb->obj = NULL;
+       if (vgfb->base.obj[0])
+               vgfb->base.obj[0] = NULL;
        drm_fb_helper_fini(&vgfbdev->helper);
        drm_framebuffer_cleanup(&vgfb->base);
 
index 23353521f903adb15a8587f53ff3af889962704f..00c742a441bfc546b2369b55bebd0a0f9bec6e05 100644 (file)
@@ -36,11 +36,6 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
        return "controlq";
 }
 
-static bool virtio_enable_signaling(struct dma_fence *f)
-{
-       return true;
-}
-
 static bool virtio_signaled(struct dma_fence *f)
 {
        struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -67,9 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
 static const struct dma_fence_ops virtio_fence_ops = {
        .get_driver_name     = virtio_get_driver_name,
        .get_timeline_name   = virtio_get_timeline_name,
-       .enable_signaling    = virtio_enable_signaling,
        .signaled            = virtio_signaled,
-       .wait                = dma_fence_default_wait,
        .fence_value_str     = virtio_fence_value_str,
        .timeline_value_str  = virtio_timeline_value_str,
 };
index 71ba455af915b78298662ee5daf588bd554984b0..dc5b5b2b7aab3f470ed9c3035cc7edc62790d478 100644 (file)
@@ -154,7 +154,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
-               bo = gem_to_virtio_gpu_obj(vgfb->obj);
+               bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
                if (bo->dumb) {
                        virtio_gpu_cmd_transfer_to_host_2d
@@ -208,7 +208,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
-               bo = gem_to_virtio_gpu_obj(vgfb->obj);
+               bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
                handle = bo->hw_res_handle;
        } else {
                handle = 0;
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
new file mode 100644 (file)
index 0000000..986297d
--- /dev/null
@@ -0,0 +1,3 @@
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o
+
+obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
new file mode 100644 (file)
index 0000000..875fca6
--- /dev/null
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+       struct vkms_output *output = container_of(timer, struct vkms_output,
+                                                 vblank_hrtimer);
+       struct drm_crtc *crtc = &output->crtc;
+       int ret_overrun;
+       bool ret;
+
+       ret = drm_crtc_handle_vblank(crtc);
+       if (!ret)
+               DRM_ERROR("vkms failed to handle vblank\n");
+
+       ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+                                         output->period_ns);
+
+       return HRTIMER_RESTART;
+}
+
+static int vkms_enable_vblank(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+       struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+       drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+       hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       out->vblank_hrtimer.function = &vkms_vblank_simulate;
+       out->period_ns = ktime_set(0, vblank->framedur_ns);
+       hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+
+       return 0;
+}
+
+static void vkms_disable_vblank(struct drm_crtc *crtc)
+{
+       struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+       hrtimer_cancel(&out->vblank_hrtimer);
+}
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+                              int *max_error, ktime_t *vblank_time,
+                              bool in_vblank_irq)
+{
+       struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+       struct vkms_output *output = &vkmsdev->output;
+
+       *vblank_time = output->vblank_hrtimer.node.expires;
+
+       return true;
+}
+
+static const struct drm_crtc_funcs vkms_crtc_funcs = {
+       .set_config             = drm_atomic_helper_set_config,
+       .destroy                = drm_crtc_cleanup,
+       .page_flip              = drm_atomic_helper_page_flip,
+       .reset                  = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
+       .enable_vblank          = vkms_enable_vblank,
+       .disable_vblank         = vkms_disable_vblank,
+};
+
+static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_state)
+{
+       drm_crtc_vblank_on(crtc);
+}
+
+static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_state)
+{
+       drm_crtc_vblank_off(crtc);
+}
+
+static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
+{
+       unsigned long flags;
+
+       if (crtc->state->event) {
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+               if (drm_crtc_vblank_get(crtc) != 0)
+                       drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               else
+                       drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+               crtc->state->event = NULL;
+       }
+}
+
+static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+       .atomic_flush   = vkms_crtc_atomic_flush,
+       .atomic_enable  = vkms_crtc_atomic_enable,
+       .atomic_disable = vkms_crtc_atomic_disable,
+};
+
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                  struct drm_plane *primary, struct drm_plane *cursor)
+{
+       int ret;
+
+       ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+                                       &vkms_crtc_funcs, NULL);
+       if (ret) {
+               DRM_ERROR("Failed to init CRTC\n");
+               return ret;
+       }
+
+       drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+
+       return ret;
+}
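
vkms_enable_vblank() relies on drm_calc_timestamping_constants() to derive
vblank->framedur_ns from the mode before arming the hrtimer. Roughly, with
mode->crtc_clock in kHz, that duration works out as follows (a sketch, not
the helper's exact code):

	#include <linux/math64.h>

	static u64 example_frame_duration_ns(const struct drm_display_mode *mode)
	{
		u64 frame_pixels = (u64)mode->crtc_htotal * mode->crtc_vtotal;

		/* pixels per frame / pixels per second, scaled to ns. */
		return div_u64(frame_pixels * 1000000, mode->crtc_clock);
	}
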
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
new file mode 100644 (file)
index 0000000..6e728b8
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "vkms_drv.h"
+
+#define DRIVER_NAME    "vkms"
+#define DRIVER_DESC    "Virtual Kernel Mode Setting"
+#define DRIVER_DATE    "20180514"
+#define DRIVER_MAJOR   1
+#define DRIVER_MINOR   0
+
+static struct vkms_device *vkms_device;
+
+static const struct file_operations vkms_driver_fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .mmap           = drm_gem_mmap,
+       .unlocked_ioctl = drm_ioctl,
+       .compat_ioctl   = drm_compat_ioctl,
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .llseek         = no_llseek,
+       .release        = drm_release,
+};
+
+static const struct vm_operations_struct vkms_gem_vm_ops = {
+       .fault = vkms_gem_fault,
+       .open = drm_gem_vm_open,
+       .close = drm_gem_vm_close,
+};
+
+static void vkms_release(struct drm_device *dev)
+{
+       struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
+
+       platform_device_unregister(vkms->platform);
+       drm_atomic_helper_shutdown(&vkms->drm);
+       drm_mode_config_cleanup(&vkms->drm);
+       drm_dev_fini(&vkms->drm);
+}
+
+static struct drm_driver vkms_driver = {
+       .driver_features        = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
+       .release                = vkms_release,
+       .fops                   = &vkms_driver_fops,
+       .dumb_create            = vkms_dumb_create,
+       .dumb_map_offset        = vkms_dumb_map,
+       .gem_vm_ops             = &vkms_gem_vm_ops,
+       .gem_free_object_unlocked = vkms_gem_free_object,
+       .get_vblank_timestamp   = vkms_get_vblank_timestamp,
+
+       .name                   = DRIVER_NAME,
+       .desc                   = DRIVER_DESC,
+       .date                   = DRIVER_DATE,
+       .major                  = DRIVER_MAJOR,
+       .minor                  = DRIVER_MINOR,
+};
+
+static const struct drm_mode_config_funcs vkms_mode_funcs = {
+       .fb_create = drm_gem_fb_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int vkms_modeset_init(struct vkms_device *vkmsdev)
+{
+       struct drm_device *dev = &vkmsdev->drm;
+
+       drm_mode_config_init(dev);
+       dev->mode_config.funcs = &vkms_mode_funcs;
+       dev->mode_config.min_width = XRES_MIN;
+       dev->mode_config.min_height = YRES_MIN;
+       dev->mode_config.max_width = XRES_MAX;
+       dev->mode_config.max_height = YRES_MAX;
+
+       return vkms_output_init(vkmsdev);
+}
+
+static int __init vkms_init(void)
+{
+       int ret;
+
+       vkms_device = kzalloc(sizeof(*vkms_device), GFP_KERNEL);
+       if (!vkms_device)
+               return -ENOMEM;
+
+       ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
+       if (ret)
+               goto out_free;
+
+       vkms_device->platform =
+               platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
+       if (IS_ERR(vkms_device->platform)) {
+               ret = PTR_ERR(vkms_device->platform);
+               goto out_fini;
+       }
+
+       vkms_device->drm.irq_enabled = true;
+
+       ret = drm_vblank_init(&vkms_device->drm, 1);
+       if (ret) {
+               DRM_ERROR("Failed to initialize vblank\n");
+               goto out_unregister;
+       }
+
+       ret = vkms_modeset_init(vkms_device);
+       if (ret)
+               goto out_unregister;
+
+       ret = drm_dev_register(&vkms_device->drm, 0);
+       if (ret)
+               goto out_unregister;
+
+       return 0;
+
+out_unregister:
+       platform_device_unregister(vkms_device->platform);
+
+out_fini:
+       drm_dev_fini(&vkms_device->drm);
+
+out_free:
+       kfree(vkms_device);
+       return ret;
+}
+
+static void __exit vkms_exit(void)
+{
+       if (!vkms_device) {
+               DRM_INFO("vkms_device is NULL.\n");
+               return;
+       }
+
+       drm_dev_unregister(&vkms_device->drm);
+       drm_dev_put(&vkms_device->drm);
+
+       kfree(vkms_device);
+}
+
+module_init(vkms_init);
+module_exit(vkms_exit);
+
+MODULE_AUTHOR("Haneen Mohammed <hamohammed.sa@gmail.com>");
+MODULE_AUTHOR("Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
new file mode 100644 (file)
index 0000000..07be29f
--- /dev/null
@@ -0,0 +1,78 @@
+#ifndef _VKMS_DRV_H_
+#define _VKMS_DRV_H_
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_encoder.h>
+#include <linux/hrtimer.h>
+
+#define XRES_MIN    32
+#define YRES_MIN    32
+
+#define XRES_DEF  1024
+#define YRES_DEF   768
+
+#define XRES_MAX  8192
+#define YRES_MAX  8192
+
+static const u32 vkms_formats[] = {
+       DRM_FORMAT_XRGB8888,
+};
+
+struct vkms_output {
+       struct drm_crtc crtc;
+       struct drm_encoder encoder;
+       struct drm_connector connector;
+       struct hrtimer vblank_hrtimer;
+       ktime_t period_ns;
+       struct drm_pending_vblank_event *event;
+};
+
+struct vkms_device {
+       struct drm_device drm;
+       struct platform_device *platform;
+       struct vkms_output output;
+};
+
+struct vkms_gem_object {
+       struct drm_gem_object gem;
+       struct mutex pages_lock; /* Page lock used in page fault handler */
+       struct page **pages;
+};
+
+#define drm_crtc_to_vkms_output(target) \
+       container_of(target, struct vkms_output, crtc)
+
+#define drm_device_to_vkms_device(target) \
+       container_of(target, struct vkms_device, drm)
+
+/* CRTC */
+int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                  struct drm_plane *primary, struct drm_plane *cursor);
+
+bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+                              int *max_error, ktime_t *vblank_time,
+                              bool in_vblank_irq);
+
+int vkms_output_init(struct vkms_device *vkmsdev);
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev);
+
+/* Gem stuff */
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+                                      struct drm_file *file,
+                                      u32 *handle,
+                                      u64 size);
+
+int vkms_gem_fault(struct vm_fault *vmf);
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+                    struct drm_mode_create_dumb *args);
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+                 u32 handle, u64 *offset);
+
+void vkms_gem_free_object(struct drm_gem_object *obj);
+
+#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
new file mode 100644 (file)
index 0000000..c7e3836
--- /dev/null
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/shmem_fs.h>
+
+#include "vkms_drv.h"
+
+static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
+                                                u64 size)
+{
+       struct vkms_gem_object *obj;
+       int ret;
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       size = roundup(size, PAGE_SIZE);
+       ret = drm_gem_object_init(dev, &obj->gem, size);
+       if (ret) {
+               kfree(obj);
+               return ERR_PTR(ret);
+       }
+
+       mutex_init(&obj->pages_lock);
+
+       return obj;
+}
+
+void vkms_gem_free_object(struct drm_gem_object *obj)
+{
+       struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
+                                                  gem);
+
+       kvfree(gem->pages);
+       mutex_destroy(&gem->pages_lock);
+       drm_gem_object_release(obj);
+       kfree(gem);
+}
+
+int vkms_gem_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct vkms_gem_object *obj = vma->vm_private_data;
+       unsigned long vaddr = vmf->address;
+       pgoff_t page_offset;
+       loff_t num_pages;
+       int ret;
+
+       page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
+       num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
+
+       if (page_offset >= num_pages)
+               return VM_FAULT_SIGBUS;
+
+       ret = -ENOENT;
+       mutex_lock(&obj->pages_lock);
+       if (obj->pages) {
+               get_page(obj->pages[page_offset]);
+               vmf->page = obj->pages[page_offset];
+               ret = 0;
+       }
+       mutex_unlock(&obj->pages_lock);
+       if (ret) {
+               struct page *page;
+               struct address_space *mapping;
+
+               mapping = file_inode(obj->gem.filp)->i_mapping;
+               page = shmem_read_mapping_page(mapping, page_offset);
+
+               if (!IS_ERR(page)) {
+                       vmf->page = page;
+                       ret = 0;
+               } else {
+                       switch (PTR_ERR(page)) {
+                       case -ENOSPC:
+                       case -ENOMEM:
+                               ret = VM_FAULT_OOM;
+                               break;
+                       case -EBUSY:
+                               ret = VM_FAULT_RETRY;
+                               break;
+                       case -EFAULT:
+                       case -EINVAL:
+                               ret = VM_FAULT_SIGBUS;
+                               break;
+                       default:
+                               WARN_ON(PTR_ERR(page));
+                               ret = VM_FAULT_SIGBUS;
+                               break;
+                       }
+               }
+       }
+       return ret;
+}
+
+struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+                                      struct drm_file *file,
+                                      u32 *handle,
+                                      u64 size)
+{
+       struct vkms_gem_object *obj;
+       int ret;
+
+       if (!file || !dev || !handle)
+               return ERR_PTR(-EINVAL);
+
+       obj = __vkms_gem_create(dev, size);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
+
+       ret = drm_gem_handle_create(file, &obj->gem, handle);
+       drm_gem_object_put_unlocked(&obj->gem);
+       if (ret) {
+               drm_gem_object_release(&obj->gem);
+               kfree(obj);
+               return ERR_PTR(ret);
+       }
+
+       return &obj->gem;
+}
+
+int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
+                    struct drm_mode_create_dumb *args)
+{
+       struct drm_gem_object *gem_obj;
+       u64 pitch, size;
+
+       if (!args || !dev || !file)
+               return -EINVAL;
+
+       pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+       size = pitch * args->height;
+
+       if (!size)
+               return -EINVAL;
+
+       gem_obj = vkms_gem_create(dev, file, &args->handle, size);
+       if (IS_ERR(gem_obj))
+               return PTR_ERR(gem_obj);
+
+       args->size = gem_obj->size;
+       args->pitch = pitch;
+
+       DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+
+       return 0;
+}
+
+int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
+                 u32 handle, u64 *offset)
+{
+       struct drm_gem_object *obj;
+       int ret;
+
+       obj = drm_gem_object_lookup(file, handle);
+       if (!obj)
+               return -ENOENT;
+
+       if (!obj->filp) {
+               ret = -EINVAL;
+               goto unref;
+       }
+
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret)
+               goto unref;
+
+       *offset = drm_vma_node_offset_addr(&obj->vma_node);
+unref:
+       drm_gem_object_put_unlocked(obj);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
new file mode 100644 (file)
index 0000000..901012c
--- /dev/null
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static void vkms_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs vkms_connector_funcs = {
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = vkms_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs vkms_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+static int vkms_conn_get_modes(struct drm_connector *connector)
+{
+       int count;
+
+       count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+       drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+       return count;
+}
+
+static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+       .get_modes    = vkms_conn_get_modes,
+};
+
+int vkms_output_init(struct vkms_device *vkmsdev)
+{
+       struct vkms_output *output = &vkmsdev->output;
+       struct drm_device *dev = &vkmsdev->drm;
+       struct drm_connector *connector = &output->connector;
+       struct drm_encoder *encoder = &output->encoder;
+       struct drm_crtc *crtc = &output->crtc;
+       struct drm_plane *primary;
+       int ret;
+
+       primary = vkms_plane_init(vkmsdev);
+       if (IS_ERR(primary))
+               return PTR_ERR(primary);
+
+       ret = vkms_crtc_init(dev, crtc, primary, NULL);
+       if (ret)
+               goto err_crtc;
+
+       ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
+                                DRM_MODE_CONNECTOR_VIRTUAL);
+       if (ret) {
+               DRM_ERROR("Failed to init connector\n");
+               goto err_connector;
+       }
+
+       drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
+
+       ret = drm_connector_register(connector);
+       if (ret) {
+               DRM_ERROR("Failed to register connector\n");
+               goto err_connector_register;
+       }
+
+       ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
+                              DRM_MODE_ENCODER_VIRTUAL, NULL);
+       if (ret) {
+               DRM_ERROR("Failed to init encoder\n");
+               goto err_encoder;
+       }
+       encoder->possible_crtcs = 1;
+
+       ret = drm_connector_attach_encoder(connector, encoder);
+       if (ret) {
+               DRM_ERROR("Failed to attach connector to encoder\n");
+               goto err_attach;
+       }
+
+       drm_mode_config_reset(dev);
+
+       return 0;
+
+err_attach:
+       drm_encoder_cleanup(encoder);
+
+err_encoder:
+       drm_connector_unregister(connector);
+
+err_connector_register:
+       drm_connector_cleanup(connector);
+
+err_connector:
+       drm_crtc_cleanup(crtc);
+
+err_crtc:
+       drm_plane_cleanup(primary);
+       return ret;
+}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
new file mode 100644 (file)
index 0000000..9f75b1e
--- /dev/null
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "vkms_drv.h"
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
+
+static const struct drm_plane_funcs vkms_plane_funcs = {
+       .update_plane           = drm_atomic_helper_update_plane,
+       .disable_plane          = drm_atomic_helper_disable_plane,
+       .destroy                = drm_plane_cleanup,
+       .reset                  = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+};
+
+static void vkms_primary_plane_update(struct drm_plane *plane,
+                                     struct drm_plane_state *old_state)
+{
+}
+
+static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+       .atomic_update          = vkms_primary_plane_update,
+};
+
+struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
+{
+       struct drm_device *dev = &vkmsdev->drm;
+       struct drm_plane *plane;
+       const u32 *formats;
+       int ret, nformats;
+
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+
+       formats = vkms_formats;
+       nformats = ARRAY_SIZE(vkms_formats);
+
+       ret = drm_universal_plane_init(dev, plane, 0,
+                                      &vkms_plane_funcs,
+                                      formats, nformats,
+                                      NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+       if (ret) {
+               kfree(plane);
+               return ERR_PTR(ret);
+       }
+
+       drm_plane_helper_add(plane, &vkms_primary_helper_funcs);
+
+       return plane;
+}
index 8c308dac99c5064e850c906d500ec87a8bf14ead..6b28a326f8bb2d3d9591c180debe24c2de61c7b5 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 config DRM_VMWGFX
        tristate "DRM driver for VMware Virtual GPU"
        depends on DRM && PCI && X86 && MMU
index 794cc9d5c9b09036967d3a5948ecdf127112be6e..09b2aa08363e6b5ca7c13c25a72b4b73d080cdde 100644 (file)
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-           vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+           vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
            vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-           vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+           vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
            vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
            vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
            vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
index 9ce2466a5d0073747bd3226dc92180eefd487eca..69c4253fbfbb1be32d22b43aa631287ff7d3deb8 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
index 2dfd57c5f4633ef5aa7f34ae5e0e784ea12ccd0e..9cbba0e8ce6a6b0b1ddba42c210c09cd7e7f2f7f 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
  * the SVGA3D protocol and remain reserved; they should not be used in the
  * future.
  *
- * IDs between 1040 and 1999 (inclusive) are available for use by the
+ * IDs between 1040 and 2999 (inclusive) are available for use by the
  * current SVGA3D protocol.
  *
- * FIFO clients other than SVGA3D should stay below 1000, or at 2000
+ * FIFO clients other than SVGA3D should stay below 1000, or at 3000
  * and up.
  */
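
To make the renumbering above concrete, here is a minimal range check implied by the comment (hypothetical helper, standalone, assuming the stated 1040..2999 window):

#include <stdbool.h>
#include <stdint.h>

static bool svga3d_cmd_id_in_range(uint32_t id)
{
        /* IDs 1040..2999 inclusive belong to the current SVGA3D protocol. */
        return id >= 1040 && id <= 2999;
}
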
 
@@ -89,19 +90,19 @@ typedef enum {
    SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN                     = 1069,
    SVGA_3D_CMD_SURFACE_DEFINE_V2                          = 1070,
    SVGA_3D_CMD_GENERATE_MIPMAPS                           = 1071,
-   SVGA_3D_CMD_VIDEO_CREATE_DECODER                       = 1072,
-   SVGA_3D_CMD_VIDEO_DESTROY_DECODER                      = 1073,
-   SVGA_3D_CMD_VIDEO_CREATE_PROCESSOR                     = 1074,
-   SVGA_3D_CMD_VIDEO_DESTROY_PROCESSOR                    = 1075,
-   SVGA_3D_CMD_VIDEO_DECODE_START_FRAME                   = 1076,
-   SVGA_3D_CMD_VIDEO_DECODE_RENDER                        = 1077,
-   SVGA_3D_CMD_VIDEO_DECODE_END_FRAME                     = 1078,
-   SVGA_3D_CMD_VIDEO_PROCESS_FRAME                        = 1079,
+   SVGA_3D_CMD_DEAD4                                      = 1072,
+   SVGA_3D_CMD_DEAD5                                      = 1073,
+   SVGA_3D_CMD_DEAD6                                      = 1074,
+   SVGA_3D_CMD_DEAD7                                      = 1075,
+   SVGA_3D_CMD_DEAD8                                      = 1076,
+   SVGA_3D_CMD_DEAD9                                      = 1077,
+   SVGA_3D_CMD_DEAD10                                     = 1078,
+   SVGA_3D_CMD_DEAD11                                     = 1079,
    SVGA_3D_CMD_ACTIVATE_SURFACE                           = 1080,
    SVGA_3D_CMD_DEACTIVATE_SURFACE                         = 1081,
    SVGA_3D_CMD_SCREEN_DMA                                 = 1082,
-   SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE                   = 1083,
-   SVGA_3D_CMD_OPEN_CONTEXT_SURFACE                       = 1084,
+   SVGA_3D_CMD_DEAD1                                      = 1083,
+   SVGA_3D_CMD_DEAD2                                      = 1084,
 
    SVGA_3D_CMD_LOGICOPS_BITBLT                            = 1085,
    SVGA_3D_CMD_LOGICOPS_TRANSBLT                          = 1086,
@@ -217,7 +218,7 @@ typedef enum {
    SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW                 = 1177,
    SVGA_3D_CMD_DX_PRED_COPY_REGION                        = 1178,
    SVGA_3D_CMD_DX_PRED_COPY                               = 1179,
-   SVGA_3D_CMD_DX_STRETCHBLT                              = 1180,
+   SVGA_3D_CMD_DX_PRESENTBLT                              = 1180,
    SVGA_3D_CMD_DX_GENMIPS                                 = 1181,
    SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE                      = 1182,
    SVGA_3D_CMD_DX_READBACK_SUBRESOURCE                    = 1183,
@@ -254,7 +255,7 @@ typedef enum {
    SVGA_3D_CMD_DX_READBACK_ALL_QUERY                      = 1214,
    SVGA_3D_CMD_DX_PRED_TRANSFER_FROM_BUFFER               = 1215,
    SVGA_3D_CMD_DX_MOB_FENCE_64                            = 1216,
-   SVGA_3D_CMD_DX_BIND_SHADER_ON_CONTEXT                  = 1217,
+   SVGA_3D_CMD_DX_BIND_ALL_SHADER                         = 1217,
    SVGA_3D_CMD_DX_HINT                                    = 1218,
    SVGA_3D_CMD_DX_BUFFER_UPDATE                           = 1219,
    SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET           = 1220,
@@ -262,17 +263,47 @@ typedef enum {
    SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET           = 1222,
 
    /*
-    * Reserve some IDs to be used for the DX11 shader types.
+    * Reserve some IDs to be used for the SM5 shader types.
     */
    SVGA_3D_CMD_DX_RESERVED1                               = 1223,
    SVGA_3D_CMD_DX_RESERVED2                               = 1224,
    SVGA_3D_CMD_DX_RESERVED3                               = 1225,
 
-   SVGA_3D_CMD_DX_MAX                                     = 1226,
-   SVGA_3D_CMD_MAX                                        = 1226,
+   SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER                    = 1226,
+   SVGA_3D_CMD_DX_MAX                                     = 1227,
+
+   SVGA_3D_CMD_SCREEN_COPY                                = 1227,
+
+   /*
+    * Reserve some IDs to be used for video.
+    */
+   SVGA_3D_CMD_VIDEO_RESERVED1                            = 1228,
+   SVGA_3D_CMD_VIDEO_RESERVED2                            = 1229,
+   SVGA_3D_CMD_VIDEO_RESERVED3                            = 1230,
+   SVGA_3D_CMD_VIDEO_RESERVED4                            = 1231,
+   SVGA_3D_CMD_VIDEO_RESERVED5                            = 1232,
+   SVGA_3D_CMD_VIDEO_RESERVED6                            = 1233,
+   SVGA_3D_CMD_VIDEO_RESERVED7                            = 1234,
+   SVGA_3D_CMD_VIDEO_RESERVED8                            = 1235,
+
+   SVGA_3D_CMD_GROW_OTABLE                                = 1236,
+   SVGA_3D_CMD_DX_GROW_COTABLE                            = 1237,
+   SVGA_3D_CMD_INTRA_SURFACE_COPY                         = 1238,
+
+   SVGA_3D_CMD_DEFINE_GB_SURFACE_V3                       = 1239,
+
+   SVGA_3D_CMD_DX_RESOLVE_COPY                            = 1240,
+   SVGA_3D_CMD_DX_PRED_RESOLVE_COPY                       = 1241,
+   SVGA_3D_CMD_DX_PRED_CONVERT_REGION                     = 1242,
+   SVGA_3D_CMD_DX_PRED_CONVERT                            = 1243,
+   SVGA_3D_CMD_WHOLE_SURFACE_COPY                         = 1244,
+
+   SVGA_3D_CMD_MAX                                        = 1245,
    SVGA_3D_CMD_FUTURE_MAX                                 = 3000
 } SVGAFifo3dCmdId;
 
+#define SVGA_NUM_3D_CMD (SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)
+
 /*
  * FIFO command format definitions:
  */
@@ -301,7 +332,7 @@ typedef
 #include "vmware_pack_begin.h"
 struct {
    uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurface1Flags         surfaceFlags;
    SVGA3dSurfaceFormat         format;
    /*
     * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -327,7 +358,7 @@ typedef
 #include "vmware_pack_begin.h"
 struct {
    uint32                      sid;
-   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurface1Flags         surfaceFlags;
    SVGA3dSurfaceFormat         format;
    /*
     * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
@@ -459,6 +490,28 @@ struct {
 #include "vmware_pack_end.h"
 SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
 
+/*
+ * Perform a surface copy within the same image.
+ * The src/dest boxes are allowed to overlap.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceImageId  surface;
+   SVGA3dCopyBox box;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdIntraSurfaceCopy;               /* SVGA_3D_CMD_INTRA_SURFACE_COPY */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 srcSid;
+   uint32 destSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdWholeSurfaceCopy;               /* SVGA_3D_CMD_WHOLE_SURFACE_COPY */
+
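
A sketch of filling the new intra-surface copy payload; the helper is hypothetical, and the SVGA3dCopyBox field names (x/y/z, w/h/d, srcx/srcy/srcz) are assumed from the usual SVGA3d type definitions:

#include "svga3d_cmd.h"   /* assumed include path for the types above */

static void fill_intra_surface_copy(SVGA3dCmdIntraSurfaceCopy *cmd,
                                    SVGA3dSurfaceImageId image)
{
        cmd->surface = image;
        /* Overlapping src/dest boxes are explicitly allowed here. */
        cmd->box.x = 0;    cmd->box.y = 0;    cmd->box.z = 0;
        cmd->box.w = 16;   cmd->box.h = 16;   cmd->box.d = 1;
        cmd->box.srcx = 8; cmd->box.srcy = 8; cmd->box.srcz = 0;
}
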
 typedef
 #include "vmware_pack_begin.h"
 struct {
@@ -772,6 +825,17 @@ struct {
 #include "vmware_pack_end.h"
 SVGA3dVertexElement;
 
+/*
+ * Should the vertex element respect the stream value?  The high bit of the
+ * stream should be set to indicate that the stream should be respected.  If
+ * the high bit is not set, the stream will be ignored and replaced by the
+ * index of the vertex element currently being considered.
+ *
+ * All guests should set this bit and correctly specify the stream going
+ * forward.
+ */
+#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)
+
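
A sketch of what "setting the high bit of the stream" means in practice; the helper is hypothetical, and the macro is restated locally so the snippet stands alone:

#include <stdint.h>

#define SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM (1 << 7)

static uint8_t svga3d_tag_stream(uint8_t stream_index)
{
        /* Set the high bit so the device honors the stream value. */
        return (uint8_t)(SVGA3D_VERTEX_ELEMENT_RESPECT_STREAM | stream_index);
}
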
 typedef
 #include "vmware_pack_begin.h"
 struct {
@@ -1102,8 +1166,6 @@ struct {
 #include "vmware_pack_end.h"
 SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
 
-
-
 typedef
 #include "vmware_pack_begin.h"
 struct {
@@ -1146,38 +1208,6 @@ struct SVGA3dCmdScreenDMA {
 #include "vmware_pack_end.h"
 SVGA3dCmdScreenDMA;        /* SVGA_3D_CMD_SCREEN_DMA */
 
-/*
- * Set Unity Surface Cookie
- *
- * Associates the supplied cookie with the surface id for use with
- * Unity.  This cookie is a hint from guest to host, there is no way
- * for the guest to readback the cookie and the host is free to drop
- * the cookie association at will.  The default value for the cookie
- * on all surfaces is 0.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdSetUnitySurfaceCookie {
-   uint32 sid;
-   uint64 cookie;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdSetUnitySurfaceCookie;   /* SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE */
-
-/*
- * Open a context-specific surface in a non-context-specific manner.
- */
-
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdOpenContextSurface {
-   uint32 sid;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdOpenContextSurface;   /* SVGA_3D_CMD_OPEN_CONTEXT_SURFACE */
-
-
 /*
  * Logic ops
  */
@@ -1324,7 +1354,7 @@ typedef
 #include "vmware_pack_begin.h"
 struct {
    SVGA3dSurfaceFormat format;
-   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurface1Flags surface1Flags;
    uint32 numMipLevels;
    uint32 multisampleCount;
    SVGA3dTextureFilter autogenFilter;
@@ -1332,7 +1362,11 @@ struct {
    SVGAMobId mobid;
    uint32 arraySize;
    uint32 mobPitch;
-   uint32 pad[5];
+   SVGA3dSurface2Flags surface2Flags;
+   uint8 multisamplePattern;
+   uint8 qualityLevel;
+   uint8  pad0[2];
+   uint32 pad1[3];
 }
 #include "vmware_pack_end.h"
 SVGAOTableSurfaceEntry;
@@ -1360,7 +1394,8 @@ struct {
 SVGAOTableShaderEntry;
 #define SVGA3D_OTABLE_SHADER_ENTRY_SIZE (sizeof(SVGAOTableShaderEntry))
 
-#define SVGA_STFLAG_PRIMARY (1 << 0)
+#define SVGA_STFLAG_PRIMARY  (1 << 0)
+#define SVGA_STFLAG_RESERVED (1 << 1) /* Added with cap SVGA_CAP_HP_CMD_QUEUE */
 typedef uint32 SVGAScreenTargetFlags;
 
 typedef
@@ -1528,6 +1563,25 @@ struct {
 #include "vmware_pack_end.h"
 SVGA3dCmdSetOTableBase64;  /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
 
+/*
+ * Guests using SVGA_3D_CMD_GROW_OTABLE are promising that
+ * the new OTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * (Otherwise, guests should use one of the SetOTableBase commands.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGAOTableType type;
+   PPN64 baseAddress;
+   uint32 sizeInBytes;
+   uint32 validSizeInBytes;
+   SVGAMobFormat ptDepth;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdGrowOTable;  /* SVGA_3D_CMD_GROW_OTABLE */
+
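
A sketch of populating the new grow command; the helper is hypothetical and the include path is assumed:

#include "svga3d_cmd.h"   /* assumed include for SVGA3dCmdGrowOTable */

static void fill_grow_otable(SVGA3dCmdGrowOTable *cmd, SVGAOTableType type,
                             PPN64 base, uint32 new_size, uint32 valid_size,
                             SVGAMobFormat depth)
{
        /* Old contents must be preserved; only the tail may be invalid. */
        cmd->type = type;
        cmd->baseAddress = base;
        cmd->sizeInBytes = new_size;
        cmd->validSizeInBytes = valid_size;
        cmd->ptDepth = depth;
}
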
 typedef
 #include "vmware_pack_begin.h"
 struct {
@@ -1615,7 +1669,7 @@ typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDefineGBSurface {
    uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
+   SVGA3dSurface1Flags surfaceFlags;
    SVGA3dSurfaceFormat format;
    uint32 numMipLevels;
    uint32 multisampleCount;
@@ -1625,6 +1679,45 @@ struct SVGA3dCmdDefineGBSurface {
 #include "vmware_pack_end.h"
 SVGA3dCmdDefineGBSurface;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
 
+/*
+ * Defines a guest-backed surface, adding the arraySize field.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v2 {
+   uint32 sid;
+   SVGA3dSurface1Flags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+   uint32 pad;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
+
+/*
+ * Defines a guest-backed surface, adding the larger flags.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDefineGBSurface_v3 {
+   uint32 sid;
+   SVGA3dSurfaceAllFlags surfaceFlags;
+   SVGA3dSurfaceFormat format;
+   uint32 numMipLevels;
+   uint32 multisampleCount;
+   SVGA3dMSPattern multisamplePattern;
+   SVGA3dMSQualityLevel qualityLevel;
+   SVGA3dTextureFilter autogenFilter;
+   SVGA3dSize size;
+   uint32 arraySize;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDefineGBSurface_v3;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V3 */
+
 /*
  * Destroy a guest-backed surface.
  */
@@ -1672,7 +1765,7 @@ SVGA3dCmdBindGBSurfaceWithPitch;   /* SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH */
 
 typedef
 #include "vmware_pack_begin.h"
-struct{
+struct SVGA3dCmdCondBindGBSurface {
    uint32 sid;
    SVGAMobId testMobid;
    SVGAMobId mobid;
@@ -2066,6 +2159,26 @@ struct {
    uint32 mobOffset;
 }
 #include "vmware_pack_end.h"
-SVGA3dCmdGBMobFence;  /* SVGA_3D_CMD_GB_MOB_FENCE*/
+SVGA3dCmdGBMobFence;  /* SVGA_3D_CMD_GB_MOB_FENCE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 stid;
+   SVGA3dSurfaceImageId dest;
+
+   uint32 statusMobId;
+   uint32 statusMobOffset;
+
+   /* Reserved fields */
+   uint32 mustBeInvalidId;
+   uint32 mustBeZero;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdScreenCopy;  /* SVGA_3D_CMD_SCREEN_COPY */
+
+#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
+#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
+#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF
 
 #endif /* _SVGA3D_CMD_H_ */
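
One plausible use of the status values above: the guest seeds the status dword with INVALID before submitting SVGA3dCmdScreenCopy and polls for the host to overwrite it. The seeding convention is an assumption, not stated by the header:

#include <stdbool.h>
#include <stdint.h>

#define SVGA_SCREEN_COPY_STATUS_FAILURE 0x00
#define SVGA_SCREEN_COPY_STATUS_SUCCESS 0x01
#define SVGA_SCREEN_COPY_STATUS_INVALID 0xFFFFFFFF

static bool screen_copy_done(const volatile uint32_t *status)
{
        /* Assumption: guest wrote INVALID before submit; host overwrites. */
        return *status != SVGA_SCREEN_COPY_STATUS_INVALID;
}
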
index c18b663f360f7676b8615fa359170af63c9a7ecc..f256560049bfde168f670842a48eed7ac13504c0 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -229,9 +230,9 @@ typedef enum {
    SVGA3D_DEVCAP_DEAD2                             = 94,
 
    /*
-    * Does the device support the DX commands?
+    * Does the device support DXContexts?
     */
-   SVGA3D_DEVCAP_DX                                = 95,
+   SVGA3D_DEVCAP_DXCONTEXT                         = 95,
 
    /*
     * What is the maximum size of a texture array?
@@ -241,21 +242,47 @@ typedef enum {
    SVGA3D_DEVCAP_MAX_TEXTURE_ARRAY_SIZE            = 96,
 
    /*
-    * What is the maximum number of vertex buffers that can
-    * be used in the DXContext inputAssembly?
+    * What is the maximum number of vertex buffers or vertex input registers
+    * that can be expected to work correctly with a DXContext?
+    *
+    * The guest is allowed to set up to SVGA3D_DX_MAX_VERTEXBUFFERS, but
+    * anything in excess of this cap is not guaranteed to render correctly.
+    *
+    * Similarly, the guest can set up to SVGA3D_DX_MAX_VERTEXINPUTREGISTERS
+    * input registers without the SVGA3D_DEVCAP_SM4_1 cap, or
+    * SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS with the SVGA3D_DEVCAP_SM4_1 cap,
+    * but only the registers up to this cap value are guaranteed to render
+    * correctly.
+    *
+    * If guest drivers are able to expose a lower limit, it's recommended
+    * that they clamp to this value.  Otherwise, the host will make a
+    * best effort on a case-by-case basis if guests exceed this.
     */
    SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS              = 97,
 
    /*
-    * What is the maximum number of constant buffers
-    * that can be expected to work correctly with a
-    * DX context?
+    * What is the maximum number of constant buffers that can be expected to
+    * work correctly with a DX context?
+    *
+    * The guest is allowed to set up to SVGA3D_DX_MAX_CONSTBUFFERS, but
+    * anything in excess of this cap is not guaranteed to render correctly.
+    *
+    * If guest drivers are able to expose a lower limit, it's recommended
+    * that they clamp to this value.  Otherwise, the host will make a
+    * best effort on a case-by-case basis if guests exceed this.
     */
    SVGA3D_DEVCAP_DX_MAX_CONSTANT_BUFFERS           = 98,
 
    /*
     * Does the device support provoking vertex control?
-    * If zero, the first vertex will always be the provoking vertex.
+    *
+    * If this cap is present, the provokingVertexLast field in the
+    * rasterizer state is enabled.  (Guests can then set it to FALSE,
+    * meaning that the first vertex is the provoking vertex, or TRUE,
+    * meaning that the last vertex is the provoking vertex.)
+    *
+    * If this cap is FALSE, then guests should set the provokingVertexLast
+    * to FALSE, otherwise rendering behavior is undefined.
     */
    SVGA3D_DEVCAP_DX_PROVOKING_VERTEX               = 99,
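
The clamping that the cap comments above recommend, as a standalone sketch (hypothetical helper; SVGA3D_DX_MAX_VERTEXBUFFERS restated from this series):

#include <stdint.h>

#define SVGA3D_DX_MAX_VERTEXBUFFERS 32

static uint32_t clamp_vertex_buffers(uint32_t devcap_value)
{
        /* Never advertise more than the host cap or the protocol maximum. */
        return devcap_value < SVGA3D_DX_MAX_VERTEXBUFFERS ?
               devcap_value : SVGA3D_DX_MAX_VERTEXBUFFERS;
}
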
 
@@ -281,7 +308,7 @@ typedef enum {
    SVGA3D_DEVCAP_DXFMT_BUMPU8V8                    = 119,
    SVGA3D_DEVCAP_DXFMT_BUMPL6V5U5                  = 120,
    SVGA3D_DEVCAP_DXFMT_BUMPX8L8V8U8                = 121,
-   SVGA3D_DEVCAP_DXFMT_BUMPL8V8U8                  = 122,
+   SVGA3D_DEVCAP_DXFMT_FORMAT_DEAD1                = 122,
    SVGA3D_DEVCAP_DXFMT_ARGB_S10E5                  = 123,
    SVGA3D_DEVCAP_DXFMT_ARGB_S23E8                  = 124,
    SVGA3D_DEVCAP_DXFMT_A2R10G10B10                 = 125,
@@ -320,8 +347,8 @@ typedef enum {
    SVGA3D_DEVCAP_DXFMT_R32G32_SINT                 = 158,
    SVGA3D_DEVCAP_DXFMT_R32G8X24_TYPELESS           = 159,
    SVGA3D_DEVCAP_DXFMT_D32_FLOAT_S8X24_UINT        = 160,
-   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24_TYPELESS    = 161,
-   SVGA3D_DEVCAP_DXFMT_X32_TYPELESS_G8X24_UINT     = 162,
+   SVGA3D_DEVCAP_DXFMT_R32_FLOAT_X8X24             = 161,
+   SVGA3D_DEVCAP_DXFMT_X32_G8X24_UINT              = 162,
    SVGA3D_DEVCAP_DXFMT_R10G10B10A2_TYPELESS        = 163,
    SVGA3D_DEVCAP_DXFMT_R10G10B10A2_UINT            = 164,
    SVGA3D_DEVCAP_DXFMT_R11G11B10_FLOAT             = 165,
@@ -339,8 +366,8 @@ typedef enum {
    SVGA3D_DEVCAP_DXFMT_R32_SINT                    = 177,
    SVGA3D_DEVCAP_DXFMT_R24G8_TYPELESS              = 178,
    SVGA3D_DEVCAP_DXFMT_D24_UNORM_S8_UINT           = 179,
-   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8_TYPELESS       = 180,
-   SVGA3D_DEVCAP_DXFMT_X24_TYPELESS_G8_UINT        = 181,
+   SVGA3D_DEVCAP_DXFMT_R24_UNORM_X8                = 180,
+   SVGA3D_DEVCAP_DXFMT_X24_G8_UINT                 = 181,
    SVGA3D_DEVCAP_DXFMT_R8G8_TYPELESS               = 182,
    SVGA3D_DEVCAP_DXFMT_R8G8_UNORM                  = 183,
    SVGA3D_DEVCAP_DXFMT_R8G8_UINT                   = 184,
@@ -404,6 +431,17 @@ typedef enum {
    SVGA3D_DEVCAP_DXFMT_BC4_UNORM                   = 242,
    SVGA3D_DEVCAP_DXFMT_BC5_UNORM                   = 243,
 
+   /*
+    * Advertises shaderModel 4.1 support, independent blend-states,
+    * cube-map arrays, and a higher vertex input registers limit.
+    *
+    * (See documentation on SVGA3D_DEVCAP_DX_MAX_VERTEXBUFFERS.)
+    */
+   SVGA3D_DEVCAP_SM41                              = 244,
+
+   SVGA3D_DEVCAP_MULTISAMPLE_2X                    = 245,
+   SVGA3D_DEVCAP_MULTISAMPLE_4X                    = 246,
+
    SVGA3D_DEVCAP_MAX                       /* This must be the last index. */
 } SVGA3dDevCapIndex;
 
@@ -419,9 +457,7 @@ typedef enum {
  * MIPS: Does the format support mip levels?
  * ARRAY: Does the format support texture arrays?
  * VOLUME: Does the format support having volume?
- * MULTISAMPLE_2: Does the format support 2x multisample?
- * MULTISAMPLE_4: Does the format support 4x multisample?
- * MULTISAMPLE_8: Does the format support 8x multisample?
+ * MULTISAMPLE: Does the format support multisample?
  */
 #define SVGA3D_DXFMT_SUPPORTED                (1 <<  0)
 #define SVGA3D_DXFMT_SHADER_SAMPLE            (1 <<  1)
@@ -432,20 +468,8 @@ typedef enum {
 #define SVGA3D_DXFMT_ARRAY                    (1 <<  6)
 #define SVGA3D_DXFMT_VOLUME                   (1 <<  7)
 #define SVGA3D_DXFMT_DX_VERTEX_BUFFER         (1 <<  8)
-#define SVGADX_DXFMT_MULTISAMPLE_2            (1 <<  9)
-#define SVGADX_DXFMT_MULTISAMPLE_4            (1 << 10)
-#define SVGADX_DXFMT_MULTISAMPLE_8            (1 << 11)
-#define SVGADX_DXFMT_MAX                      (1 << 12)
-
-/*
- * Convenience mask for any multisample capability.
- *
- * The multisample bits imply both load and render capability.
- */
-#define SVGA3D_DXFMT_MULTISAMPLE ( \
-           SVGADX_DXFMT_MULTISAMPLE_2 | \
-           SVGADX_DXFMT_MULTISAMPLE_4 | \
-           SVGADX_DXFMT_MULTISAMPLE_8 )
+#define SVGA3D_DXFMT_MULTISAMPLE              (1 <<  9)
+#define SVGA3D_DXFMT_MAX                      (1 << 10)
 
 typedef union {
    Bool   b;
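
With the three per-sample-count bits collapsed into one, a format query reduces to a single mask test; a sketch with the relevant bits restated locally (helper name hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define SVGA3D_DXFMT_SUPPORTED   (1 << 0)
#define SVGA3D_DXFMT_MULTISAMPLE (1 << 9)

static bool fmt_supports_msaa(uint32_t dxfmt_caps)
{
        /* One bit now covers all sample counts; pair with the new
         * SVGA3D_DEVCAP_MULTISAMPLE_2X/4X caps for the count itself. */
        return (dxfmt_caps & SVGA3D_DXFMT_SUPPORTED) &&
               (dxfmt_caps & SVGA3D_DXFMT_MULTISAMPLE);
}
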
index 8c5ae608cfb4f9c535c41aeead16e27fc1f91157..7a49c94df221c77c01ca0cdf0c086db7d69d39cc 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -56,6 +57,16 @@ typedef uint32 SVGA3dInputClassification;
 #define SVGA3D_RESOURCE_TYPE_MAX      7
 typedef uint32 SVGA3dResourceType;
 
+#define SVGA3D_COLOR_WRITE_ENABLE_RED     (1 << 0)
+#define SVGA3D_COLOR_WRITE_ENABLE_GREEN   (1 << 1)
+#define SVGA3D_COLOR_WRITE_ENABLE_BLUE    (1 << 2)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALPHA   (1 << 3)
+#define SVGA3D_COLOR_WRITE_ENABLE_ALL     (SVGA3D_COLOR_WRITE_ENABLE_RED |   \
+                                           SVGA3D_COLOR_WRITE_ENABLE_GREEN | \
+                                           SVGA3D_COLOR_WRITE_ENABLE_BLUE |  \
+                                           SVGA3D_COLOR_WRITE_ENABLE_ALPHA)
+typedef uint8 SVGA3dColorWriteEnable;
+
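
A sketch of composing a per-channel write mask from the new bits, e.g. for a blend state's renderTargetWriteMask (hypothetical helper; bits restated so the snippet stands alone):

#include <stdint.h>

#define SVGA3D_COLOR_WRITE_ENABLE_RED   (1 << 0)
#define SVGA3D_COLOR_WRITE_ENABLE_GREEN (1 << 1)
#define SVGA3D_COLOR_WRITE_ENABLE_BLUE  (1 << 2)

static uint8_t rgb_only_write_mask(void)
{
        /* Leave alpha masked off; all three color channels writable. */
        return SVGA3D_COLOR_WRITE_ENABLE_RED |
               SVGA3D_COLOR_WRITE_ENABLE_GREEN |
               SVGA3D_COLOR_WRITE_ENABLE_BLUE;
}
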
 #define SVGA3D_DEPTH_WRITE_MASK_ZERO   0
 #define SVGA3D_DEPTH_WRITE_MASK_ALL    1
 typedef uint8 SVGA3dDepthWriteMask;
@@ -88,17 +99,28 @@ typedef uint8 SVGA3dCullMode;
 #define SVGA3D_COMPARISON_MAX             9
 typedef uint8 SVGA3dComparisonFunc;
 
+/*
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE disables MSAA for all primitives.
+ * SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE, which is supported in SM41,
+ * disables MSAA for lines only.
+ */
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE        0
+#define SVGA3D_MULTISAMPLE_RAST_ENABLE         1
+#define SVGA3D_MULTISAMPLE_RAST_DX_MAX         1
+#define SVGA3D_MULTISAMPLE_RAST_DISABLE_LINE   2
+#define SVGA3D_MULTISAMPLE_RAST_MAX            2
+typedef uint8 SVGA3dMultisampleRastEnable;
+
 #define SVGA3D_DX_MAX_VERTEXBUFFERS 32
+#define SVGA3D_DX_MAX_VERTEXINPUTREGISTERS 16
+#define SVGA3D_DX_SM41_MAX_VERTEXINPUTREGISTERS 32
 #define SVGA3D_DX_MAX_SOTARGETS 4
 #define SVGA3D_DX_MAX_SRVIEWS 128
 #define SVGA3D_DX_MAX_CONSTBUFFERS 16
 #define SVGA3D_DX_MAX_SAMPLERS 16
 
-/* Id limits */
-static const uint32 SVGA3dBlendObjectCountPerContext = 4096;
-static const uint32 SVGA3dDepthStencilObjectCountPerContext = 4096;
+#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE (4096 * 4 * (uint32)sizeof(uint32))
 
-typedef uint32 SVGA3dSurfaceId;
 typedef uint32 SVGA3dShaderResourceViewId;
 typedef uint32 SVGA3dRenderTargetViewId;
 typedef uint32 SVGA3dDepthStencilViewId;
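
The new binding-size cap works out to 4096 vec4 constants * 16 bytes = 65536 bytes; a sketch of validating against it (hypothetical helper, macro restated with uint32_t):

#include <stdbool.h>
#include <stdint.h>

#define SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE \
        (4096 * 4 * (uint32_t)sizeof(uint32_t))   /* 64 KiB */

static bool constbuf_binding_ok(uint32_t size_in_bytes)
{
        return size_in_bytes <= SVGA3D_DX_MAX_CONSTBUF_BINDING_SIZE;
}
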
@@ -192,20 +214,6 @@ struct SVGA3dCmdDXInvalidateContext {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXInvalidateContext;   /* SVGA_3D_CMD_DX_INVALIDATE_CONTEXT */
 
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dReplyFormatData {
-   uint32 formatSupport;
-   uint32 msaa2xQualityLevels:5;
-   uint32 msaa4xQualityLevels:5;
-   uint32 msaa8xQualityLevels:5;
-   uint32 msaa16xQualityLevels:5;
-   uint32 msaa32xQualityLevels:5;
-   uint32 pad:7;
-}
-#include "vmware_pack_end.h"
-SVGA3dReplyFormatData;
-
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXSetSingleConstantBuffer {
@@ -622,6 +630,28 @@ struct SVGA3dCmdDXPredCopy {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXPredCopy; /* SVGA_3D_CMD_DX_PRED_COPY */
 
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvertRegion {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dBox destBox;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dBox srcBox;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvertRegion; /* SVGA_3D_CMD_DX_PRED_CONVERT_REGION */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXPredConvert {
+   SVGA3dSurfaceId dstSid;
+   SVGA3dSurfaceId srcSid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredConvert; /* SVGA_3D_CMD_DX_PRED_CONVERT */
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXBufferCopy {
@@ -635,23 +665,57 @@ struct SVGA3dCmdDXBufferCopy {
 SVGA3dCmdDXBufferCopy;
 /* SVGA_3D_CMD_DX_BUFFER_COPY */
 
-typedef uint32 SVGA3dDXStretchBltMode;
-#define SVGADX_STRETCHBLT_LINEAR         (1 << 0)
-#define SVGADX_STRETCHBLT_FORCE_SRC_SRGB (1 << 1)
+/*
+ * Perform a surface copy between a multisampled and a non-multisampled
+ * surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXResolveCopy;               /* SVGA_3D_CMD_DX_RESOLVE_COPY */
+
+/*
+ * Perform a predicated surface copy between a multisampled and a
+ * non-multisampled surface.
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   SVGA3dSurfaceId dstSid;
+   uint32 dstSubResource;
+   SVGA3dSurfaceId srcSid;
+   uint32 srcSubResource;
+   SVGA3dSurfaceFormat copyFormat;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXPredResolveCopy;           /* SVGA_3D_CMD_DX_PRED_RESOLVE_COPY */
+
+typedef uint32 SVGA3dDXPresentBltMode;
+#define SVGADX_PRESENTBLT_LINEAR           (1 << 0)
+#define SVGADX_PRESENTBLT_FORCE_SRC_SRGB   (1 << 1)
+#define SVGADX_PRESENTBLT_FORCE_SRC_XRBIAS (1 << 2)
+#define SVGADX_PRESENTBLT_MODE_MAX         (1 << 3)
 
 typedef
 #include "vmware_pack_begin.h"
-struct SVGA3dCmdDXStretchBlt {
+struct SVGA3dCmdDXPresentBlt {
    SVGA3dSurfaceId srcSid;
    uint32 srcSubResource;
    SVGA3dSurfaceId dstSid;
    uint32 destSubResource;
    SVGA3dBox boxSrc;
    SVGA3dBox boxDest;
-   SVGA3dDXStretchBltMode mode;
+   SVGA3dDXPresentBltMode mode;
 }
 #include "vmware_pack_end.h"
-SVGA3dCmdDXStretchBlt; /* SVGA_3D_CMD_DX_STRETCHBLT */
+SVGA3dCmdDXPresentBlt; /* SVGA_3D_CMD_DX_PRESENTBLT */
 
 typedef
 #include "vmware_pack_begin.h"
@@ -661,26 +725,6 @@ struct SVGA3dCmdDXGenMips {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXGenMips; /* SVGA_3D_CMD_DX_GENMIPS */
 
-/*
- * Defines a resource/DX surface.  Resources share the surfaceId namespace.
- *
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dCmdDefineGBSurface_v2 {
-   uint32 sid;
-   SVGA3dSurfaceFlags surfaceFlags;
-   SVGA3dSurfaceFormat format;
-   uint32 numMipLevels;
-   uint32 multisampleCount;
-   SVGA3dTextureFilter autogenFilter;
-   SVGA3dSize size;
-   uint32 arraySize;
-   uint32 pad;
-}
-#include "vmware_pack_end.h"
-SVGA3dCmdDefineGBSurface_v2;   /* SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 */
-
 /*
  * Update a sub-resource in a guest-backed resource.
  * (Inform the device that the guest-contents have been updated.)
@@ -724,7 +768,8 @@ SVGA3dCmdDXInvalidateSubResource;   /* SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE */
 
 /*
  * Raw byte wise transfer from a buffer surface into another surface
- * of the requested box.
+ * of the requested box.  Supported if 3d is enabled and SVGA_CAP_DX
+ * is set.  This command does not take a context.
  */
 typedef
 #include "vmware_pack_begin.h"
@@ -773,6 +818,93 @@ struct SVGA3dCmdDXSurfaceCopyAndReadback {
 SVGA3dCmdDXSurfaceCopyAndReadback;
 /* SVGA_3D_CMD_DX_SURFACE_COPY_AND_READBACK */
 
+/*
+ * SVGA_DX_HINT_NONE: Does nothing.
+ *
+ * SVGA_DX_HINT_PREFETCH_OBJECT:
+ * SVGA_DX_HINT_PREEVICT_OBJECT:
+ *      Consumes a SVGAObjectRef, and hints that the host should consider
+ *      fetching/evicting the specified object.
+ *
+ *      An id of SVGA3D_INVALID_ID can be used if the guest isn't sure
+ *      what object was affected.  (For instance, if the guest knows that
+ *      it is about to evict a DXShader, but doesn't know precisely which one,
+ *      the device can still use this to help limit its search, or track
+ *      how many page-outs have happened.)
+ *
+ * SVGA_DX_HINT_PREFETCH_COBJECT:
+ * SVGA_DX_HINT_PREEVICT_COBJECT:
+ *      Same as the above, except they consume an SVGACObjectRef.
+ */
+typedef uint32 SVGADXHintId;
+#define SVGA_DX_HINT_NONE              0
+#define SVGA_DX_HINT_PREFETCH_OBJECT   1
+#define SVGA_DX_HINT_PREEVICT_OBJECT   2
+#define SVGA_DX_HINT_PREFETCH_COBJECT  3
+#define SVGA_DX_HINT_PREEVICT_COBJECT  4
+#define SVGA_DX_HINT_MAX               5
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGAObjectRef {
+   SVGAOTableType type;
+   uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGAObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGACObjectRef {
+   SVGACOTableType type;
+   uint32 cid;
+   uint32 id;
+}
+#include "vmware_pack_end.h"
+SVGACObjectRef;
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXHint {
+   SVGADXHintId hintId;
+
+   /*
+    * Followed by variable sized data depending on the hintId.
+    */
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXHint;
+/* SVGA_3D_CMD_DX_HINT */
+
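
A sketch of how the variable-sized hint payload could be laid out for SVGA_DX_HINT_PREFETCH_OBJECT, per the comment above (hypothetical helper; include path assumed):

#include <stddef.h>
#include <string.h>
#include "svga3d_dx.h"   /* assumed include for the hint types above */

static size_t encode_prefetch_hint(void *out, SVGAOTableType type, uint32 id)
{
        SVGA3dCmdDXHint hint = { .hintId = SVGA_DX_HINT_PREFETCH_OBJECT };
        SVGAObjectRef ref = { .type = type, .id = id };

        /* Variable-sized data follows the fixed header, per the comment. */
        memcpy(out, &hint, sizeof(hint));
        memcpy((char *)out + sizeof(hint), &ref, sizeof(ref));
        return sizeof(hint) + sizeof(ref);
}
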
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBufferUpdate {
+   SVGA3dSurfaceId sid;
+   uint32 x;
+   uint32 width;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBufferUpdate;
+/* SVGA_3D_CMD_DX_BUFFER_UPDATE */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXSetConstantBufferOffset {
+   uint32 slot;
+   uint32 offsetInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXSetConstantBufferOffset;
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetVSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetPSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET */
+
+typedef SVGA3dCmdDXSetConstantBufferOffset SVGA3dCmdDXSetGSConstantBufferOffset;
+/* SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET */
+
 
 typedef
 #include "vmware_pack_begin.h"
@@ -789,7 +921,7 @@ struct {
          uint32 firstArraySlice;
          uint32 mipLevels;
          uint32 arraySize;
-      } tex;
+      } tex; /* 1d, 2d, 3d, cube */
       struct {
          uint32 firstElement;
          uint32 numElements;
@@ -844,6 +976,7 @@ struct SVGA3dRenderTargetViewDesc {
       struct {
          uint32 firstElement;
          uint32 numElements;
+         uint32 padding0;
       } buffer;
       struct {
          uint32 mipSlice;
@@ -964,9 +1097,6 @@ SVGA3dInputElementDesc;
 typedef
 #include "vmware_pack_begin.h"
 struct {
-   /*
-    * XXX: How many of these can there be?
-    */
    uint32 elid;
    uint32 numDescs;
    SVGA3dInputElementDesc desc[32];
@@ -1007,7 +1137,7 @@ struct SVGA3dDXBlendStatePerRT {
       uint8 srcBlendAlpha;
       uint8 destBlendAlpha;
       uint8 blendOpAlpha;
-      uint8 renderTargetWriteMask;
+      SVGA3dColorWriteEnable renderTargetWriteMask;
       uint8 logicOpEnable;
       uint8 logicOp;
       uint16 pad0;
@@ -1125,7 +1255,7 @@ struct {
    float slopeScaledDepthBias;
    uint8 depthClipEnable;
    uint8 scissorEnable;
-   uint8 multisampleEnable;
+   SVGA3dMultisampleRastEnable multisampleEnable;
    uint8 antialiasedLineEnable;
    float lineWidth;
    uint8 lineStippleEnable;
@@ -1152,7 +1282,7 @@ struct SVGA3dCmdDXDefineRasterizerState {
    float slopeScaledDepthBias;
    uint8 depthClipEnable;
    uint8 scissorEnable;
-   uint8 multisampleEnable;
+   SVGA3dMultisampleRastEnable multisampleEnable;
    uint8 antialiasedLineEnable;
    float lineWidth;
    uint8 lineStippleEnable;
@@ -1222,21 +1352,6 @@ struct SVGA3dCmdDXDestroySamplerState {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXDestroySamplerState; /* SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE */
 
-/*
- */
-typedef
-#include "vmware_pack_begin.h"
-struct SVGA3dSignatureEntry {
-   uint8 systemValue;
-   uint8 reg;                 /* register is a reserved word */
-   uint16 mask;
-   uint8 registerComponentType;
-   uint8 minPrecision;
-   uint16 pad0;
-}
-#include "vmware_pack_end.h"
-SVGA3dSignatureEntry;
-
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXDefineShader {
@@ -1254,12 +1369,7 @@ struct SVGACOTableDXShaderEntry {
    uint32 sizeInBytes;
    uint32 offsetInBytes;
    SVGAMobId mobid;
-   uint32 numInputSignatureEntries;
-   uint32 numOutputSignatureEntries;
-
-   uint32 numPatchConstantSignatureEntries;
-
-   uint32 pad;
+   uint32 pad[4];
 }
 #include "vmware_pack_end.h"
 SVGACOTableDXShaderEntry;
@@ -1283,6 +1393,25 @@ struct SVGA3dCmdDXBindShader {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXBindShader;   /* SVGA_3D_CMD_DX_BIND_SHADER */
 
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXBindAllShader {
+   uint32 cid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXBindAllShader;   /* SVGA_3D_CMD_DX_BIND_ALL_SHADER */
+
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXCondBindAllShader {
+   uint32 cid;
+   SVGAMobId testMobid;
+   SVGAMobId mobid;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXCondBindAllShader;   /* SVGA_3D_CMD_DX_COND_BIND_ALL_SHADER */
+
 /*
  * The maximum number of streamout decl's in each streamout entry.
  */
@@ -1356,7 +1485,6 @@ SVGA3dCmdDXMobFence64;  /* SVGA_3D_CMD_DX_MOB_FENCE_64 */
  *
  * This command allows the guest to bind a mob to a context-object table.
  */
-
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXSetCOTable {
@@ -1368,6 +1496,26 @@ struct SVGA3dCmdDXSetCOTable {
 #include "vmware_pack_end.h"
 SVGA3dCmdDXSetCOTable; /* SVGA_3D_CMD_DX_SET_COTABLE */
 
+/*
+ * Guests using SVGA_3D_CMD_DX_GROW_COTABLE are promising that
+ * the new COTable contains the same contents as the old one, except possibly
+ * for some new invalid entries at the end.
+ *
+ * If an old cotable mob is bound, it must still be valid as well.
+ *
+ * (Otherwise, guests should use the DXSetCOTableBase command.)
+ */
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dCmdDXGrowCOTable {
+   uint32 cid;
+   uint32 mobid;
+   SVGACOTableType type;
+   uint32 validSizeInBytes;
+}
+#include "vmware_pack_end.h"
+SVGA3dCmdDXGrowCOTable; /* SVGA_3D_CMD_DX_GROW_COTABLE */
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCmdDXReadbackCOTable {
@@ -1471,7 +1619,7 @@ struct SVGADXContextMobFormat {
    SVGA3dQueryId queryID[SVGA3D_MAX_QUERY];
 
    SVGA3dCOTableData cotables[SVGA_COTABLE_MAX];
-   uint32 pad7[381];
+   uint32 pad7[380];
 }
 #include "vmware_pack_end.h"
 SVGADXContextMobFormat;
index a1c36877ad5527ef31154ea9ddfa5d87979a63c6..b22a67f15660f57c6cab421dbf6e16dae95ec0d4 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -62,7 +63,9 @@
  * Maximum size in dwords of shader text the SVGA device will allow.
  * Currently 8 MB.
  */
-#define SVGA3D_MAX_SHADER_MEMORY  (8 * 1024 * 1024 / sizeof(uint32))
+#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
+#define SVGA3D_MAX_SHADER_MEMORY  (SVGA3D_MAX_SHADER_MEMORY_BYTES / \
+                                   sizeof(uint32))
 
 #define SVGA3D_MAX_CLIP_PLANES    6
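
For reference, the reworked limit is 8 MiB / 4 bytes per dword = 2,097,152 dwords; a standalone sketch of a size check (hypothetical helper, macros restated with uint32_t):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SVGA3D_MAX_SHADER_MEMORY_BYTES (8 * 1024 * 1024)
#define SVGA3D_MAX_SHADER_MEMORY \
        (SVGA3D_MAX_SHADER_MEMORY_BYTES / sizeof(uint32_t))

static bool shader_text_fits(size_t len_dwords)
{
        return len_dwords <= SVGA3D_MAX_SHADER_MEMORY;
}
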
 
index b44ce648f592da2b32828450db6ba126da96edb2..bdfc404c91e3ca3ce81c15a5ce8ad3c8d8a5c886 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
index babe7cb84fc21c20f0fc2c801d08d43e10c02804..f2bfd3d8059869a9f7d9307e396242255a32bfe3 100644 (file)
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2008-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2008-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  *
  **************************************************************************/
 
-#include <linux/kernel.h>
-
-#ifdef __KERNEL__
-
-#include <drm/vmwgfx_drm.h>
-#define surf_size_struct struct drm_vmw_size
-
-#else /* __KERNEL__ */
+/*
+ * svga3d_surfacedefs.h --
+ *
+ *      Surface definitions and inlineable utilities for SVGA3d.
+ */
 
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
-#endif /* ARRAY_SIZE */
+#ifndef _SVGA3D_SURFACEDEFS_H_
+#define _SVGA3D_SURFACEDEFS_H_
 
-#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
-#define surf_size_struct SVGA3dSize
-#define u32 uint32
+#define INCLUDE_ALLOW_USERLEVEL
+#define INCLUDE_ALLOW_MODULE
+#include "includeCheck.h"
 
-#endif /* __KERNEL__ */
+#include <linux/kernel.h>
+#include <drm/vmwgfx_drm.h>
 
 #include "svga3d_reg.h"
 
+#define surf_size_struct struct drm_vmw_size
+
 /*
- * enum svga3d_block_desc describes the active data channels in a block.
- *
- * There can be at-most four active channels in a block:
- *    1. Red, bump W, luminance and depth are stored in the first channel.
- *    2. Green, bump V and stencil are stored in the second channel.
- *    3. Blue and bump U are stored in the third channel.
- *    4. Alpha and bump Q are stored in the fourth channel.
- *
- * Block channels can be used to store compressed and buffer data:
- *    1. For compressed formats, only the data channel is used and its size
- *       is equal to that of a singular block in the compression scheme.
- *    2. For buffer formats, only the data channel is used and its size is
- *       exactly one byte in length.
- *    3. In each case the bit depth represent the size of a singular block.
- *
- * Note: Compressed and IEEE formats do not use the bitMask structure.
+ * enum svga3d_block_desc - describes generic properties about formats.
  */
-
 enum svga3d_block_desc {
-       SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
-       SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with red channel
-                                                   data */
-       SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
-                                                   data */
-       SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
-                                                   U and V */
-       SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
-                                                   data */
-       SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
-                                                   data */
-       SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
-                                                   channel */
-       SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with blue channel
-                                                   data */
-       SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
-                                                   data */
-       SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
-                                                   data */
-       SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
-                                                   data */
-       SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
-       SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
-                                                   channel */
-       SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
-                                                   data */
-       SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
-                                                   data */
-       SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
-                                                   data depending on the
-                                                   compression method used */
-       SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
-                                                   floating point
-                                                   representation in
-                                                   all channels */
-       SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
-                                                   data. */
-       SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
-       SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
-       SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
-       SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
-       SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
-                                                   e.g., NV12. */
-       SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
-                                                   Y, U, V, e.g., YV12. */
-
-       SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
-       SVGA3DBLOCKDESC_GREEN,
-       SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
-       SVGA3DBLOCKDESC_BLUE,
-       SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
-       SVGA3DBLOCKDESC_SRGB,
-       SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
-       SVGA3DBLOCKDESC_ALPHA,
-       SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
-       SVGA3DBLOCKDESC_SRGB,
+       /* Nothing special can be said about this format. */
+       SVGA3DBLOCKDESC_NONE        = 0,
+
+       /* Format contains Blue/U data */
+       SVGA3DBLOCKDESC_BLUE        = 1 << 0,
+       SVGA3DBLOCKDESC_W           = 1 << 0,
+       SVGA3DBLOCKDESC_BUMP_L      = 1 << 0,
+
+       /* Format contains Green/V data */
+       SVGA3DBLOCKDESC_GREEN       = 1 << 1,
+       SVGA3DBLOCKDESC_V           = 1 << 1,
+
+       /* Format contains Red/W/Luminance data */
+       SVGA3DBLOCKDESC_RED         = 1 << 2,
+       SVGA3DBLOCKDESC_U           = 1 << 2,
+       SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,
+
+       /* Format contains Alpha/Q data */
+       SVGA3DBLOCKDESC_ALPHA       = 1 << 3,
+       SVGA3DBLOCKDESC_Q           = 1 << 3,
+
+       /* Format is a buffer */
+       SVGA3DBLOCKDESC_BUFFER      = 1 << 4,
+
+       /* Format is compressed */
+       SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,
+
+       /* Format uses IEEE floating point */
+       SVGA3DBLOCKDESC_FP          = 1 << 6,
+
+       /* Three separate blocks store data. */
+       SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 7,
+
+       /* 2 planes of Y, UV, e.g., NV12. */
+       SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 8,
+
+       /* 3 planes of separate Y, U, V, e.g., YV12. */
+       SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 9,
+
+       /* Block with a stencil channel */
+       SVGA3DBLOCKDESC_STENCIL     = 1 << 11,
+
+       /* Typeless format */
+       SVGA3DBLOCKDESC_TYPELESS    = 1 << 12,
+
+       /* Channels are signed integers */
+       SVGA3DBLOCKDESC_SINT        = 1 << 13,
+
+       /* Channels are unsigned integers */
+       SVGA3DBLOCKDESC_UINT        = 1 << 14,
+
+       /* Channels are normalized (when sampling) */
+       SVGA3DBLOCKDESC_NORM        = 1 << 15,
+
+       /* Channels are in SRGB */
+       SVGA3DBLOCKDESC_SRGB        = 1 << 16,
+
+       /* Shared exponent */
+       SVGA3DBLOCKDESC_EXP         = 1 << 17,
+
+       /* Format contains color data. */
+       SVGA3DBLOCKDESC_COLOR       = 1 << 18,
+       /* Format contains depth data. */
+       SVGA3DBLOCKDESC_DEPTH       = 1 << 19,
+       /* Format contains bump data. */
+       SVGA3DBLOCKDESC_BUMP        = 1 << 20,
+
+       /* Format contains YUV video data. */
+       SVGA3DBLOCKDESC_YUV_VIDEO   = 1 << 21,
+
+       /* For mixed unsigned/signed formats. */
+       SVGA3DBLOCKDESC_MIXED       = 1 << 22,
+
+       /* For distinguishing CxV8U8. */
+       SVGA3DBLOCKDESC_CX          = 1 << 23,
+
+       /* Different compressed format groups. */
+       SVGA3DBLOCKDESC_BC1         = 1 << 24,
+       SVGA3DBLOCKDESC_BC2         = 1 << 25,
+       SVGA3DBLOCKDESC_BC3         = 1 << 26,
+       SVGA3DBLOCKDESC_BC4         = 1 << 27,
+       SVGA3DBLOCKDESC_BC5         = 1 << 28,
+
+       SVGA3DBLOCKDESC_A_UINT    = SVGA3DBLOCKDESC_ALPHA |
+                                   SVGA3DBLOCKDESC_UINT |
+                                   SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_A_UNORM   = SVGA3DBLOCKDESC_A_UINT |
+                                   SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_R_UINT    = SVGA3DBLOCKDESC_RED |
+                                   SVGA3DBLOCKDESC_UINT |
+                                   SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_R_UNORM   = SVGA3DBLOCKDESC_R_UINT |
+                                   SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_R_SINT    = SVGA3DBLOCKDESC_RED |
+                                   SVGA3DBLOCKDESC_SINT |
+                                   SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_R_SNORM   = SVGA3DBLOCKDESC_R_SINT |
+                                   SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_G_UINT    = SVGA3DBLOCKDESC_GREEN |
+                                   SVGA3DBLOCKDESC_UINT |
+                                   SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RG_UINT    = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_UINT |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RG_UNORM   = SVGA3DBLOCKDESC_RG_UINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_RG_SINT    = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_SINT |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RG_SNORM   = SVGA3DBLOCKDESC_RG_SINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_RGB_UINT   = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_BLUE |
+                                    SVGA3DBLOCKDESC_UINT |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RGB_SINT   = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_BLUE |
+                                    SVGA3DBLOCKDESC_SINT |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RGB_UNORM   = SVGA3DBLOCKDESC_RGB_UINT |
+                                     SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_RGB_UNORM_SRGB = SVGA3DBLOCKDESC_RGB_UNORM |
+                                        SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_RGBA_UINT  = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_BLUE |
+                                    SVGA3DBLOCKDESC_ALPHA |
+                                    SVGA3DBLOCKDESC_UINT |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RGBA_UNORM = SVGA3DBLOCKDESC_RGBA_UINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_RGBA_UNORM_SRGB = SVGA3DBLOCKDESC_RGBA_UNORM |
+                                         SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_RGBA_SINT  = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_BLUE |
+                                    SVGA3DBLOCKDESC_ALPHA |
+                                    SVGA3DBLOCKDESC_SINT |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RGBA_SNORM = SVGA3DBLOCKDESC_RGBA_SINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RED |
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_BLUE |
+                                    SVGA3DBLOCKDESC_ALPHA |
+                                    SVGA3DBLOCKDESC_FP |
+                                    SVGA3DBLOCKDESC_COLOR,
        SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
-       SVGA3DBLOCKDESC_V,
+                                    SVGA3DBLOCKDESC_V |
+                                    SVGA3DBLOCKDESC_BUMP,
        SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
-       SVGA3DBLOCKDESC_LUMINANCE,
+                                    SVGA3DBLOCKDESC_BUMP_L |
+                                    SVGA3DBLOCKDESC_MIXED |
+                                    SVGA3DBLOCKDESC_BUMP,
        SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
-       SVGA3DBLOCKDESC_W,
+                                    SVGA3DBLOCKDESC_W |
+                                    SVGA3DBLOCKDESC_BUMP,
        SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
-       SVGA3DBLOCKDESC_ALPHA,
+                                    SVGA3DBLOCKDESC_ALPHA |
+                                    SVGA3DBLOCKDESC_MIXED |
+                                    SVGA3DBLOCKDESC_BUMP,
        SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
-       SVGA3DBLOCKDESC_V |
-       SVGA3DBLOCKDESC_W |
-       SVGA3DBLOCKDESC_Q,
-       SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
-       SVGA3DBLOCKDESC_ALPHA,
+                                    SVGA3DBLOCKDESC_V |
+                                    SVGA3DBLOCKDESC_W |
+                                    SVGA3DBLOCKDESC_Q |
+                                    SVGA3DBLOCKDESC_BUMP,
+       SVGA3DBLOCKDESC_L_UNORM    = SVGA3DBLOCKDESC_LUMINANCE |
+                                    SVGA3DBLOCKDESC_UINT |
+                                    SVGA3DBLOCKDESC_NORM |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_LA_UNORM   = SVGA3DBLOCKDESC_LUMINANCE |
+                                    SVGA3DBLOCKDESC_ALPHA |
+                                    SVGA3DBLOCKDESC_UINT |
+                                    SVGA3DBLOCKDESC_NORM |
+                                    SVGA3DBLOCKDESC_COLOR,
        SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
-       SVGA3DBLOCKDESC_IEEE_FP,
+                                    SVGA3DBLOCKDESC_FP |
+                                    SVGA3DBLOCKDESC_COLOR,
        SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
-       SVGA3DBLOCKDESC_GREEN,
+                                    SVGA3DBLOCKDESC_GREEN |
+                                    SVGA3DBLOCKDESC_COLOR,
        SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
-       SVGA3DBLOCKDESC_BLUE,
-       SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
-       SVGA3DBLOCKDESC_ALPHA,
-       SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
-       SVGA3DBLOCKDESC_STENCIL,
-       SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
-       SVGA3DBLOCKDESC_Y,
+                                    SVGA3DBLOCKDESC_BLUE |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_YUV_VIDEO |
+                                    SVGA3DBLOCKDESC_COLOR,
        SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
-       SVGA3DBLOCKDESC_Y |
-       SVGA3DBLOCKDESC_U_VIDEO |
-       SVGA3DBLOCKDESC_V_VIDEO,
-       SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
-       SVGA3DBLOCKDESC_EXP,
-       SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
-       SVGA3DBLOCKDESC_SRGB,
-       SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-       SVGA3DBLOCKDESC_2PLANAR_YUV,
-       SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
-       SVGA3DBLOCKDESC_3PLANAR_YUV,
+                                    SVGA3DBLOCKDESC_YUV_VIDEO |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_RGB_EXP       = SVGA3DBLOCKDESC_RED |
+                                       SVGA3DBLOCKDESC_GREEN |
+                                       SVGA3DBLOCKDESC_BLUE |
+                                       SVGA3DBLOCKDESC_EXP |
+                                       SVGA3DBLOCKDESC_COLOR,
+
+       SVGA3DBLOCKDESC_COMP_TYPELESS = SVGA3DBLOCKDESC_COMPRESSED |
+                                       SVGA3DBLOCKDESC_TYPELESS,
+       SVGA3DBLOCKDESC_COMP_UNORM = SVGA3DBLOCKDESC_COMPRESSED |
+                                    SVGA3DBLOCKDESC_UINT |
+                                    SVGA3DBLOCKDESC_NORM |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_COMP_SNORM = SVGA3DBLOCKDESC_COMPRESSED |
+                                    SVGA3DBLOCKDESC_SINT |
+                                    SVGA3DBLOCKDESC_NORM |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_COMP_UNORM |
+                                         SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_BC1_COMP_TYPELESS = SVGA3DBLOCKDESC_BC1 |
+                                           SVGA3DBLOCKDESC_COMP_TYPELESS,
+       SVGA3DBLOCKDESC_BC1_COMP_UNORM = SVGA3DBLOCKDESC_BC1 |
+                                        SVGA3DBLOCKDESC_COMP_UNORM,
+       SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC1_COMP_UNORM |
+                                             SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_BC2_COMP_TYPELESS = SVGA3DBLOCKDESC_BC2 |
+                                           SVGA3DBLOCKDESC_COMP_TYPELESS,
+       SVGA3DBLOCKDESC_BC2_COMP_UNORM = SVGA3DBLOCKDESC_BC2 |
+                                        SVGA3DBLOCKDESC_COMP_UNORM,
+       SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC2_COMP_UNORM |
+                                             SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_BC3_COMP_TYPELESS = SVGA3DBLOCKDESC_BC3 |
+                                           SVGA3DBLOCKDESC_COMP_TYPELESS,
+       SVGA3DBLOCKDESC_BC3_COMP_UNORM = SVGA3DBLOCKDESC_BC3 |
+                                        SVGA3DBLOCKDESC_COMP_UNORM,
+       SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB = SVGA3DBLOCKDESC_BC3_COMP_UNORM |
+                                             SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_BC4_COMP_TYPELESS = SVGA3DBLOCKDESC_BC4 |
+                                           SVGA3DBLOCKDESC_COMP_TYPELESS,
+       SVGA3DBLOCKDESC_BC4_COMP_UNORM = SVGA3DBLOCKDESC_BC4 |
+                                        SVGA3DBLOCKDESC_COMP_UNORM,
+       SVGA3DBLOCKDESC_BC4_COMP_SNORM = SVGA3DBLOCKDESC_BC4 |
+                                        SVGA3DBLOCKDESC_COMP_SNORM,
+       SVGA3DBLOCKDESC_BC5_COMP_TYPELESS = SVGA3DBLOCKDESC_BC5 |
+                                           SVGA3DBLOCKDESC_COMP_TYPELESS,
+       SVGA3DBLOCKDESC_BC5_COMP_UNORM = SVGA3DBLOCKDESC_BC5 |
+                                        SVGA3DBLOCKDESC_COMP_UNORM,
+       SVGA3DBLOCKDESC_BC5_COMP_SNORM = SVGA3DBLOCKDESC_BC5 |
+                                        SVGA3DBLOCKDESC_COMP_SNORM,
+
+       SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_YUV_VIDEO |
+                                    SVGA3DBLOCKDESC_PLANAR_YUV |
+                                    SVGA3DBLOCKDESC_2PLANAR_YUV |
+                                    SVGA3DBLOCKDESC_COLOR,
+       SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_YUV_VIDEO |
+                                    SVGA3DBLOCKDESC_PLANAR_YUV |
+                                    SVGA3DBLOCKDESC_3PLANAR_YUV |
+                                    SVGA3DBLOCKDESC_COLOR,
+
+       SVGA3DBLOCKDESC_DEPTH_UINT = SVGA3DBLOCKDESC_DEPTH |
+                                    SVGA3DBLOCKDESC_UINT,
+       SVGA3DBLOCKDESC_DEPTH_UNORM = SVGA3DBLOCKDESC_DEPTH_UINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_DS      =    SVGA3DBLOCKDESC_DEPTH |
+                                    SVGA3DBLOCKDESC_STENCIL,
+       SVGA3DBLOCKDESC_DS_UINT =    SVGA3DBLOCKDESC_DEPTH |
+                                    SVGA3DBLOCKDESC_STENCIL |
+                                    SVGA3DBLOCKDESC_UINT,
+       SVGA3DBLOCKDESC_DS_UNORM =   SVGA3DBLOCKDESC_DS_UINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_DEPTH_FP   = SVGA3DBLOCKDESC_DEPTH |
+                                    SVGA3DBLOCKDESC_FP,
+
+       SVGA3DBLOCKDESC_UV_UINT    = SVGA3DBLOCKDESC_UV |
+                                    SVGA3DBLOCKDESC_UINT,
+       SVGA3DBLOCKDESC_UV_SNORM   = SVGA3DBLOCKDESC_UV |
+                                    SVGA3DBLOCKDESC_SINT |
+                                    SVGA3DBLOCKDESC_NORM,
+       SVGA3DBLOCKDESC_UVCX_SNORM = SVGA3DBLOCKDESC_UV_SNORM |
+                                    SVGA3DBLOCKDESC_CX,
+       SVGA3DBLOCKDESC_UVWQ_SNORM = SVGA3DBLOCKDESC_UVWQ |
+                                    SVGA3DBLOCKDESC_SINT |
+                                    SVGA3DBLOCKDESC_NORM,
 };
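
As an aside, every composed value above is a plain bitwise OR of the base
SVGA3DBLOCKDESC_* property bits defined earlier in this header, so a format
property can be tested by masking its block descriptor. A minimal sketch
(hypothetical helpers, assuming this header is included):

/* Sketch: query composed block-descriptor properties by masking. */
static inline bool svga3d_block_desc_is_srgb(enum svga3d_block_desc bdesc)
{
        return (bdesc & SVGA3DBLOCKDESC_SRGB) != 0;
}

static inline bool svga3d_block_desc_is_compressed(enum svga3d_block_desc bdesc)
{
        return (bdesc & SVGA3DBLOCKDESC_COMPRESSED) != 0;
}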
 
-/*
- * SVGA3dSurfaceDesc describes the actual pixel data.
- *
- * This structure provides the following information:
- *    1. Block description.
- *    2. Dimensions of a block in the surface.
- *    3. Size of block in bytes.
- *    4. Bit depth of the pixel data.
- *    5. Channel bit depths and masks (if applicable).
- */
 struct svga3d_channel_def {
        union {
                u8 blue;
-               u8 u;
+               u8 w_bump;
+               u8 l_bump;
                u8 uv_video;
                u8 u_video;
        };
        union {
                u8 green;
-               u8 v;
                u8 stencil;
+               u8 v_bump;
                u8 v_video;
        };
        union {
                u8 red;
-               u8 w;
+               u8 u_bump;
                u8 luminance;
-               u8 y;
+               u8 y_video;
                u8 depth;
                u8 data;
        };
        union {
                u8 alpha;
-               u8 q;
+               u8 q_bump;
                u8 exp;
        };
 };
 
+/*
+ * struct svga3d_surface_desc - describes the actual pixel data.
+ *
+ * @format: Surface format described by this entry
+ * @block_desc: Block description
+ * @block_size: Dimensions in pixels of a block
+ * @bytes_per_block: Size of block in bytes
+ * @pitch_bytes_per_block: Size of a block in bytes for purposes of pitch
+ * @bit_depth: Channel bit depths
+ * @bit_offset: Channel bit offsets (in bits from the start of the pixel)
+ */
 struct svga3d_surface_desc {
        SVGA3dSurfaceFormat format;
        enum svga3d_block_desc block_desc;
+
        surf_size_struct block_size;
        u32 bytes_per_block;
        u32 pitch_bytes_per_block;
 
-       u32 total_bit_depth;
        struct svga3d_channel_def bit_depth;
        struct svga3d_channel_def bit_offset;
 };
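
As an aside, with total_bit_depth gone the per-channel layout is carried
entirely by the bit_depth/bit_offset pairs. A sketch of extracting one channel
from a packed little-endian pixel, assuming this header is included (for
SVGA3D_R5G6B5 in the table below, red has depth 5 at offset 11):

/* Sketch: pull one channel out of a packed pixel word. */
static inline u32 svga3d_extract_channel(u32 pixel, u8 depth, u8 offset)
{
        if (depth == 0 || depth >= 32)
                return 0;       /* channel absent, or occupies the whole word */
        return (pixel >> offset) & ((1u << depth) - 1);
}
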
@@ -215,729 +381,728 @@ struct svga3d_surface_desc {
 static const struct svga3d_surface_desc svga3d_surface_descs[] = {
    {SVGA3D_FORMAT_INVALID, SVGA3DBLOCKDESC_NONE,
       {1, 1, 1},  0, 0,
-      0, {{0}, {0}, {0}, {0}},
+      {{0}, {0}, {0}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_X8R8G8B8, SVGA3DBLOCKDESC_RGB_UNORM,
       {1, 1, 1},  4, 4,
-      24, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A8R8G8B8, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_R5G6B5, SVGA3DBLOCKDESC_RGB_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{5}, {6}, {5}, {0}},
+      {{5}, {6}, {5}, {0}},
       {{0}, {5}, {11}, {0}}},
 
-   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_X1R5G5B5, SVGA3DBLOCKDESC_RGB_UNORM,
       {1, 1, 1},  2, 2,
-      15, {{5}, {5}, {5}, {0}},
+      {{5}, {5}, {5}, {0}},
       {{0}, {5}, {10}, {0}}},
 
-   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A1R5G5B5, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{5}, {5}, {5}, {1}},
+      {{5}, {5}, {5}, {1}},
       {{0}, {5}, {10}, {15}}},
 
-   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A4R4G4B4, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{4}, {4}, {4}, {4}},
+      {{4}, {4}, {4}, {4}},
       {{0}, {4}, {8}, {12}}},
 
-   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_D32, SVGA3DBLOCKDESC_DEPTH_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_D16, SVGA3DBLOCKDESC_DEPTH_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS,
+   {SVGA3D_Z_D24S8, SVGA3DBLOCKDESC_DS_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {8}, {24}, {0}},
-      {{0}, {24}, {0}, {0}}},
+      {{0}, {8}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
 
-   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS,
+   {SVGA3D_Z_D15S1, SVGA3DBLOCKDESC_DS_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {1}, {15}, {0}},
-      {{0}, {15}, {0}, {0}}},
+      {{0}, {1}, {15}, {0}},
+      {{0}, {0}, {1}, {0}}},
 
-   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_LUMINANCE,
+   {SVGA3D_LUMINANCE8, SVGA3DBLOCKDESC_L_UNORM,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA,
-    {1  , 1, 1},  1, 1,
-      8, {{0}, {0}, {4}, {4}},
+   {SVGA3D_LUMINANCE4_ALPHA4, SVGA3DBLOCKDESC_LA_UNORM,
+      {1, 1, 1},  1, 1,
+      {{0}, {0}, {4}, {4}},
       {{0}, {0}, {0}, {4}}},
 
-   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_LUMINANCE,
+   {SVGA3D_LUMINANCE16, SVGA3DBLOCKDESC_L_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA,
+   {SVGA3D_LUMINANCE8_ALPHA8, SVGA3DBLOCKDESC_LA_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {8}, {8}},
+      {{0}, {0}, {8}, {8}},
       {{0}, {0}, {0}, {8}}},
 
-   {SVGA3D_DXT1, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT1, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_DXT2, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT2, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_DXT3, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT3, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_DXT4, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT4, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_DXT5, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_DXT5, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_BUMPU8V8, SVGA3DBLOCKDESC_UV_SNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {8}, {8}},
-      {{0}, {0}, {0}, {8}}},
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
 
    {SVGA3D_BUMPL6V5U5, SVGA3DBLOCKDESC_UVL,
       {1, 1, 1},  2, 2,
-      16, {{5}, {5}, {6}, {0}},
-      {{11}, {6}, {0}, {0}}},
+      {{6}, {5}, {5}, {0}},
+      {{10}, {5}, {0}, {0}}},
 
    {SVGA3D_BUMPX8L8V8U8, SVGA3DBLOCKDESC_UVL,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{16}, {8}, {0}, {0}}},
 
-   {SVGA3D_BUMPL8V8U8, SVGA3DBLOCKDESC_UVL,
+   {SVGA3D_FORMAT_DEAD1, SVGA3DBLOCKDESC_UVL,
       {1, 1, 1},  3, 3,
-      24, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{16}, {8}, {0}, {0}}},
 
    {SVGA3D_ARGB_S10E5, SVGA3DBLOCKDESC_RGBA_FP,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
    {SVGA3D_ARGB_S23E8, SVGA3DBLOCKDESC_RGBA_FP,
       {1, 1, 1},  16, 16,
-      128, {{32}, {32}, {32}, {32}},
+      {{32}, {32}, {32}, {32}},
       {{64}, {32}, {0}, {96}}},
 
-   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A2R10G10B10, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{10}, {10}, {10}, {2}},
+      {{10}, {10}, {10}, {2}},
       {{0}, {10}, {20}, {30}}},
 
-   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_V8U8, SVGA3DBLOCKDESC_UV_SNORM,
       {1, 1, 1},  2, 2,
-      16, {{8}, {8}, {0}, {0}},
-      {{8}, {0}, {0}, {0}}},
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
 
-   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ,
+   {SVGA3D_Q8W8V8U8, SVGA3DBLOCKDESC_UVWQ_SNORM,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
-      {{24}, {16}, {8}, {0}}},
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
 
-   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_CxV8U8, SVGA3DBLOCKDESC_UVCX_SNORM,
       {1, 1, 1},  2, 2,
-      16, {{8}, {8}, {0}, {0}},
-      {{8}, {0}, {0}, {0}}},
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
 
    {SVGA3D_X8L8V8U8, SVGA3DBLOCKDESC_UVL,
       {1, 1, 1},  4, 4,
-      24, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{16}, {8}, {0}, {0}}},
 
    {SVGA3D_A2W10V10U10, SVGA3DBLOCKDESC_UVWA,
       {1, 1, 1},  4, 4,
-      32, {{10}, {10}, {10}, {2}},
-      {{0}, {10}, {20}, {30}}},
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
 
-   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_ALPHA,
+   {SVGA3D_ALPHA8, SVGA3DBLOCKDESC_A_UNORM,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {8}},
       {{0}, {0}, {0}, {0}}},
 
    {SVGA3D_R_S10E5, SVGA3DBLOCKDESC_R_FP,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
    {SVGA3D_R_S23E8, SVGA3DBLOCKDESC_R_FP,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
    {SVGA3D_RG_S10E5, SVGA3DBLOCKDESC_RG_FP,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {16}, {0}},
       {{0}, {16}, {0}, {0}}},
 
    {SVGA3D_RG_S23E8, SVGA3DBLOCKDESC_RG_FP,
       {1, 1, 1},  8, 8,
-      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
    {SVGA3D_BUFFER, SVGA3DBLOCKDESC_BUFFER,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_D24X8, SVGA3DBLOCKDESC_DEPTH_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {24}, {0}},
-      {{0}, {24}, {0}, {0}}},
+      {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
 
-   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_V16U16, SVGA3DBLOCKDESC_UV_SNORM,
       {1, 1, 1},  4, 4,
-      32, {{16}, {16}, {0}, {0}},
-      {{16}, {0}, {0}, {0}}},
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_G16R16, SVGA3DBLOCKDESC_RG_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
-      {{0}, {0}, {16}, {0}}},
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_A16B16G16R16, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
    {SVGA3D_UYVY, SVGA3DBLOCKDESC_YUV,
-      {1, 1, 1},  2, 2,
-      16, {{8}, {0}, {8}, {0}},
+      {2, 1, 1},  4, 4,
+      {{8}, {0}, {8}, {0}},
       {{0}, {0}, {8}, {0}}},
 
    {SVGA3D_YUY2, SVGA3DBLOCKDESC_YUV,
-      {1, 1, 1},  2, 2,
-      16, {{8}, {0}, {8}, {0}},
+      {2, 1, 1},  4, 4,
+      {{8}, {0}, {8}, {0}},
       {{8}, {0}, {0}, {0}}},
 
    {SVGA3D_NV12, SVGA3DBLOCKDESC_NV12,
       {2, 2, 1},  6, 2,
-      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {48}, {0}},
       {{0}, {0}, {0}, {0}}},
 
    {SVGA3D_AYUV, SVGA3DBLOCKDESC_AYUV,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R32G32B32A32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  16, 16,
-      128, {{32}, {32}, {32}, {32}},
+      {{32}, {32}, {32}, {32}},
       {{64}, {32}, {0}, {96}}},
 
-   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R32G32B32A32_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
       {1, 1, 1},  16, 16,
-      128, {{32}, {32}, {32}, {32}},
+      {{32}, {32}, {32}, {32}},
       {{64}, {32}, {0}, {96}}},
 
-   {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_UVWQ,
+   {SVGA3D_R32G32B32A32_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
       {1, 1, 1},  16, 16,
-      128, {{32}, {32}, {32}, {32}},
+      {{32}, {32}, {32}, {32}},
       {{64}, {32}, {0}, {96}}},
 
-   {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_R32G32B32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  12, 12,
-      96, {{32}, {32}, {32}, {0}},
+      {{32}, {32}, {32}, {0}},
       {{64}, {32}, {0}, {0}}},
 
    {SVGA3D_R32G32B32_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
       {1, 1, 1},  12, 12,
-      96, {{32}, {32}, {32}, {0}},
+      {{32}, {32}, {32}, {0}},
       {{64}, {32}, {0}, {0}}},
 
-   {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_R32G32B32_UINT, SVGA3DBLOCKDESC_RGB_UINT,
       {1, 1, 1},  12, 12,
-      96, {{32}, {32}, {32}, {0}},
+      {{32}, {32}, {32}, {0}},
       {{64}, {32}, {0}, {0}}},
 
-   {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_UVW,
+   {SVGA3D_R32G32B32_SINT, SVGA3DBLOCKDESC_RGB_SINT,
       {1, 1, 1},  12, 12,
-      96, {{32}, {32}, {32}, {0}},
+      {{32}, {32}, {32}, {0}},
       {{64}, {32}, {0}, {0}}},
 
-   {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R16G16B16A16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
-   {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R16G16B16A16_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
-   {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_UVWQ,
+   {SVGA3D_R16G16B16A16_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
-   {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_UVWQ,
+   {SVGA3D_R16G16B16A16_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
-   {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R32G32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  8, 8,
-      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
-   {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R32G32_UINT, SVGA3DBLOCKDESC_RG_UINT,
       {1, 1, 1},  8, 8,
-      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
-   {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_R32G32_SINT, SVGA3DBLOCKDESC_RG_SINT,
       {1, 1, 1},  8, 8,
-      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
-   {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R32G8X24_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  8, 8,
-      64, {{0}, {8}, {32}, {0}},
+      {{0}, {8}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
    {SVGA3D_D32_FLOAT_S8X24_UINT, SVGA3DBLOCKDESC_DS,
       {1, 1, 1},  8, 8,
-      64, {{0}, {8}, {32}, {0}},
+      {{0}, {8}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
-   {SVGA3D_R32_FLOAT_X8X24_TYPELESS, SVGA3DBLOCKDESC_R_FP,
+   {SVGA3D_R32_FLOAT_X8X24, SVGA3DBLOCKDESC_R_FP,
       {1, 1, 1},  8, 8,
-      64, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_X32_TYPELESS_G8X24_UINT, SVGA3DBLOCKDESC_GREEN,
+   {SVGA3D_X32_G8X24_UINT, SVGA3DBLOCKDESC_G_UINT,
       {1, 1, 1},  8, 8,
-      64, {{0}, {8}, {0}, {0}},
+      {{0}, {8}, {0}, {0}},
       {{0}, {32}, {0}, {0}}},
 
-   {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R10G10B10A2_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      32, {{10}, {10}, {10}, {2}},
-      {{0}, {10}, {20}, {30}}},
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
 
-   {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R10G10B10A2_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
       {1, 1, 1},  4, 4,
-      32, {{10}, {10}, {10}, {2}},
-      {{0}, {10}, {20}, {30}}},
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
 
    {SVGA3D_R11G11B10_FLOAT, SVGA3DBLOCKDESC_RGB_FP,
       {1, 1, 1},  4, 4,
-      32, {{10}, {11}, {11}, {0}},
-      {{0}, {10}, {21}, {0}}},
+      {{10}, {11}, {11}, {0}},
+      {{22}, {11}, {0}, {0}}},
 
-   {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R8G8B8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{16}, {8}, {0}, {24}}},
 
-   {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R8G8B8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{16}, {8}, {0}, {24}}},
 
-   {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+   {SVGA3D_R8G8B8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{16}, {8}, {0}, {24}}},
 
-   {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R8G8B8A8_UINT, SVGA3DBLOCKDESC_RGBA_UINT,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{16}, {8}, {0}, {24}}},
 
-   {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R8G8B8A8_SINT, SVGA3DBLOCKDESC_RGBA_SINT,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{16}, {8}, {0}, {24}}},
 
-   {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R16G16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {16}, {0}},
       {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_FP,
+   {SVGA3D_R16G16_UINT, SVGA3DBLOCKDESC_RG_UINT,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {16}, {0}},
       {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_R16G16_SINT, SVGA3DBLOCKDESC_RG_SINT,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {16}, {0}},
       {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R32_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_D32_FLOAT, SVGA3DBLOCKDESC_DEPTH_FP,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R32_UINT, SVGA3DBLOCKDESC_R_UINT,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R32_SINT, SVGA3DBLOCKDESC_R_SINT,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R24G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      32, {{0}, {8}, {24}, {0}},
+      {{0}, {8}, {24}, {0}},
       {{0}, {24}, {0}, {0}}},
 
-   {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS,
+   {SVGA3D_D24_UNORM_S8_UINT, SVGA3DBLOCKDESC_DS_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {8}, {24}, {0}},
+      {{0}, {8}, {24}, {0}},
       {{0}, {24}, {0}, {0}}},
 
-   {SVGA3D_R24_UNORM_X8_TYPELESS, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R24_UNORM_X8, SVGA3DBLOCKDESC_R_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {24}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_X24_TYPELESS_G8_UINT, SVGA3DBLOCKDESC_GREEN,
+   {SVGA3D_X24_G8_UINT, SVGA3DBLOCKDESC_G_UINT,
       {1, 1, 1},  4, 4,
-      32, {{0}, {8}, {0}, {0}},
+      {{0}, {8}, {0}, {0}},
       {{0}, {24}, {0}, {0}}},
 
-   {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R8G8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  2, 2,
-      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {8}, {0}},
       {{0}, {8}, {0}, {0}}},
 
-   {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R8G8_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {8}, {0}},
       {{0}, {8}, {0}, {0}}},
 
-   {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R8G8_UINT, SVGA3DBLOCKDESC_RG_UINT,
       {1, 1, 1},  2, 2,
-      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {8}, {0}},
       {{0}, {8}, {0}, {0}}},
 
-   {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_UV,
+   {SVGA3D_R8G8_SINT, SVGA3DBLOCKDESC_RG_SINT,
       {1, 1, 1},  2, 2,
-      16, {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {8}, {0}},
       {{0}, {8}, {0}, {0}}},
 
-   {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R16_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R16_UNORM, SVGA3DBLOCKDESC_R_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R16_UINT, SVGA3DBLOCKDESC_R_UINT,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_U,
+   {SVGA3D_R16_SNORM, SVGA3DBLOCKDESC_R_SNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_U,
+   {SVGA3D_R16_SINT, SVGA3DBLOCKDESC_R_SINT,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R8_UNORM, SVGA3DBLOCKDESC_R_UNORM,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_R8_UINT, SVGA3DBLOCKDESC_R_UINT,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_U,
+   {SVGA3D_R8_SNORM, SVGA3DBLOCKDESC_R_SNORM,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_U,
+   {SVGA3D_R8_SINT, SVGA3DBLOCKDESC_R_SINT,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_P8, SVGA3DBLOCKDESC_RED,
+   {SVGA3D_P8, SVGA3DBLOCKDESC_NONE,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {8}, {0}},
+      {{0}, {0}, {8}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGBE,
+   {SVGA3D_R9G9B9E5_SHAREDEXP, SVGA3DBLOCKDESC_RGB_EXP,
       {1, 1, 1},  4, 4,
-      32, {{9}, {9}, {9}, {5}},
+      {{9}, {9}, {9}, {5}},
       {{18}, {9}, {0}, {27}}},
 
-   {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_RG,
-      {1, 1, 1},  2, 2,
-      16, {{0}, {8}, {8}, {0}},
-      {{0}, {8}, {0}, {0}}},
+   {SVGA3D_R8G8_B8G8_UNORM, SVGA3DBLOCKDESC_NONE,
+      {2, 1, 1},  4, 4,
+      {{0}, {8}, {8}, {0}},
+      {{0}, {0}, {8}, {0}}},
 
-   {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_RG,
-      {1, 1, 1},  2, 2,
-      16, {{0}, {8}, {8}, {0}},
+   {SVGA3D_G8R8_G8B8_UNORM, SVGA3DBLOCKDESC_NONE,
+      {2, 1, 1},  4, 4,
+      {{0}, {8}, {8}, {0}},
       {{0}, {8}, {0}, {0}}},
 
-   {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC1_TYPELESS, SVGA3DBLOCKDESC_BC1_COMP_TYPELESS,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+   {SVGA3D_BC1_UNORM_SRGB, SVGA3DBLOCKDESC_BC1_COMP_UNORM_SRGB,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC2_TYPELESS, SVGA3DBLOCKDESC_BC2_COMP_TYPELESS,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+   {SVGA3D_BC2_UNORM_SRGB, SVGA3DBLOCKDESC_BC2_COMP_UNORM_SRGB,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC3_TYPELESS, SVGA3DBLOCKDESC_BC3_COMP_TYPELESS,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+   {SVGA3D_BC3_UNORM_SRGB, SVGA3DBLOCKDESC_BC3_COMP_UNORM_SRGB,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC4_TYPELESS, SVGA3DBLOCKDESC_BC4_COMP_TYPELESS,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_ATI1, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_ATI1, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC4_SNORM, SVGA3DBLOCKDESC_BC4_COMP_SNORM,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC5_TYPELESS, SVGA3DBLOCKDESC_BC5_COMP_TYPELESS,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_ATI2, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_ATI2, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC5_SNORM, SVGA3DBLOCKDESC_BC5_COMP_SNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R10G10B10_XR_BIAS_A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{10}, {10}, {10}, {2}},
-      {{0}, {10}, {20}, {30}}},
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
 
-   {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_B8G8R8A8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_SRGB,
+   {SVGA3D_B8G8R8A8_UNORM_SRGB, SVGA3DBLOCKDESC_RGBA_UNORM_SRGB,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_B8G8R8X8_TYPELESS, SVGA3DBLOCKDESC_TYPELESS,
       {1, 1, 1},  4, 4,
-      24, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_SRGB,
+   {SVGA3D_B8G8R8X8_UNORM_SRGB, SVGA3DBLOCKDESC_RGB_UNORM_SRGB,
       {1, 1, 1},  4, 4,
-      24, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_DF16, SVGA3DBLOCKDESC_DEPTH_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_Z_DF24, SVGA3DBLOCKDESC_DEPTH_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {8}, {24}, {0}},
-      {{0}, {24}, {0}, {0}}},
+      {{0}, {0}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
 
-   {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS,
+   {SVGA3D_Z_D24S8_INT, SVGA3DBLOCKDESC_DS_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {8}, {24}, {0}},
-      {{0}, {24}, {0}, {0}}},
+      {{0}, {8}, {24}, {0}},
+      {{0}, {0}, {8}, {0}}},
 
    {SVGA3D_YV12, SVGA3DBLOCKDESC_YV12,
       {2, 2, 1},  6, 2,
-      48, {{0}, {0}, {48}, {0}},
+      {{0}, {0}, {48}, {0}},
       {{0}, {0}, {0}, {0}}},
 
    {SVGA3D_R32G32B32A32_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
       {1, 1, 1},  16, 16,
-      128, {{32}, {32}, {32}, {32}},
+      {{32}, {32}, {32}, {32}},
       {{64}, {32}, {0}, {96}}},
 
    {SVGA3D_R16G16B16A16_FLOAT, SVGA3DBLOCKDESC_RGBA_FP,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
-   {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R16G16B16A16_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  8, 8,
-      64, {{16}, {16}, {16}, {16}},
+      {{16}, {16}, {16}, {16}},
       {{32}, {16}, {0}, {48}}},
 
    {SVGA3D_R32G32_FLOAT, SVGA3DBLOCKDESC_RG_FP,
       {1, 1, 1},  8, 8,
-      64, {{0}, {32}, {32}, {0}},
+      {{0}, {32}, {32}, {0}},
       {{0}, {32}, {0}, {0}}},
 
-   {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R10G10B10A2_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{10}, {10}, {10}, {2}},
-      {{0}, {10}, {20}, {30}}},
+      {{10}, {10}, {10}, {2}},
+      {{20}, {10}, {0}, {30}}},
 
-   {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_R8G8B8A8_SNORM, SVGA3DBLOCKDESC_RGBA_SNORM,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
-      {{24}, {16}, {8}, {0}}},
+      {{8}, {8}, {8}, {8}},
+      {{16}, {8}, {0}, {24}}},
 
    {SVGA3D_R16G16_FLOAT, SVGA3DBLOCKDESC_RG_FP,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {16}, {0}},
       {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R16G16_UNORM, SVGA3DBLOCKDESC_RG_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{0}, {16}, {16}, {0}},
-      {{0}, {0}, {16}, {0}}},
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
 
-   {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R16G16_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
       {1, 1, 1},  4, 4,
-      32, {{16}, {16}, {0}, {0}},
-      {{16}, {0}, {0}, {0}}},
+      {{0}, {16}, {16}, {0}},
+      {{0}, {16}, {0}, {0}}},
 
    {SVGA3D_R32_FLOAT, SVGA3DBLOCKDESC_R_FP,
       {1, 1, 1},  4, 4,
-      32, {{0}, {0}, {32}, {0}},
+      {{0}, {0}, {32}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG,
+   {SVGA3D_R8G8_SNORM, SVGA3DBLOCKDESC_RG_SNORM,
       {1, 1, 1},  2, 2,
-      16, {{8}, {8}, {0}, {0}},
-      {{8}, {0}, {0}, {0}}},
+      {{0}, {8}, {8}, {0}},
+      {{0}, {8}, {0}, {0}}},
 
    {SVGA3D_R16_FLOAT, SVGA3DBLOCKDESC_R_FP,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH,
+   {SVGA3D_D16_UNORM, SVGA3DBLOCKDESC_DEPTH_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{0}, {0}, {16}, {0}},
+      {{0}, {0}, {16}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_ALPHA,
+   {SVGA3D_A8_UNORM, SVGA3DBLOCKDESC_A_UNORM,
       {1, 1, 1},  1, 1,
-      8, {{0}, {0}, {0}, {8}},
+      {{0}, {0}, {0}, {8}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC1_UNORM, SVGA3DBLOCKDESC_BC1_COMP_UNORM,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC2_UNORM, SVGA3DBLOCKDESC_BC2_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC3_UNORM, SVGA3DBLOCKDESC_BC3_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_B5G6R5_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{5}, {6}, {5}, {0}},
+      {{5}, {6}, {5}, {0}},
       {{0}, {5}, {11}, {0}}},
 
-   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_B5G5R5A1_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  2, 2,
-      16, {{5}, {5}, {5}, {1}},
+      {{5}, {5}, {5}, {1}},
       {{0}, {5}, {10}, {15}}},
 
-   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA,
+   {SVGA3D_B8G8R8A8_UNORM, SVGA3DBLOCKDESC_RGBA_UNORM,
       {1, 1, 1},  4, 4,
-      32, {{8}, {8}, {8}, {8}},
+      {{8}, {8}, {8}, {8}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB,
+   {SVGA3D_B8G8R8X8_UNORM, SVGA3DBLOCKDESC_RGB_UNORM,
       {1, 1, 1},  4, 4,
-      24, {{8}, {8}, {8}, {0}},
+      {{8}, {8}, {8}, {0}},
       {{0}, {8}, {16}, {24}}},
 
-   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC4_UNORM, SVGA3DBLOCKDESC_BC4_COMP_UNORM,
       {4, 4, 1},  8, 8,
-      64, {{0}, {0}, {64}, {0}},
+      {{0}, {0}, {64}, {0}},
       {{0}, {0}, {0}, {0}}},
 
-   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_COMPRESSED,
+   {SVGA3D_BC5_UNORM, SVGA3DBLOCKDESC_BC5_COMP_UNORM,
       {4, 4, 1},  16, 16,
-      128, {{0}, {0}, {128}, {0}},
+      {{0}, {0}, {128}, {0}},
       {{0}, {0}, {0}, {0}}},
-
 };
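
As an aside on the NV12/YV12 rows above: a 2x2 pixel block carries four bytes
of luma plus two bytes of chroma (bytes_per_block 6), while the pitch advances
only two bytes per block column because the planes are stored separately, so a
640x480 NV12 image needs 640 * 480 * 3 / 2 = 460800 bytes. A sketch using the
helper defined later in this header (surf_size_struct field names assumed from
SVGA3dSize):

/* Sketch: planar NV12 image size, 640x480: luma plane plus half-size chroma. */
static inline u32 example_nv12_640x480_bytes(void)
{
        const struct svga3d_surface_desc *desc =
                svga3dsurface_get_desc(SVGA3D_NV12);
        surf_size_struct size = { .width = 640, .height = 480, .depth = 1 };

        return svga3dsurface_get_image_buffer_size(desc, &size, 0); /* 460800 */
}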
 
 static inline u32 clamped_umul32(u32 a, u32 b)
@@ -946,6 +1111,10 @@ static inline u32 clamped_umul32(u32 a, u32 b)
        return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
 }
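
As an aside, clamped_umul32() saturates at U32_MAX rather than wrapping, which
is what makes the size computations below overflow-safe. A quick boundary
check, as a sketch assuming this header is included:

/* Sketch: 0x10000 * 0x10000 == 2^32 exceeds U32_MAX, so the product clamps. */
static inline bool clamped_umul32_sane(void)
{
        return clamped_umul32(0x10000, 0x10000) == 0xffffffffU &&
               clamped_umul32(1000, 1000) == 1000000U; /* in range: exact */
}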
 
+/**
+ * svga3dsurface_get_desc - Look up the appropriate SVGA3dSurfaceDesc for the
+ * given format.
+ */
 static inline const struct svga3d_surface_desc *
 svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
 {
@@ -955,23 +1124,10 @@ svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
        return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
 }
 
-/*
- *----------------------------------------------------------------------
- *
- * svga3dsurface_get_mip_size --
- *
- *      Given a base level size and the mip level, compute the size of
- *      the mip level.
- *
- * Results:
- *      See above.
- *
- * Side effects:
- *      None.
- *
- *----------------------------------------------------------------------
+/**
+ * svga3dsurface_get_mip_size -  Given a base level size and the mip level,
+ * compute the size of the mip level.
  */
-
 static inline surf_size_struct
 svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
 {
@@ -1018,28 +1174,17 @@ svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
        return pitch;
 }
 
-/*
- *-----------------------------------------------------------------------------
- *
- * svga3dsurface_get_image_buffer_size --
- *
- *      Return the number of bytes of buffer space required to store
- *      one image of a surface, optionally using the specified pitch.
- *
- *      If pitch is zero, it is assumed that rows are tightly packed.
+/**
+ * svga3dsurface_get_image_buffer_size - Calculates image buffer size.
  *
- *      This function is overflow-safe. If the result would have
- *      overflowed, instead we return MAX_UINT32.
+ * Return the number of bytes of buffer space required to store one image of a
+ * surface, optionally using the specified pitch.
  *
- * Results:
- *      Byte count.
+ * If pitch is zero, it is assumed that rows are tightly packed.
  *
- * Side effects:
- *      None.
- *
- *-----------------------------------------------------------------------------
+ * This function is overflow-safe. If the result would have overflowed, instead
+ * we return MAX_UINT32.
  */
-
 static inline u32
 svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
                                    const surf_size_struct *size,
@@ -1067,6 +1212,9 @@ svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
        return total_size;
 }
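
As an aside, for the tightly packed case (a pitch argument of 0, per the doc
comment above) a 64x64 SVGA3D_DXT1 image is 16x16 blocks of 8 bytes each,
i.e. 2048 bytes. A usage sketch, assuming this header is included
(surf_size_struct field names assumed from SVGA3dSize):

/* Sketch: image buffer size for a 64x64 DXT1 surface. */
static inline u32 example_dxt1_64x64_bytes(void)
{
        const struct svga3d_surface_desc *desc =
                svga3dsurface_get_desc(SVGA3D_DXT1);
        surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

        return svga3dsurface_get_image_buffer_size(desc, &size, 0); /* 2048 */
}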
 
+/**
+ * svga3dsurface_get_serialized_size - Get the serialized size for the image.
+ */
 static inline u32
 svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
                                  surf_size_struct base_level_size,
@@ -1087,6 +1235,26 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
        return total_size * num_layers;
 }
 
+/**
+ * svga3dsurface_get_serialized_size_extended - Returns the number of bytes
+ * required for a surface with the given parameters, including sample count.
+ */
+static inline u32
+svga3dsurface_get_serialized_size_extended(SVGA3dSurfaceFormat format,
+                                          surf_size_struct base_level_size,
+                                          u32 num_mip_levels,
+                                          u32 num_layers,
+                                          u32 num_samples)
+{
+       uint64_t total_size =
+               svga3dsurface_get_serialized_size(format,
+                                                 base_level_size,
+                                                 num_mip_levels,
+                                                 num_layers);
+       total_size *= max_t(u32, 1, num_samples);
+
+       return min_t(uint64_t, total_size, (uint64_t)U32_MAX);
+}
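
As an aside, the extended helper simply scales the single-sample size by
num_samples and clamps to U32_MAX. A usage sketch with arbitrary parameters,
assuming this header is included:

/* Sketch: serialized size of a 4-sample, 1-mip, 1-layer 800x600 RGBA8 surface. */
static inline u32 example_msaa_serialized_bytes(void)
{
        surf_size_struct base = { .width = 800, .height = 600, .depth = 1 };

        return svga3dsurface_get_serialized_size_extended(SVGA3D_R8G8B8A8_UNORM,
                                                          base, 1, 1, 4);
}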
 
 /**
  * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
@@ -1206,3 +1374,5 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
        }
        return svga3dsurface_is_dx_screen_target_format(format);
 }
+
+#endif /* _SVGA3D_SURFACEDEFS_H_ */
index 27b33ba8843095523304b41adf4bae2c48c6f3b3..308370665a8ea5c514c8551661045f4c5e5fa73a 100644 (file)
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2012-2015 VMware, Inc.  All rights reserved.
+ * Copyright 2012-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
 
 #define SVGA3D_INVALID_ID         ((uint32)-1)
 
+typedef uint8 SVGABool8;   /* 8-bit Bool definition */
 typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 typedef uint32 SVGA3dColor; /* a, r, g, b */
 
+typedef uint32 SVGA3dSurfaceId;
+
+typedef
+#include "vmware_pack_begin.h"
+struct {
+   uint32 numerator;
+   uint32 denominator;
+}
+#include "vmware_pack_end.h"
+SVGA3dFraction64;
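
As an aside, a 64-bit numerator/denominator pair can represent rates that do
not reduce to an integer; the header does not say what the device uses it for,
but NTSC-style timing is the classic case. A purely illustrative sketch:

/* Sketch: ~59.94 Hz expressed exactly as 60000/1001. */
static const SVGA3dFraction64 example_ntsc_rate = {
        .numerator   = 60000,
        .denominator = 1001,
};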
+
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dCopyRect {
@@ -145,7 +158,7 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_BUMPU8V8                     = 20,
    SVGA3D_BUMPL6V5U5                   = 21,
    SVGA3D_BUMPX8L8V8U8                 = 22,
-   SVGA3D_BUMPL8V8U8                   = 23,
+   SVGA3D_FORMAT_DEAD1                 = 23,
 
    SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
    SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
@@ -204,8 +217,8 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_R32G32_SINT                  = 59,
    SVGA3D_R32G8X24_TYPELESS            = 60,
    SVGA3D_D32_FLOAT_S8X24_UINT         = 61,
-   SVGA3D_R32_FLOAT_X8X24_TYPELESS     = 62,
-   SVGA3D_X32_TYPELESS_G8X24_UINT      = 63,
+   SVGA3D_R32_FLOAT_X8X24              = 62,
+   SVGA3D_X32_G8X24_UINT               = 63,
    SVGA3D_R10G10B10A2_TYPELESS         = 64,
    SVGA3D_R10G10B10A2_UINT             = 65,
    SVGA3D_R11G11B10_FLOAT              = 66,
@@ -223,8 +236,8 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_R32_SINT                     = 78,
    SVGA3D_R24G8_TYPELESS               = 79,
    SVGA3D_D24_UNORM_S8_UINT            = 80,
-   SVGA3D_R24_UNORM_X8_TYPELESS        = 81,
-   SVGA3D_X24_TYPELESS_G8_UINT         = 82,
+   SVGA3D_R24_UNORM_X8                 = 81,
+   SVGA3D_X24_G8_UINT                  = 82,
    SVGA3D_R8G8_TYPELESS                = 83,
    SVGA3D_R8G8_UNORM                   = 84,
    SVGA3D_R8G8_UINT                    = 85,
@@ -296,92 +309,114 @@ typedef enum SVGA3dSurfaceFormat {
    SVGA3D_FORMAT_MAX
 } SVGA3dSurfaceFormat;
 
-typedef enum SVGA3dSurfaceFlags {
-   SVGA3D_SURFACE_CUBEMAP               = (1 << 0),
+/*
+ * SVGA3d Surface Flags --
+ */
+#define SVGA3D_SURFACE_CUBEMAP                (1 << 0)
 
-   /*
-    * HINT flags are not enforced by the device but are useful for
-    * performance.
-    */
-   SVGA3D_SURFACE_HINT_STATIC           = (1 << 1),
-   SVGA3D_SURFACE_HINT_DYNAMIC          = (1 << 2),
-   SVGA3D_SURFACE_HINT_INDEXBUFFER      = (1 << 3),
-   SVGA3D_SURFACE_HINT_VERTEXBUFFER     = (1 << 4),
-   SVGA3D_SURFACE_HINT_TEXTURE          = (1 << 5),
-   SVGA3D_SURFACE_HINT_RENDERTARGET     = (1 << 6),
-   SVGA3D_SURFACE_HINT_DEPTHSTENCIL     = (1 << 7),
-   SVGA3D_SURFACE_HINT_WRITEONLY        = (1 << 8),
-   SVGA3D_SURFACE_MASKABLE_ANTIALIAS    = (1 << 9),
-   SVGA3D_SURFACE_AUTOGENMIPMAPS        = (1 << 10),
-   SVGA3D_SURFACE_DECODE_RENDERTARGET   = (1 << 11),
+/*
+ * HINT flags are not enforced by the device but are useful for
+ * performance.
+ */
+#define SVGA3D_SURFACE_HINT_STATIC            (CONST64U(1) << 1)
+#define SVGA3D_SURFACE_HINT_DYNAMIC           (CONST64U(1) << 2)
+#define SVGA3D_SURFACE_HINT_INDEXBUFFER       (CONST64U(1) << 3)
+#define SVGA3D_SURFACE_HINT_VERTEXBUFFER      (CONST64U(1) << 4)
+#define SVGA3D_SURFACE_HINT_TEXTURE           (CONST64U(1) << 5)
+#define SVGA3D_SURFACE_HINT_RENDERTARGET      (CONST64U(1) << 6)
+#define SVGA3D_SURFACE_HINT_DEPTHSTENCIL      (CONST64U(1) << 7)
+#define SVGA3D_SURFACE_HINT_WRITEONLY         (CONST64U(1) << 8)
+#define SVGA3D_SURFACE_MASKABLE_ANTIALIAS     (CONST64U(1) << 9)
+#define SVGA3D_SURFACE_AUTOGENMIPMAPS         (CONST64U(1) << 10)
+
+#define SVGA3D_SURFACE_DECODE_RENDERTARGET    (CONST64U(1) << 11)
 
-   /*
-    * Is this surface using a base-level pitch for it's mob backing?
-    *
-    * This flag is not intended to be set by guest-drivers, but is instead
-    * set by the device when the surface is bound to a mob with a specified
-    * pitch.
-    */
-   SVGA3D_SURFACE_MOB_PITCH             = (1 << 12),
+/*
+ * Is this surface using a base-level pitch for its mob backing?
+ *
+ * This flag is not intended to be set by guest-drivers, but is instead
+ * set by the device when the surface is bound to a mob with a specified
+ * pitch.
+ */
+#define SVGA3D_SURFACE_MOB_PITCH              (CONST64U(1) << 12)
 
-   SVGA3D_SURFACE_INACTIVE              = (1 << 13),
-   SVGA3D_SURFACE_HINT_RT_LOCKABLE      = (1 << 14),
-   SVGA3D_SURFACE_VOLUME                = (1 << 15),
+#define SVGA3D_SURFACE_INACTIVE               (CONST64U(1) << 13)
+#define SVGA3D_SURFACE_HINT_RT_LOCKABLE       (CONST64U(1) << 14)
+#define SVGA3D_SURFACE_VOLUME                 (CONST64U(1) << 15)
 
-   /*
-    * Required to be set on a surface to bind it to a screen target.
-    */
-   SVGA3D_SURFACE_SCREENTARGET          = (1 << 16),
+/*
+ * Required to be set on a surface to bind it to a screen target.
+ */
+#define SVGA3D_SURFACE_SCREENTARGET           (CONST64U(1) << 16)
 
-   /*
-    * Align images in the guest-backing mob to 16-bytes.
-    */
-   SVGA3D_SURFACE_ALIGN16               = (1 << 17),
+/*
+ * Align images in the guest-backing mob to 16-bytes.
+ */
+#define SVGA3D_SURFACE_ALIGN16                (CONST64U(1) << 17)
 
-   SVGA3D_SURFACE_1D                    = (1 << 18),
-   SVGA3D_SURFACE_ARRAY                 = (1 << 19),
+#define SVGA3D_SURFACE_1D                     (CONST64U(1) << 18)
+#define SVGA3D_SURFACE_ARRAY                  (CONST64U(1) << 19)
 
-   /*
-    * Bind flags.
-    * These are enforced for any surface defined with DefineGBSurface_v2.
-    */
-   SVGA3D_SURFACE_BIND_VERTEX_BUFFER    = (1 << 20),
-   SVGA3D_SURFACE_BIND_INDEX_BUFFER     = (1 << 21),
-   SVGA3D_SURFACE_BIND_CONSTANT_BUFFER  = (1 << 22),
-   SVGA3D_SURFACE_BIND_SHADER_RESOURCE  = (1 << 23),
-   SVGA3D_SURFACE_BIND_RENDER_TARGET    = (1 << 24),
-   SVGA3D_SURFACE_BIND_DEPTH_STENCIL    = (1 << 25),
-   SVGA3D_SURFACE_BIND_STREAM_OUTPUT    = (1 << 26),
+/*
+ * Bind flags.
+ * These are enforced for any surface defined with DefineGBSurface_v2.
+ */
+#define SVGA3D_SURFACE_BIND_VERTEX_BUFFER     (CONST64U(1) << 20)
+#define SVGA3D_SURFACE_BIND_INDEX_BUFFER      (CONST64U(1) << 21)
+#define SVGA3D_SURFACE_BIND_CONSTANT_BUFFER   (CONST64U(1) << 22)
+#define SVGA3D_SURFACE_BIND_SHADER_RESOURCE   (CONST64U(1) << 23)
+#define SVGA3D_SURFACE_BIND_RENDER_TARGET     (CONST64U(1) << 24)
+#define SVGA3D_SURFACE_BIND_DEPTH_STENCIL     (CONST64U(1) << 25)
+#define SVGA3D_SURFACE_BIND_STREAM_OUTPUT     (CONST64U(1) << 26)
 
-   /*
-    * A note on staging flags:
-    *
-    * The STAGING flags notes that the surface will not be used directly by the
-    * drawing pipeline, i.e. that it will not be bound to any bind point.
-    * Staging surfaces may be used by copy operations to move data in and out
-    * of other surfaces.
-    *
-    * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
-    * updates indirectly, i.e. the surface will not be updated directly, but
-    * will receive copies from staging surfaces.
-    */
-   SVGA3D_SURFACE_STAGING_UPLOAD        = (1 << 27),
-   SVGA3D_SURFACE_STAGING_DOWNLOAD      = (1 << 28),
-   SVGA3D_SURFACE_HINT_INDIRECT_UPDATE  = (1 << 29),
+/*
+ * The STAGING flags note that the surface will not be used directly by the
+ * drawing pipeline, i.e. that it will not be bound to any bind point.
+ * Staging surfaces may be used by copy operations to move data in and out
+ * of other surfaces.  No bind flags may be set on surfaces with this flag.
+ *
+ * The HINT_INDIRECT_UPDATE flag suggests that the surface will receive
+ * updates indirectly, i.e. the surface will not be updated directly, but
+ * will receive copies from staging surfaces.
+ */
+#define SVGA3D_SURFACE_STAGING_UPLOAD         (CONST64U(1) << 27)
+#define SVGA3D_SURFACE_STAGING_DOWNLOAD       (CONST64U(1) << 28)
+#define SVGA3D_SURFACE_HINT_INDIRECT_UPDATE   (CONST64U(1) << 29)
 
-   /*
-    * Setting this flag allow this surface to be used with the
-    * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
-    * buffer surfaces, an no bind flags are allowed to be set on surfaces
-    * with this flag.
-    */
-   SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  = (1 << 30),
+/*
+ * Setting this flag allows this surface to be used with the
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command.  It is only valid for
+ * buffer surfaces, and no bind flags are allowed to be set on surfaces
+ * with this flag.
+ */
+#define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   (CONST64U(1) << 30)
 
-   /*
-    * Marker for the last defined bit.
-    */
-   SVGA3D_SURFACE_FLAG_MAX              = (1 << 31),
-} SVGA3dSurfaceFlags;
+/*
+ * Reserved for video operations.
+ */
+#define SVGA3D_SURFACE_RESERVED1              (CONST64U(1) << 31)
+
+/*
+ * Specifies that a surface is multisample, and therefore requires the full
+ * mob-backing to store all the samples.
+ */
+#define SVGA3D_SURFACE_MULTISAMPLE            (CONST64U(1) << 32)
+
+#define SVGA3D_SURFACE_FLAG_MAX               (CONST64U(1) << 33)
+
+/*
+ * Surface flags types:
+ *
+ * SVGA3dSurface1Flags:  Lower 32-bits of flags.
+ * SVGA3dSurface2Flags:  Upper 32-bits of flags.
+ * SVGA3dSurfaceAllFlags: Full 64-bits of flags.
+ */
+typedef uint32 SVGA3dSurface1Flags;
+typedef uint32 SVGA3dSurface2Flags;
+typedef uint64 SVGA3dSurfaceAllFlags;
+
+#define SVGA3D_SURFACE_FLAGS1_MASK ((uint64_t)MAX_UINT32)
+#define SVGA3D_SURFACE_FLAGS2_MASK (MAX_UINT64 & ~SVGA3D_SURFACE_FLAGS1_MASK)
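
As an aside, commands that predate the 64-bit flag word presumably carry only
the lower 32 bits (SVGA3dSurface1Flags), so a full 64-bit flag word is split
with the two masks above. A sketch of hypothetical helpers, assuming this
header is included:

/* Sketch: split a 64-bit flag word into its lower/upper halves. */
static inline SVGA3dSurface1Flags svga3d_flags_lower(SVGA3dSurfaceAllFlags flags)
{
        return (SVGA3dSurface1Flags)(flags & SVGA3D_SURFACE_FLAGS1_MASK);
}

static inline SVGA3dSurface2Flags svga3d_flags_upper(SVGA3dSurfaceAllFlags flags)
{
        return (SVGA3dSurface2Flags)((flags & SVGA3D_SURFACE_FLAGS2_MASK) >> 32);
}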
 
 #define SVGA3D_SURFACE_HB_DISALLOWED_MASK        \
         (  SVGA3D_SURFACE_MOB_PITCH    |         \
@@ -392,29 +427,41 @@ typedef enum SVGA3dSurfaceFlags {
            SVGA3D_SURFACE_STAGING_UPLOAD |       \
            SVGA3D_SURFACE_STAGING_DOWNLOAD |     \
            SVGA3D_SURFACE_HINT_INDIRECT_UPDATE | \
-           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER   \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER | \
+           SVGA3D_SURFACE_MULTISAMPLE            \
+        )
+
+#define SVGA3D_SURFACE_HB_PRESENT_DISALLOWED_MASK   \
+       (   SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_MULTISAMPLE               \
         )
 
 #define SVGA3D_SURFACE_2D_DISALLOWED_MASK           \
         (  SVGA3D_SURFACE_CUBEMAP |                 \
            SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
            SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
-           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
            SVGA3D_SURFACE_VOLUME |                  \
            SVGA3D_SURFACE_1D |                      \
-           SVGA3D_SURFACE_ARRAY |                   \
            SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
            SVGA3D_SURFACE_BIND_INDEX_BUFFER |       \
            SVGA3D_SURFACE_BIND_CONSTANT_BUFFER |    \
            SVGA3D_SURFACE_BIND_DEPTH_STENCIL |      \
            SVGA3D_SURFACE_BIND_STREAM_OUTPUT |      \
-           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
+           SVGA3D_SURFACE_MULTISAMPLE               \
+        )
+
+#define SVGA3D_SURFACE_BASICOPS_DISALLOWED_MASK     \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_MULTISAMPLE               \
         )
 
 #define SVGA3D_SURFACE_SCREENTARGET_DISALLOWED_MASK \
         (  SVGA3D_SURFACE_CUBEMAP |                 \
            SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
-           SVGA3D_SURFACE_DECODE_RENDERTARGET |     \
            SVGA3D_SURFACE_VOLUME |                  \
            SVGA3D_SURFACE_1D |                      \
            SVGA3D_SURFACE_BIND_VERTEX_BUFFER |      \
@@ -426,12 +473,36 @@ typedef enum SVGA3dSurfaceFlags {
            SVGA3D_SURFACE_STAGING_UPLOAD |          \
            SVGA3D_SURFACE_STAGING_DOWNLOAD |        \
            SVGA3D_SURFACE_HINT_INDIRECT_UPDATE |    \
-           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER      \
+           SVGA3D_SURFACE_TRANSFER_FROM_BUFFER |    \
+           SVGA3D_SURFACE_MULTISAMPLE               \
+        )
+
+#define SVGA3D_SURFACE_BUFFER_DISALLOWED_MASK       \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_MASKABLE_ANTIALIAS |      \
+           SVGA3D_SURFACE_ARRAY |                   \
+           SVGA3D_SURFACE_MULTISAMPLE |             \
+           SVGA3D_SURFACE_MOB_PITCH                 \
+        )
+
+#define SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK  \
+        (  SVGA3D_SURFACE_CUBEMAP |                 \
+           SVGA3D_SURFACE_AUTOGENMIPMAPS |          \
+           SVGA3D_SURFACE_VOLUME |                  \
+           SVGA3D_SURFACE_1D |                      \
+           SVGA3D_SURFACE_SCREENTARGET |            \
+           SVGA3D_SURFACE_MOB_PITCH                 \
         )
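
As an aside, these DISALLOWED masks are meant to be ANDed against a surface's
flag word; any nonzero result marks an invalid combination for that class of
surface. A sketch of a hypothetical check, assuming this header is included:

/* Sketch: reject a multisample surface that sets any disallowed flag. */
static inline bool svga3d_ms_flags_valid(SVGA3dSurfaceAllFlags flags)
{
        return (flags & SVGA3D_SURFACE_MULTISAMPLE_DISALLOWED_MASK) == 0;
}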
 
 #define SVGA3D_SURFACE_DX_ONLY_MASK             \
         (  SVGA3D_SURFACE_BIND_STREAM_OUTPUT |  \
+           SVGA3D_SURFACE_STAGING_UPLOAD |      \
+           SVGA3D_SURFACE_STAGING_DOWNLOAD |    \
            SVGA3D_SURFACE_TRANSFER_FROM_BUFFER  \
+        )
 
 #define SVGA3D_SURFACE_STAGING_MASK             \
         (  SVGA3D_SURFACE_STAGING_UPLOAD |      \
@@ -487,7 +558,7 @@ typedef enum {
 
 /*
  * Indicates that this format can be converted to any RGB format for which
- * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
+ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified.
  */
    SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
 
@@ -498,22 +569,22 @@ typedef enum {
 
 /*
 * Indicates that this format can be read as an SRGB texture (meaning that the
- * sampler will linearize the looked up data)
+ * sampler will linearize the looked up data).
  */
    SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
 
 /*
- * Indicates that this format can be used in the bumpmap instructions
+ * Indicates that this format can be used in the bumpmap instructions.
  */
    SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
 
 /*
- * Indicates that this format can be sampled by the displacement map sampler
+ * Indicates that this format can be sampled by the displacement map sampler.
  */
    SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
 
 /*
- * Indicates that this format cannot be used with texture filtering
+ * Indicates that this format cannot be used with texture filtering.
  */
    SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
 
@@ -530,18 +601,18 @@ typedef enum {
    SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
 
 /*
- * Indicates that this format cannot be used with alpha blending
+ * Indicates that this format cannot be used with alpha blending.
  */
    SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
 
 /*
 * Indicates that the device can auto-generate sublevels for resources
- * of this format
+ * of this format.
  */
    SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
 
 /*
- * Indicates that this format can be used by vertex texture sampler
+ * Indicates that this format can be used by vertex texture sampler.
  */
    SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
 
@@ -1501,7 +1572,6 @@ union SVGADXQueryResultUnion {
 #include "vmware_pack_end.h"
 SVGADXQueryResultUnion;
 
-
 typedef enum {
    SVGA3D_QUERYSTATE_PENDING     = 0,      /* Query is not finished yet */
    SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully */
@@ -1533,9 +1603,9 @@ typedef
 struct {
    union {
       struct {
-        uint16  function;       /* SVGA3dFogFunction */
-        uint8   type;           /* SVGA3dFogType */
-        uint8   base;           /* SVGA3dFogBase */
+         uint16  function;       /* SVGA3dFogFunction */
+         uint8   type;           /* SVGA3dFogType */
+         uint8   base;           /* SVGA3dFogBase */
       };
       uint32     uintValue;
    };
@@ -1547,17 +1617,25 @@ SVGA3dFogMode;
  * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
  * is a surface ID as well as face/mipmap indices.
  */
-
 typedef
 #include "vmware_pack_begin.h"
 struct SVGA3dSurfaceImageId {
-   uint32               sid;
-   uint32               face;
-   uint32               mipmap;
+   uint32 sid;
+   uint32 face;
+   uint32 mipmap;
 }
 #include "vmware_pack_end.h"
 SVGA3dSurfaceImageId;
 
+typedef
+#include "vmware_pack_begin.h"
+struct SVGA3dSubSurfaceId {
+   uint32 sid;
+   uint32 subResourceId;
+}
+#include "vmware_pack_end.h"
+SVGA3dSubSurfaceId;
+
 typedef
 #include "vmware_pack_begin.h"
 struct {
@@ -1582,13 +1660,18 @@ typedef enum {
    SVGA_OTABLE_DX9_MAX         = 5,
 
    SVGA_OTABLE_DXCONTEXT       = 5,
-   SVGA_OTABLE_MAX             = 6
-} SVGAOTableType;
+   SVGA_OTABLE_DX_MAX          = 6,
 
-/*
- * Deprecated.
- */
-#define SVGA_OTABLE_COUNT 4
+   SVGA_OTABLE_RESERVED1       = 6,
+   SVGA_OTABLE_RESERVED2       = 7,
+
+   /*
+    * Additions to this table need to be tied to HW-version features and
+    * checkpointed accordingly.
+    */
+   SVGA_OTABLE_DEVEL_MAX       = 8,
+   SVGA_OTABLE_MAX             = 8
+} SVGAOTableType;
 
 typedef enum {
    SVGA_COTABLE_MIN             = 0,
@@ -1605,7 +1688,7 @@ typedef enum {
    SVGA_COTABLE_DXSHADER        = 10,
    SVGA_COTABLE_DX10_MAX        = 11,
    SVGA_COTABLE_UAVIEW          = 11,
-   SVGA_COTABLE_MAX
+   SVGA_COTABLE_MAX             = 12,
 } SVGACOTableType;
 
 /*
@@ -1626,8 +1709,37 @@ typedef enum SVGAMobFormat {
    SVGA3D_MOBFMT_PREDX_MAX   = 7,
    SVGA3D_MOBFMT_EMPTY       = 7,
    SVGA3D_MOBFMT_MAX,
+
+   /*
+    * This isn't actually used by the guest, but is a mob-format used
+    * internally by the SVGA device (and is therefore not binary compatible).
+    */
+   SVGA3D_MOBFMT_HB,
 } SVGAMobFormat;
 
 #define SVGA3D_MOB_EMPTY_BASE 1
 
+/*
+ * Multisample pattern types.
+ */
+
+typedef enum SVGA3dMSPattern {
+   SVGA3D_MS_PATTERN_NONE     = 0,
+   SVGA3D_MS_PATTERN_MIN      = 0,
+   SVGA3D_MS_PATTERN_STANDARD = 1,
+   SVGA3D_MS_PATTERN_CENTER   = 2,
+   SVGA3D_MS_PATTERN_MAX      = 3,
+} SVGA3dMSPattern;
+
+/*
+ * Precision settings for each sample.
+ */
+
+typedef enum SVGA3dMSQualityLevel {
+   SVGA3D_MS_QUALITY_NONE = 0,
+   SVGA3D_MS_QUALITY_MIN  = 0,
+   SVGA3D_MS_QUALITY_FULL = 1,
+   SVGA3D_MS_QUALITY_MAX  = 2,
+} SVGA3dMSQualityLevel;
+
 #endif /* _SVGA3D_TYPES_H_ */
index 884b1d1fb85f3e4042574884912723e27e7fbcf3..acb41e28e46fb96a51c4f6bb9bb27acda3b33f75 100644
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
index faf6d9b2b89136a60a90d92a1a02a00ecaf7d69f..e5385146e7fc3f05adc26fc20c180e0d2e43213b 100644
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2007-2015 VMware, Inc.  All rights reserved.
+ * Copyright 2007-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
index 88e72bf9a534a419cfeebba793578532dfcc43bf..056f54b35d73f2402c2999aceeb2016e2921026c 100644
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 1998-2015 VMware, Inc.  All rights reserved.
+ * Copyright 1998-2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -63,16 +64,26 @@ typedef uint32 SVGAMobId;
 #define SVGA_MAX_BITS_PER_PIXEL         32
 #define SVGA_MAX_DEPTH                  24
 #define SVGA_MAX_DISPLAYS               10
+#define SVGA_MAX_SCREEN_SIZE            8192
+#define SVGA_SCREEN_ROOT_LIMIT (SVGA_MAX_SCREEN_SIZE * SVGA_MAX_DISPLAYS)
+
 
 /*
  * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
  * cursor bypass mode. This is still supported, but no new guest
  * drivers should use it.
  */
-#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
-#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
-#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_HIDE            0x0
+#define SVGA_CURSOR_ON_SHOW            0x1
+
+/*
+ * Remove the cursor from the framebuffer
+ * because we need to see what's under it
+ */
+#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2
+
+/* Put the cursor back in the framebuffer so the user can see it */
+#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3
 
 /*
  * The maximum framebuffer size that can be traced for guests unless the
@@ -101,7 +112,10 @@ typedef uint32 SVGAMobId;
 #define SVGA_VERSION_0     0
 #define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
 
-/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
+/*
+ * "Invalid" value for all SVGA IDs.
+ * (Version ID, screen object ID, surface ID...)
+ */
 #define SVGA_ID_INVALID    0xFFFFFFFF
 
 /* Port offsets, relative to BAR0 */
@@ -154,7 +168,7 @@ enum {
    SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
    SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
    SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
-   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
+   SVGA_REG_GUEST_ID = 23,            /* (Deprecated) */
    SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
    SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
    SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
@@ -186,7 +200,14 @@ enum {
    SVGA_REG_MEMORY_SIZE = 47,       /* Total dedicated device memory excluding FIFO */
    SVGA_REG_COMMAND_LOW = 48,       /* Lower 32 bits and submits commands */
    SVGA_REG_COMMAND_HIGH = 49,      /* Upper 32 bits of command buffer PA */
-   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,   /* Max primary memory */
+
+   /*
+    * Max primary memory.
+    * See SVGA_CAP_NO_BB_RESTRICTION.
+    */
+   SVGA_REG_MAX_PRIMARY_MEM = 50,
+   SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50,
+
    SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
    SVGA_REG_DEV_CAP = 52,           /* Write dev cap index, read value */
    SVGA_REG_CMD_PREPEND_LOW = 53,
@@ -194,7 +215,10 @@ enum {
    SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
    SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
    SVGA_REG_MOB_MAX_SIZE = 57,
-   SVGA_REG_TOP = 58,               /* Must be 1 more than the last register */
+   SVGA_REG_BLANK_SCREEN_TARGETS = 58,
+   SVGA_REG_CAP2 = 59,
+   SVGA_REG_DEVEL_CAP = 60,
+   SVGA_REG_TOP = 61,               /* Must be 1 more than the last register */
 
    SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
    /* Next 768 (== 256*3) registers exist for colormap */
@@ -392,6 +416,7 @@ typedef enum {
    SVGA_CB_CONTEXT_0      = 0x0,
    SVGA_CB_CONTEXT_1      = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
    SVGA_CB_CONTEXT_MAX    = 0x2,
+   SVGA_CB_CONTEXT_HP_MAX = 0x2,
 } SVGACBContext;
 
 
@@ -448,6 +473,18 @@ typedef enum {
     * due to an error.  No IRQ is raised.
     */
    SVGA_CB_STATUS_SUBMISSION_ERROR = 6,
+
+   /*
+    * Written by the host when the host finished a
+    * SVGA_DC_CMD_ASYNC_STOP_QUEUE request for this command buffer
+    * queue.  The offset of the first byte not processed is stored in
+    * the errorOffset field of the command buffer header.  All guest
+ * visible side effects of commands up to that point are guaranteed
+    * to be finished before this is written.  The
+    * SVGA_IRQFLAG_COMMAND_BUFFER IRQ is raised as long as the
+    * SVGA_CB_FLAG_NO_IRQ is not set.
+    */
+   SVGA_CB_STATUS_PARTIAL_COMPLETE = 7,
 } SVGACBStatus;
 
 typedef enum {
@@ -460,8 +497,8 @@ typedef enum {
 typedef
 #include "vmware_pack_begin.h"
 struct {
-   volatile SVGACBStatus status;
-   volatile uint32 errorOffset;
+   volatile SVGACBStatus status; /* Modified by device. */
+   volatile uint32 errorOffset;  /* Modified by device. */
    uint64 id;
    SVGACBFlags flags;
    uint32 length;
@@ -472,7 +509,9 @@ struct {
          uint32 mobOffset;
       } mob;
    } ptr;
-   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise */
+   uint32 offset; /* Valid if CMD_BUFFERS_2 cap set, must be zero otherwise,
+                   * modified by device.
+                   */
    uint32 dxContext; /* Valid if DX_CONTEXT flag set, must be zero otherwise */
    uint32 mustBeZero[6];
 }
@@ -483,20 +522,26 @@ typedef enum {
    SVGA_DC_CMD_NOP                   = 0,
    SVGA_DC_CMD_START_STOP_CONTEXT    = 1,
    SVGA_DC_CMD_PREEMPT               = 2,
-   SVGA_DC_CMD_MAX                   = 3,
-   SVGA_DC_CMD_FORCE_UINT            = MAX_UINT32,
+   SVGA_DC_CMD_START_QUEUE           = 3, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_ASYNC_STOP_QUEUE      = 4, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE   = 5, /* Requires SVGA_CAP_HP_CMD_QUEUE */
+   SVGA_DC_CMD_MAX                   = 6,
 } SVGADeviceContextCmdId;
 
-typedef struct {
+/*
+ * Starts or stops both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1.
+ */
+
+typedef struct SVGADCCmdStartStop {
    uint32 enable;
-   SVGACBContext context;
+   SVGACBContext context; /* Must be zero */
 } SVGADCCmdStartStop;
 
 /*
  * SVGADCCmdPreempt --
  *
  * This command allows the guest to request that all command buffers
- * on the specified context be preempted that can be.  After execution
+ * on SVGA_CB_CONTEXT_0 be preempted where possible.  After execution
  * of this command all command buffers that were preempted will
  * already have SVGA_CB_STATUS_PREEMPTED written into the status
  * field.  The device might still be processing a command buffer,
@@ -506,11 +551,68 @@ typedef struct {
  * command buffer header set to zero.
  */
 
-typedef struct {
-   SVGACBContext context;
+typedef struct SVGADCCmdPreempt {
+   SVGACBContext context; /* Must be zero */
    uint32 ignoreIDZero;
 } SVGADCCmdPreempt;
 
+/*
+ * Starts the requested command buffer processing queue.  Valid only
+ * if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ *
+ * For a command queue to be considered runnable it must be enabled
+ * and any corresponding higher priority queues must also be enabled.
+ * For example in order for command buffers to be processed on
+ * SVGA_CB_CONTEXT_0 both SVGA_CB_CONTEXT_0 and SVGA_CB_CONTEXT_1 must
+ * be enabled.  But for commands to be runnable on SVGA_CB_CONTEXT_1
+ * only that queue must be enabled.
+ */
+
+typedef struct SVGADCCmdStartQueue {
+   SVGACBContext context;
+} SVGADCCmdStartQueue;
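
As an illustration of the runnability rule above, a guest driver enables the
high-priority queue as well as the normal one. A minimal sketch, assuming a
hypothetical vmw_send_dc_cmd() helper that submits a single device-context
command (no such helper is defined in this patch):

	/* Sketch: enable both queues so that SVGA_CB_CONTEXT_0 is runnable. */
	static void vmw_enable_cb_queues(struct vmw_private *dev_priv)
	{
		SVGADCCmdStartQueue cmd;

		/* Enable the high-priority queue ... */
		cmd.context = SVGA_CB_CONTEXT_1;
		vmw_send_dc_cmd(dev_priv, SVGA_DC_CMD_START_QUEUE,
				&cmd, sizeof(cmd));

		/* ... and the normal queue; both are needed for CONTEXT_0. */
		cmd.context = SVGA_CB_CONTEXT_0;
		vmw_send_dc_cmd(dev_priv, SVGA_DC_CMD_START_QUEUE,
				&cmd, sizeof(cmd));
	}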
+
+/*
+ * Requests the SVGA device to stop processing the requested command
+ * buffer queue as soon as possible.  The guest knows the stop has
+ * completed when one of the following happens.
+ *
+ * 1) A command buffer status of SVGA_CB_STATUS_PARTIAL_COMPLETE is returned.
+ * 2) A command buffer error is encountered which would stop the queue
+ *    regardless of the async stop request.
+ * 3) All command buffers that have been submitted complete successfully.
+ * 4) The stop completes synchronously if no command buffers are
+ *    active on the queue when it is issued.
+ *
+ * If the command queue is not in a runnable state there is no
+ * guarantee this async stop will finish.  For instance, if the high
+ * priority queue is not enabled and a stop is requested on the low
+ * priority queue, the high priority queue must be reenabled to
+ * guarantee that the async stop will finish.
+ *
+ * This command along with SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE can be used
+ * to implement mid command buffer preemption.
+ *
+ * Valid only if the SVGA_CAP_HP_CMD_QUEUE cap is set.
+ */
+
+typedef struct SVGADCCmdAsyncStopQueue {
+   SVGACBContext context;
+} SVGADCCmdAsyncStopQueue;
+
+/*
+ * Requests the SVGA device to throw away any full command buffers on
+ * the requested command queue that have not been started.  For a
+ * driver to know which command buffers were thrown away, it should
+ * only issue this command while the queue is stopped, for whatever
+ * reason.
+ */
+
+typedef struct SVGADCCmdEmptyQueue {
+   SVGACBContext context;
+} SVGADCCmdEmptyQueue;
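
Putting the last three commands together, mid-command-buffer preemption
roughly follows the sequence below. This is a sketch only; vmw_send_dc_cmd()
and vmw_wait_for_cb_status() are assumed helpers, not part of this patch:

	/* Sketch: stop SVGA_CB_CONTEXT_0 mid buffer, then drop queued buffers. */
	static void vmw_preempt_cb_queue(struct vmw_private *dev_priv)
	{
		SVGADCCmdAsyncStopQueue stop  = { .context = SVGA_CB_CONTEXT_0 };
		SVGADCCmdEmptyQueue     empty = { .context = SVGA_CB_CONTEXT_0 };

		vmw_send_dc_cmd(dev_priv, SVGA_DC_CMD_ASYNC_STOP_QUEUE,
				&stop, sizeof(stop));

		/*
		 * Wait for SVGA_CB_STATUS_PARTIAL_COMPLETE, an error status,
		 * or completion of all submitted buffers (conditions 1-3 above).
		 */
		vmw_wait_for_cb_status(dev_priv, SVGA_CB_CONTEXT_0);

		/* The queue is now stopped; discard buffers that never started. */
		vmw_send_dc_cmd(dev_priv, SVGA_DC_CMD_EMPTY_CONTEXT_QUEUE,
				&empty, sizeof(empty));
	}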
+
+
 /*
  * SVGAGMRImageFormat --
  *
@@ -536,7 +638,7 @@ typedef struct SVGAGMRImageFormat {
       struct {
          uint32 bitsPerPixel : 8;
          uint32 colorDepth   : 8;
-        uint32 reserved     : 16;  /* Must be zero */
+         uint32 reserved     : 16;  /* Must be zero */
       };
 
       uint32 value;
@@ -672,8 +774,36 @@ SVGASignedPoint;
  * SVGA_CAP_GBOBJECTS --
  *    Enable guest-backed objects and surfaces.
  *
- * SVGA_CAP_CMD_BUFFERS_3 --
- *    Enable support for command buffers in a mob.
+ * SVGA_CAP_DX --
+ *    Enable support for DX commands, and command buffers in a mob.
+ *
+ * SVGA_CAP_HP_CMD_QUEUE --
+ *    Enable support for the high priority command queue, and the
+ *    ScreenCopy command.
+ *
+ * SVGA_CAP_NO_BB_RESTRICTION --
+ *    Allow ScreenTargets to be defined without regard to the 32-bpp
+ *    bounding-box memory restrictions, i.e.:
+ *
+ *    The summed memory usage of all screens (assuming they were defined as
+ *    32-bpp) must always be less than the value of the
+ *    SVGA_REG_MAX_PRIMARY_MEM register.
+ *
+ *    If this cap is not present, the 32-bpp bounding box around all screens
+ *    must additionally be under the value of the SVGA_REG_MAX_PRIMARY_MEM
+ *    register.
+ *
+ *    If the cap is present, the bounding box restriction is lifted (and only
+ *    the screen-sum limit applies).
+ *
+ *    (Note that this is a slight lie... there is still a sanity limit that
+ *     any dimension of the topology be less than SVGA_SCREEN_ROOT_LIMIT, even
+ *     when SVGA_CAP_NO_BB_RESTRICTION is present, but that should be
+ *     large enough to express any possible topology without holes between
+ *     monitors.)
+ *
+ * SVGA_CAP_CAP2_REGISTER --
+ *    If this cap is present, the SVGA_REG_CAP2 register is supported.
  */
 
 #define SVGA_CAP_NONE               0x00000000
@@ -699,8 +829,30 @@ SVGASignedPoint;
 #define SVGA_CAP_GBOBJECTS          0x08000000
 #define SVGA_CAP_DX                 0x10000000
 #define SVGA_CAP_HP_CMD_QUEUE       0x20000000
+#define SVGA_CAP_NO_BB_RESTRICTION  0x40000000
+#define SVGA_CAP_CAP2_REGISTER      0x80000000
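
Spelled out as code, the primary-memory rule above amounts to the following
check. This is a sketch under the assumption of a vmw_read_reg() register
accessor; only the cap and register names come from this file:

	/* Sketch: 32-bpp screen-memory limits with and without the cap. */
	static bool vmw_topology_fits(struct vmw_private *dev_priv, u32 caps,
				      u64 sum_of_screens, u64 bounding_box)
	{
		u64 max = vmw_read_reg(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);

		/* The screen-sum limit always applies. */
		if (sum_of_screens > max)
			return false;

		/* Without the cap, the 32-bpp bounding box is limited too. */
		if (!(caps & SVGA_CAP_NO_BB_RESTRICTION) && bounding_box > max)
			return false;

		return true;
	}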
 
-#define SVGA_CAP_CMD_RESERVED       0x80000000
+/*
+ * The SVGA_REG_CAP2 register is an additional set of SVGA capability bits.
+ *
+ * SVGA_CAP2_GROW_OTABLE --
+ *      Allow the GrowOTable/DXGrowCOTable commands.
+ *
+ * SVGA_CAP2_INTRA_SURFACE_COPY --
+ *      Allow the IntraSurfaceCopy command.
+ *
+ * SVGA_CAP2_DX2 --
+ *      Allow the DefineGBSurface_v3 and WholeSurfaceCopy commands.
+ *
+ * SVGA_CAP2_RESERVED --
+ *      Reserve the last bit for extending the SVGA capabilities via some
+ *      future mechanism.
+ */
+#define SVGA_CAP2_NONE               0x00000000
+#define SVGA_CAP2_GROW_OTABLE        0x00000001
+#define SVGA_CAP2_INTRA_SURFACE_COPY 0x00000002
+#define SVGA_CAP2_DX2                0x00000004
+#define SVGA_CAP2_RESERVED           0x80000000
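
A guest must gate any read of SVGA_REG_CAP2 on SVGA_CAP_CAP2_REGISTER, along
the lines of this sketch (vmw_read_reg() is again an assumed accessor):

	/* Sketch: only touch SVGA_REG_CAP2 when the device advertises it. */
	static u32 vmw_get_cap2(struct vmw_private *dev_priv, u32 caps)
	{
		if (!(caps & SVGA_CAP_CAP2_REGISTER))
			return SVGA_CAP2_NONE;

		return vmw_read_reg(dev_priv, SVGA_REG_CAP2);
	}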
 
 
 /*
@@ -722,7 +874,8 @@ typedef enum {
    SVGABackdoorCapDeviceCaps = 0,
    SVGABackdoorCapFifoCaps = 1,
    SVGABackdoorCap3dHWVersion = 2,
-   SVGABackdoorCapMax = 3,
+   SVGABackdoorCapDeviceCaps2 = 3,
+   SVGABackdoorCapMax = 4,
 } SVGABackdoorCapType;
 
 
@@ -1914,16 +2067,6 @@ SVGAFifoCmdRemapGMR2;
 
 #define SVGA_VRAM_SIZE_W2K          (64 * 1024 * 1024) /* 64 MB */
 
-/*
- * To simplify autoDetect display configuration, support a minimum of
- * two 1920x1200 monitors, 32bpp, side-by-side, optionally rotated:
- *   numDisplays = 2
- *   maxWidth = numDisplay * 1920 = 3840
- *   maxHeight = rotated width of single monitor = 1920
- *   vramSize = maxWidth * maxHeight * 4 = 29491200
- */
-#define SVGA_VRAM_SIZE_AUTODETECT   (32 * 1024 * 1024)
-
 #if defined(VMX86_SERVER)
 #define SVGA_VRAM_SIZE               (4 * 1024 * 1024)
 #define SVGA_VRAM_SIZE_3D           (64 * 1024 * 1024)
index 2e8ba4df8de9e1cd061ed569fd7859775467ca53..350bbc6fab02fdfea2d12424009b70507da5dcb1 100644
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**********************************************************
- * Copyright 2015 VMware, Inc.  All rights reserved.
+ * Copyright 2015 VMware, Inc.
  *
  * Permission is hereby granted, free of charge, to any person
  * obtaining a copy of this software and associated documentation
@@ -40,7 +41,10 @@ typedef uint64 PPN64;
 
 typedef bool Bool;
 
+#define MAX_UINT64 U64_MAX
 #define MAX_UINT32 U32_MAX
 #define MAX_UINT16 U16_MAX
 
+#define CONST64U(x) x##ULL
+
 #endif
index 7e7b0ce34aa2904064b35dbc145f97117ab08493..75308bd0d97051da1b13e343d5eaed365dc273f5 100644
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/compiler.h>
index e2e440ed3d4420cc1e72d2fd19cfff4aa045fa17..e93d6f28b68ceda969a00dd0a60de2ed379c5810 100644
@@ -1,25 +1,2 @@
-/**********************************************************
- * Copyright 2015 VMware, Inc.  All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy,
- * modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- **********************************************************/
+/* SPDX-License-Identifier: GPL-2.0 */
 __packed
index 55d32ae43aa4ace13b6d33ed841f2bd235339f3a..0b9ee7fb45d6e347a7840f173ad82dc9f51be76a 100644
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index bf2e77ad5a209163a03eb3f62125e1a59d7e6014..6a2a9d69043b0c40c3209dc2e206369ac12786a5 100644
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index e8c94b19db7bc26b6015414838c41d703ed86d20..fc6673cde28951b6b4b26bea81e5f9c02bcde869 100644
@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2017 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 0000000..2dda033
--- /dev/null
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+#include "drm/ttm/ttm_object.h"
+
+
+/**
+ * struct vmw_user_buffer_object - User-space-visible buffer object
+ *
+ * @prime: The prime object providing user visibility.
+ * @vbo: The struct vmw_buffer_object
+ */
+struct vmw_user_buffer_object {
+       struct ttm_prime_object prime;
+       struct vmw_buffer_object vbo;
+};
+
+
+/**
+ * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the
+ * TTM buffer object.
+ */
+static struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
+{
+       return container_of(bo, struct vmw_buffer_object, base);
+}
+
+
+/**
+ * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+ * vmw_user_buffer_object.
+ *
+ * @bo: Pointer to the TTM buffer object.
+ * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
+ * object.
+ */
+static struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
+{
+       struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+       return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+}
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to placement.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @placement:  The placement to pin it.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+                           struct vmw_buffer_object *buf,
+                           struct ttm_placement *placement,
+                           bool interruptible)
+{
+       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       uint32_t new_flags;
+
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, placement, &ctx);
+
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+
+       ttm_bo_unreserve(bo);
+
+err:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer if true.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+                             struct vmw_buffer_object *buf,
+                             bool interruptible)
+{
+       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       uint32_t new_flags;
+
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+
+       if (buf->pin_count > 0) {
+               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+               goto out_unreserve;
+       }
+
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+       if (likely(ret == 0) || ret == -ERESTARTSYS)
+               goto out_unreserve;
+
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+
+       ttm_bo_unreserve(bo);
+err:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
+
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+                      struct vmw_buffer_object *buf,
+                      bool interruptible)
+{
+       return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+                                      interruptible);
+}
+
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to pin.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+                               struct vmw_buffer_object *buf,
+                               bool interruptible)
+{
+       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       struct ttm_placement placement;
+       struct ttm_place place;
+       int ret = 0;
+       uint32_t new_flags;
+
+       place = vmw_vram_placement.placement[0];
+       place.lpfn = bo->num_pages;
+       placement.num_placement = 1;
+       placement.placement = &place;
+       placement.num_busy_placement = 1;
+       placement.busy_placement = &place;
+
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err_unlock;
+
+       /*
+        * Is this buffer already in vram but not at the start of it?
+        * In that case, evict it first because TTM isn't good at handling
+        * that situation.
+        */
+       if (bo->mem.mem_type == TTM_PL_VRAM &&
+           bo->mem.start < bo->num_pages &&
+           bo->mem.start > 0 &&
+           buf->pin_count == 0) {
+               ctx.interruptible = false;
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+       }
+
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(&placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, &placement, &ctx);
+
+       /* For some reason we didn't end up at the start of vram */
+       WARN_ON(ret == 0 && bo->offset != 0);
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+
+       ttm_bo_unreserve(bo);
+err_unlock:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+
+       return ret;
+}
+
+
+/**
+ * vmw_bo_unpin - Unpin the given buffer without moving it.
+ *
+ * This function takes the reservation_sem in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to unpin.
+ * @interruptible:  Use interruptible wait.
+ * Return: Zero on success, Negative error code on failure. In particular
+ * -ERESTARTSYS if interrupted by a signal
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+                struct vmw_buffer_object *buf,
+                bool interruptible)
+{
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+
+       ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+
+       vmw_bo_pin_reserved(buf, false);
+
+       ttm_bo_unreserve(bo);
+
+err:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+                         SVGAGuestPtr *ptr)
+{
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+               ptr->offset = bo->offset;
+       } else {
+               ptr->gmrId = bo->mem.start;
+               ptr->offset = 0;
+       }
+}
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+       struct ttm_operation_ctx ctx = { false, true };
+       struct ttm_place pl;
+       struct ttm_placement placement;
+       struct ttm_buffer_object *bo = &vbo->base;
+       uint32_t old_mem_type = bo->mem.mem_type;
+       int ret;
+
+       lockdep_assert_held(&bo->resv->lock.base);
+
+       if (pin) {
+               if (vbo->pin_count++ > 0)
+                       return;
+       } else {
+               WARN_ON(vbo->pin_count <= 0);
+               if (--vbo->pin_count > 0)
+                       return;
+       }
+
+       pl.fpfn = 0;
+       pl.lpfn = 0;
+       pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+               | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+       if (pin)
+               pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+       memset(&placement, 0, sizeof(placement));
+       placement.num_placement = 1;
+       placement.placement = &pl;
+
+       ret = ttm_bo_validate(bo, &placement, &ctx);
+
+       BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
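
Since vmw_bo_pin_reserved() requires the buffer to be reserved, a typical
call site brackets it with a TTM reservation, roughly as in this sketch
(error handling and the driver's reservation_sem locking elided):

	/* Sketch: reserve, pin, unreserve. */
	static int vmw_bo_pin_example(struct vmw_buffer_object *vbo)
	{
		int ret = ttm_bo_reserve(&vbo->base, true, false, NULL);

		if (ret)
			return ret;

		vmw_bo_pin_reserved(vbo, true);
		ttm_bo_unreserve(&vbo->base);
		return 0;
	}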
+
+
+/**
+ * vmw_bo_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+{
+       struct ttm_buffer_object *bo = &vbo->base;
+       bool not_used;
+       void *virtual;
+       int ret;
+
+       virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+       if (virtual)
+               return virtual;
+
+       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+       if (ret)
+               DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+       return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
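
Repeated CPU access therefore pays the kmap cost only once. A sketch of use
while the buffer is pinned or reserved (the explicit vmw_bo_unmap() is
optional, since moves, swapouts and destruction tear the map down anyway):

	/* Sketch: write through the cached map; @size must fit the buffer. */
	static int vmw_bo_cpu_write(struct vmw_buffer_object *vbo,
				    const void *src, size_t size)
	{
		void *virtual = vmw_bo_map_and_cache(vbo);

		if (!virtual)
			return -ENOMEM;

		memcpy(virtual, src, size);
		return 0;
	}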
+
+
+/**
+ * vmw_bo_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_bo_map_and_cache().
+ */
+void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+{
+       if (vbo->map.bo == NULL)
+               return;
+
+       ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+                             bool user)
+{
+       static size_t struct_size, user_struct_size;
+       size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+       if (unlikely(struct_size == 0)) {
+               size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+               struct_size = backend_size +
+                       ttm_round_pot(sizeof(struct vmw_buffer_object));
+               user_struct_size = backend_size +
+                       ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+       }
+
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+               page_array_size +=
+                       ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+       return ((user) ? user_struct_size : struct_size) +
+               page_array_size;
+}
+
+
+/**
+ * vmw_bo_bo_free - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+{
+       struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+       vmw_bo_unmap(vmw_bo);
+       kfree(vmw_bo);
+}
+
+
+/**
+ * vmw_user_bo_destroy - vmw buffer object destructor
+ *
+ * @bo: Pointer to the embedded struct ttm_buffer_object
+ */
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+{
+       struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+
+       vmw_bo_unmap(&vmw_user_bo->vbo);
+       ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
+
+/**
+ * vmw_bo_init - Initialize a vmw buffer object
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @size: Buffer object size in bytes.
+ * @placement: Initial placement.
+ * @interruptible: Whether waits should be performed interruptible.
+ * @bo_free: The buffer object destructor.
+ * Returns: Zero on success, negative error code on error.
+ *
+ * Note that on error, the code will free the buffer object.
+ */
+int vmw_bo_init(struct vmw_private *dev_priv,
+               struct vmw_buffer_object *vmw_bo,
+               size_t size, struct ttm_placement *placement,
+               bool interruptible,
+               void (*bo_free)(struct ttm_buffer_object *bo))
+{
+       struct ttm_bo_device *bdev = &dev_priv->bdev;
+       size_t acc_size;
+       int ret;
+       bool user = (bo_free == &vmw_user_bo_destroy);
+
+       WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+
+       acc_size = vmw_bo_acc_size(dev_priv, size, user);
+       memset(vmw_bo, 0, sizeof(*vmw_bo));
+
+       INIT_LIST_HEAD(&vmw_bo->res_list);
+
+       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+                         ttm_bo_type_device, placement,
+                         0, interruptible, acc_size,
+                         NULL, NULL, bo_free);
+       return ret;
+}
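
A kernel-internal user of vmw_bo_init() allocates the object itself and can
rely on the destructor running on failure, roughly as follows (a sketch;
the placement choice depends on the caller):

	/* Sketch: create a kernel-internal buffer object. */
	static struct vmw_buffer_object *
	vmw_bo_create_example(struct vmw_private *dev_priv, size_t size)
	{
		struct vmw_buffer_object *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
		int ret;

		if (!vbo)
			return NULL;

		ret = vmw_bo_init(dev_priv, vbo, size, &vmw_sys_placement,
				  true, vmw_bo_bo_free);
		/* On error, vmw_bo_init() has already freed @vbo. */
		return ret ? NULL : vbo;
	}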
+
+
+/**
+ * vmw_user_bo_release - TTM reference base object release callback for
+ * vmw user buffer objects
+ *
+ * @p_base: The TTM base object pointer about to be unreferenced.
+ *
+ * Clears the TTM base object pointer and drops the reference the
+ * base object has on the underlying struct vmw_buffer_object.
+ */
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
+{
+       struct vmw_user_buffer_object *vmw_user_bo;
+       struct ttm_base_object *base = *p_base;
+       struct ttm_buffer_object *bo;
+
+       *p_base = NULL;
+
+       if (unlikely(base == NULL))
+               return;
+
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+                                  prime.base);
+       bo = &vmw_user_bo->vbo.base;
+       ttm_bo_unref(&bo);
+}
+
+
+/**
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+ * for vmw user buffer objects
+ *
+ * @base: Pointer to the TTM base object
+ * @ref_type: Reference type of the reference reaching zero.
+ *
+ * Called when user-space drops its last synccpu reference on the buffer
+ * object, either explicitly or as part of a cleanup file close.
+ */
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+                                       enum ttm_ref_type ref_type)
+{
+       struct vmw_user_buffer_object *user_bo;
+
+       user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+
+       switch (ref_type) {
+       case TTM_REF_SYNCCPU_WRITE:
+               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+               break;
+       default:
+               WARN_ONCE(true, "Undefined buffer object reference release.\n");
+       }
+}
+
+
+/**
+ * vmw_user_bo_alloc - Allocate a user buffer object
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the buffer object.
+ * @shareable: Whether the buffer should be shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+ * should be assigned.
+ * Return: Zero on success, negative error code on error.
+ */
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+                     struct ttm_object_file *tfile,
+                     uint32_t size,
+                     bool shareable,
+                     uint32_t *handle,
+                     struct vmw_buffer_object **p_vbo,
+                     struct ttm_base_object **p_base)
+{
+       struct vmw_user_buffer_object *user_bo;
+       struct ttm_buffer_object *tmp;
+       int ret;
+
+       user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+       if (unlikely(!user_bo)) {
+               DRM_ERROR("Failed to allocate a buffer.\n");
+               return -ENOMEM;
+       }
+
+       ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+                         (dev_priv->has_mob) ?
+                         &vmw_sys_placement :
+                         &vmw_vram_sys_placement, true,
+                         &vmw_user_bo_destroy);
+       if (unlikely(ret != 0))
+               return ret;
+
+       tmp = ttm_bo_reference(&user_bo->vbo.base);
+       ret = ttm_prime_object_init(tfile,
+                                   size,
+                                   &user_bo->prime,
+                                   shareable,
+                                   ttm_buffer_type,
+                                   &vmw_user_bo_release,
+                                   &vmw_user_bo_ref_obj_release);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unref(&tmp);
+               goto out_no_base_object;
+       }
+
+       *p_vbo = &user_bo->vbo;
+       if (p_base) {
+               *p_base = &user_bo->prime.base;
+               kref_get(&(*p_base)->refcount);
+       }
+       *handle = user_bo->prime.base.hash.key;
+
+out_no_base_object:
+       return ret;
+}
+
+
+/**
+ * vmw_user_bo_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
+ */
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+                             struct ttm_object_file *tfile)
+{
+       struct vmw_user_buffer_object *vmw_user_bo;
+
+       if (unlikely(bo->destroy != vmw_user_bo_destroy))
+               return -EPERM;
+
+       vmw_user_bo = vmw_user_buffer_object(bo);
+
+       /* Check that the caller has opened the object. */
+       if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+               return 0;
+
+       DRM_ERROR("Could not grant buffer access.\n");
+       return -EPERM;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ * Return: Zero on success, Negative error code on error. In particular,
+ * -EBUSY will be returned if a dontblock operation is requested and the
+ * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+ * interrupted by a signal.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+                                   struct ttm_object_file *tfile,
+                                   uint32_t flags)
+{
+       struct ttm_buffer_object *bo = &user_bo->vbo.base;
+       bool existed;
+       int ret;
+
+       if (flags & drm_vmw_synccpu_allow_cs) {
+               bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+               long lret;
+
+               lret = reservation_object_wait_timeout_rcu
+                       (bo->resv, true, true,
+                        nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+               if (!lret)
+                       return -EBUSY;
+               else if (lret < 0)
+                       return lret;
+               return 0;
+       }
+
+       ret = ttm_bo_synccpu_write_grab
+               (bo, !!(flags & drm_vmw_synccpu_dontblock));
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                TTM_REF_SYNCCPU_WRITE, &existed, false);
+       if (ret != 0 || existed)
+               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+
+       return ret;
+}
+
+/**
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_bo_synccpu_release(uint32_t handle,
+                                          struct ttm_object_file *tfile,
+                                          uint32_t flags)
+{
+       if (!(flags & drm_vmw_synccpu_allow_cs))
+               return ttm_ref_object_base_unref(tfile, handle,
+                                                TTM_REF_SYNCCPU_WRITE);
+
+       return 0;
+}
+
+
+/**
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_vmw_synccpu_arg *arg =
+               (struct drm_vmw_synccpu_arg *) data;
+       struct vmw_buffer_object *vbo;
+       struct vmw_user_buffer_object *user_bo;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct ttm_base_object *buffer_base;
+       int ret;
+
+       if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+           || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+                              drm_vmw_synccpu_dontblock |
+                              drm_vmw_synccpu_allow_cs)) != 0) {
+               DRM_ERROR("Illegal synccpu flags.\n");
+               return -EINVAL;
+       }
+
+       switch (arg->op) {
+       case drm_vmw_synccpu_grab:
+               ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+                                            &buffer_base);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               user_bo = container_of(vbo, struct vmw_user_buffer_object,
+                                      vbo);
+               ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+               vmw_bo_unreference(&vbo);
+               ttm_base_object_unref(&buffer_base);
+               if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+                            ret != -EBUSY)) {
+                       DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+                                 (unsigned int) arg->handle);
+                       return ret;
+               }
+               break;
+       case drm_vmw_synccpu_release:
+               ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+                                                 arg->flags);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+                                 (unsigned int) arg->handle);
+                       return ret;
+               }
+               break;
+       default:
+               DRM_ERROR("Invalid synccpu operation.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
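
From user space, the grab/release pair brackets CPU access to the buffer. A
hedged libdrm sketch; the argument struct, flags and ops are from
vmwgfx_drm.h, while the surrounding code is illustrative only:

	/* User-space sketch using libdrm's drmCommandWrite(). */
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	static int vmw_cpu_access(int fd, uint32_t handle)
	{
		struct drm_vmw_synccpu_arg arg = {
			.handle = handle,
			.op = drm_vmw_synccpu_grab,
			.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
		};
		int ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));

		if (ret)
			return ret;

		/* ... CPU reads and writes on the mapped buffer go here ... */

		arg.op = drm_vmw_synccpu_release;
		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	}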
+
+
+/**
+ * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+ * allocation functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and allocates a
+ * struct vmw_user_buffer_object bo.
+ */
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       union drm_vmw_alloc_dmabuf_arg *arg =
+           (union drm_vmw_alloc_dmabuf_arg *)data;
+       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+       struct vmw_buffer_object *vbo;
+       uint32_t handle;
+       int ret;
+
+       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                               req->size, false, &handle, &vbo,
+                               NULL);
+       if (unlikely(ret != 0))
+               goto out_no_bo;
+
+       rep->handle = handle;
+       rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+       rep->cur_gmr_id = handle;
+       rep->cur_gmr_offset = 0;
+
+       vmw_bo_unreference(&vbo);
+
+out_no_bo:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+
+       return ret;
+}
+
+
+/**
+ * vmw_bo_unref_ioctl - Generic handle close ioctl.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ * Return: Zero on success, negative error code on error.
+ *
+ * This function checks the ioctl arguments for validity and closes a
+ * handle to a TTM base object, optionally freeing the object.
+ */
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+{
+       struct drm_vmw_unref_dmabuf_arg *arg =
+           (struct drm_vmw_unref_dmabuf_arg *)data;
+
+       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                        arg->handle,
+                                        TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+ *
+ * @tfile: The TTM object file the handle is registered with.
+ * @handle: The user buffer object handle
+ * @out: Pointer to where a pointer to the embedded
+ * struct vmw_buffer_object should be placed.
+ * @p_base: Pointer to where a pointer to the TTM base object should be
+ * placed, or NULL if no such pointer is required.
+ * Return: Zero on success, Negative error code on error.
+ *
+ * Both the output base object pointer and the vmw buffer object pointer
+ * will be refcounted.
+ */
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+                      uint32_t handle, struct vmw_buffer_object **out,
+                      struct ttm_base_object **p_base)
+{
+       struct vmw_user_buffer_object *vmw_user_bo;
+       struct ttm_base_object *base;
+
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+                         (unsigned long)handle);
+               return -ESRCH;
+       }
+
+       if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+               ttm_base_object_unref(&base);
+               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+                         (unsigned long)handle);
+               return -EINVAL;
+       }
+
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+                                  prime.base);
+       (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+       if (p_base)
+               *p_base = base;
+       else
+               ttm_base_object_unref(&base);
+       *out = &vmw_user_bo->vbo;
+
+       return 0;
+}
+
+
+/**
+ * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+ *
+ * @tfile: The TTM object file to register the handle with.
+ * @vbo: The embedded vmw buffer object.
+ * @handle: Pointer to where the new handle should be placed.
+ * Return: Zero on success, Negative error code on error.
+ */
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+                         struct vmw_buffer_object *vbo,
+                         uint32_t *handle)
+{
+       struct vmw_user_buffer_object *user_bo;
+
+       if (vbo->base.destroy != vmw_user_bo_destroy)
+               return -EINVAL;
+
+       user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+
+       *handle = user_bo->prime.base.hash.key;
+       return ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                 TTM_REF_USAGE, NULL, false);
+}
+
+
+/**
+ * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+                        struct vmw_fence_obj *fence)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+
+       struct vmw_private *dev_priv =
+               container_of(bdev, struct vmw_private, bdev);
+
+       if (fence == NULL) {
+               vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+               reservation_object_add_excl_fence(bo->resv, &fence->base);
+               dma_fence_put(&fence->base);
+       } else {
+               reservation_object_add_excl_fence(bo->resv, &fence->base);
+       }
+}
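
Because the buffer stays reserved, callers pair vmw_bo_fence_single() with an
explicit unreserve, as in this minimal sketch:

	/* Sketch: fence a reserved buffer, then release the reservation. */
	static void vmw_fence_and_unreserve(struct ttm_buffer_object *bo)
	{
		vmw_bo_fence_single(bo, NULL); /* NULL inserts a new fence */
		ttm_bo_unreserve(bo);
	}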
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+                   struct drm_device *dev,
+                   struct drm_mode_create_dumb *args)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_buffer_object *vbo;
+       int ret;
+
+       args->pitch = args->width * ((args->bpp + 7) / 8);
+       args->size = args->pitch * args->height;
+
+       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                                   args->size, false, &args->handle,
+                                   &vbo, NULL);
+       if (unlikely(ret != 0))
+               goto out_no_bo;
+
+       vmw_bo_unreference(&vbo);
+out_no_bo:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
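
The pitch computation rounds bpp up to whole bytes: for example, bpp = 24
gives (24 + 7) / 8 = 3 bytes per pixel, so a 1024-pixel-wide dumb buffer ends
up with a 3072-byte pitch and size = pitch * height.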
+
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+                       struct drm_device *dev, uint32_t handle,
+                       uint64_t *offset)
+{
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_buffer_object *out_buf;
+       int ret;
+
+       ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+       if (ret != 0)
+               return -EINVAL;
+
+       *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+       vmw_bo_unreference(&out_buf);
+       return 0;
+}
+
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    uint32_t handle)
+{
+       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                        handle, TTM_REF_USAGE);
+}
+
+
+/**
+ * vmw_bo_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+{
+       /* Is @bo embedded in a struct vmw_buffer_object? */
+       if (bo->destroy != vmw_bo_bo_free &&
+           bo->destroy != vmw_user_bo_destroy)
+               return;
+
+       /* Kill any cached kernel maps before swapout */
+       vmw_bo_unmap(vmw_buffer_object(bo));
+}
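The bo->destroy comparison is the driver's cheap type check: only buffers created with one of the two vmwgfx destroy callbacks are known to embed a struct vmw_buffer_object. The same idiom, sketched as a standalone helper with a hypothetical name:

	static struct vmw_buffer_object *vmw_bo_if_ours(struct ttm_buffer_object *bo)
	{
		if (bo->destroy != vmw_bo_bo_free &&
		    bo->destroy != vmw_user_bo_destroy)
			return NULL;	/* not a vmwgfx-owned buffer */

		return container_of(bo, struct vmw_buffer_object, base);
	}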
+
+
+/**
+ * vmw_bo_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Detaches cached maps and device bindings that require that the
+ * buffer doesn't move.
+ */
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+                       struct ttm_mem_reg *mem)
+{
+       struct vmw_buffer_object *vbo;
+
+       if (mem == NULL)
+               return;
+
+       /* Make sure @bo is embedded in a struct vmw_buffer_object. */
+       if (bo->destroy != vmw_bo_bo_free &&
+           bo->destroy != vmw_user_bo_destroy)
+               return;
+
+       vbo = container_of(bo, struct vmw_buffer_object, base);
+
+       /*
+        * Kill any cached kernel maps before move to or from VRAM.
+        * With other types of moves, the underlying pages stay the same,
+        * and the map can be kept.
+        */
+       if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+               vmw_bo_unmap(vbo);
+
+       /*
+        * If we're moving a backup MOB out of MOB placement, then make sure we
+        * read back all resource content first, and unbind the MOB from
+        * the resource.
+        */
+       if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+               vmw_resource_unbind_list(vbo);
+}
index 9f45d5004caeddebe390bead4dabef6acfda0a81..e7e4655d3f36bf3680412b7e462cdb138fd13fbd 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 36c7b6c839c0dd230752cba96c348b40e2c65c65..3b75af9bf85f353b3c7c0a6e1041dd119a92f5ce 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 3767ac335acae2a0013b287785c4538806051efc..7c3cb8efd11a894ecdf5c3993761be809bf68e46 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@ struct vmw_user_context {
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
 };
 
 static void vmw_user_context_free(struct vmw_resource *res);
@@ -424,7 +424,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
 
-       vmw_fence_single_bo(bo, fence);
+       vmw_bo_fence_single(bo, fence);
 
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
@@ -648,7 +648,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
 
-       vmw_fence_single_bo(bo, fence);
+       vmw_bo_fence_single(bo, fence);
 
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter.  0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-                             struct vmw_dma_buffer *mob)
+                             struct vmw_buffer_object *mob)
 {
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
-                       vmw_dmabuf_unreference(&uctx->dx_query_mob);
+                       vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }
 
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
        mob->dx_query_ctx  = ctx_res;
 
        if (!uctx->dx_query_mob)
-               uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+               uctx->dx_query_mob = vmw_bo_reference(mob);
 
        return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
        struct vmw_user_context *uctx =
index cbf54ea7b4c0e05f9efd37053f1ea792d7c68b71..1d45714e1d5a061d1da0d1c9b5bbf7a932a3926d 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -324,7 +324,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
                vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
        mutex_unlock(&dev_priv->binding_mutex);
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-       vmw_fence_single_bo(bo, fence);
+       vmw_bo_fence_single(bo, fence);
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
 
@@ -367,7 +367,7 @@ static int vmw_cotable_readback(struct vmw_resource *res)
        }
 
        (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-       vmw_fence_single_bo(&res->backup->base, fence);
+       vmw_bo_fence_single(&res->backup->base, fence);
        vmw_fence_obj_unreference(&fence);
 
        return 0;
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        struct ttm_operation_ctx ctx = { false, false };
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_cotable *vcotbl = vmw_cotable(res);
-       struct vmw_dma_buffer *buf, *old_buf = res->backup;
+       struct vmw_buffer_object *buf, *old_buf = res->backup;
        struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
        size_t old_size = res->backup_size;
        size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        if (!buf)
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-                             true, vmw_dmabuf_bo_free);
+       ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+                         true, vmw_bo_bo_free);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        /* Let go of the old mob. */
        list_del(&res->mob_head);
        list_add_tail(&res->mob_head, &buf->res_list);
-       vmw_dmabuf_unreference(&old_buf);
+       vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;
 
        return 0;
@@ -491,7 +491,7 @@ out_map_new:
        ttm_bo_kunmap(&old_map);
 out_wait:
        ttm_bo_unreserve(bo);
-       vmw_dmabuf_unreference(&buf);
+       vmw_bo_unreference(&buf);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
deleted file mode 100644 (file)
index d59d9dd..0000000
+++ /dev/null
@@ -1,376 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
-
-/**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @placement:  The placement to pin it.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- *  -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
-                               struct vmw_dma_buffer *buf,
-                               struct ttm_placement *placement,
-                               bool interruptible)
-{
-       struct ttm_operation_ctx ctx = {interruptible, false };
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-       uint32_t new_flags;
-
-       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err;
-
-       if (buf->pin_count > 0)
-               ret = ttm_bo_mem_compat(placement, &bo->mem,
-                                       &new_flags) == true ? 0 : -EINVAL;
-       else
-               ret = ttm_bo_validate(bo, placement, &ctx);
-
-       if (!ret)
-               vmw_bo_pin_reserved(buf, true);
-
-       ttm_bo_unreserve(bo);
-
-err:
-       ttm_write_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @pin:  Pin buffer if true.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
-                                 bool interruptible)
-{
-       struct ttm_operation_ctx ctx = {interruptible, false };
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-       uint32_t new_flags;
-
-       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err;
-
-       if (buf->pin_count > 0) {
-               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
-                                       &new_flags) == true ? 0 : -EINVAL;
-               goto out_unreserve;
-       }
-
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
-       if (likely(ret == 0) || ret == -ERESTARTSYS)
-               goto out_unreserve;
-
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-
-out_unreserve:
-       if (!ret)
-               vmw_bo_pin_reserved(buf, true);
-
-       ttm_bo_unreserve(bo);
-err:
-       ttm_write_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-                          struct vmw_dma_buffer *buf,
-                          bool interruptible)
-{
-       return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
-                                          interruptible);
-}
-
-/**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to pin.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
-                                   struct vmw_dma_buffer *buf,
-                                   bool interruptible)
-{
-       struct ttm_operation_ctx ctx = {interruptible, false };
-       struct ttm_buffer_object *bo = &buf->base;
-       struct ttm_placement placement;
-       struct ttm_place place;
-       int ret = 0;
-       uint32_t new_flags;
-
-       place = vmw_vram_placement.placement[0];
-       place.lpfn = bo->num_pages;
-       placement.num_placement = 1;
-       placement.placement = &place;
-       placement.num_busy_placement = 1;
-       placement.busy_placement = &place;
-
-       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_execbuf_release_pinned_bo(dev_priv);
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err_unlock;
-
-       /*
-        * Is this buffer already in vram but not at the start of it?
-        * In that case, evict it first because TTM isn't good at handling
-        * that situation.
-        */
-       if (bo->mem.mem_type == TTM_PL_VRAM &&
-           bo->mem.start < bo->num_pages &&
-           bo->mem.start > 0 &&
-           buf->pin_count == 0) {
-               ctx.interruptible = false;
-               (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
-       }
-
-       if (buf->pin_count > 0)
-               ret = ttm_bo_mem_compat(&placement, &bo->mem,
-                                       &new_flags) == true ? 0 : -EINVAL;
-       else
-               ret = ttm_bo_validate(bo, &placement, &ctx);
-
-       /* For some reason we didn't end up at the start of vram */
-       WARN_ON(ret == 0 && bo->offset != 0);
-       if (!ret)
-               vmw_bo_pin_reserved(buf, true);
-
-       ttm_bo_unreserve(bo);
-err_unlock:
-       ttm_write_unlock(&dev_priv->reservation_sem);
-
-       return ret;
-}
-
-/**
- * vmw_dmabuf_unpin - Unpin the buffer given buffer, does not move the buffer.
- *
- * This function takes the reservation_sem in write mode.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to unpin.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
-                    struct vmw_dma_buffer *buf,
-                    bool interruptible)
-{
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-
-       ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err;
-
-       vmw_bo_pin_reserved(buf, false);
-
-       ttm_bo_unreserve(bo);
-
-err:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
- * of a buffer.
- *
- * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
- * @ptr: SVGAGuestPtr returning the result.
- */
-void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
-                         SVGAGuestPtr *ptr)
-{
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
-               ptr->offset = bo->offset;
-       } else {
-               ptr->gmrId = bo->mem.start;
-               ptr->offset = 0;
-       }
-}
-
-
-/**
- * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
- *
- * @vbo: The buffer object. Must be reserved.
- * @pin: Whether to pin or unpin.
- *
- */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
-{
-       struct ttm_operation_ctx ctx = { false, true };
-       struct ttm_place pl;
-       struct ttm_placement placement;
-       struct ttm_buffer_object *bo = &vbo->base;
-       uint32_t old_mem_type = bo->mem.mem_type;
-       int ret;
-
-       lockdep_assert_held(&bo->resv->lock.base);
-
-       if (pin) {
-               if (vbo->pin_count++ > 0)
-                       return;
-       } else {
-               WARN_ON(vbo->pin_count <= 0);
-               if (--vbo->pin_count > 0)
-                       return;
-       }
-
-       pl.fpfn = 0;
-       pl.lpfn = 0;
-       pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
-               | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
-       if (pin)
-               pl.flags |= TTM_PL_FLAG_NO_EVICT;
-
-       memset(&placement, 0, sizeof(placement));
-       placement.num_placement = 1;
-       placement.placement = &pl;
-
-       ret = ttm_bo_validate(bo, &placement, &ctx);
-
-       BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
-
-
-/*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
- *
- * @vbo: The buffer object whose map we are tearing down.
- *
- * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
- */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
-{
-       if (vbo->map.bo == NULL)
-               return;
-
-       ttm_bo_kunmap(&vbo->map);
-}
-
-
-/*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
- *
- * @vbo: The buffer object to map
- * Return: A kernel virtual address or NULL if mapping failed.
- *
- * This function maps a buffer object into the kernel address space, or
- * returns the virtual kernel address of an already existing map. The virtual
- * address remains valid as long as the buffer object is pinned or reserved.
- * The cached map is torn down on either
- * 1) Buffer object move
- * 2) Buffer object swapout
- * 3) Buffer object destruction
- *
- */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
-{
-       struct ttm_buffer_object *bo = &vbo->base;
-       bool not_used;
-       void *virtual;
-       int ret;
-
-       virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
-       if (virtual)
-               return virtual;
-
-       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
-       if (ret)
-               DRM_ERROR("Buffer object map failed: %d.\n", ret);
-
-       return ttm_kmap_obj_virtual(&vbo->map, &not_used);
-}
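The map-and-cache pair survives the file deletion; it moves to vmwgfx_bo.c as vmw_bo_map_and_cache()/vmw_bo_unmap() (see the header changes below). A hedged usage sketch: callers never unmap explicitly, since the cached map is torn down from the move, swapout and destroy paths.

	/* 'data' and 'size' are illustrative; the bo must be pinned or reserved. */
	void *virtual = vmw_bo_map_and_cache(vbo);

	if (virtual)
		memcpy(virtual, data, size);
	/* no explicit unmap: vmw_bo_unmap() runs from the notify callbacks */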
index 09cc721160c46e688aec346fdc9d20a81d99fd1c..bb6dbbe188358f040c1a1e17425874b9b581e367 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT                  \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,    \
                struct drm_vmw_context_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT                            \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT,      \
+               union drm_vmw_gb_surface_create_ext_arg)
+#define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT                               \
+       DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT,         \
+               union drm_vmw_gb_surface_reference_ext_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
 static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+       VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
@@ -219,11 +225,17 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
-                     vmw_user_dmabuf_synccpu_ioctl,
+                     vmw_user_bo_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT,
+                     vmw_gb_surface_define_ext_ioctl,
+                     DRM_AUTH | DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT,
+                     vmw_gb_surface_reference_ext_ioctl,
+                     DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 static const struct pci_device_id vmw_pci_id_list[] = {
@@ -258,6 +270,15 @@ MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
 
 
+static void vmw_print_capabilities2(uint32_t capabilities2)
+{
+       DRM_INFO("Capabilities2:\n");
+       if (capabilities2 & SVGA_CAP2_GROW_OTABLE)
+               DRM_INFO("  Grow oTable.\n");
+       if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY)
+               DRM_INFO("  IntraSurface copy.\n");
+}
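On a host that advertises both bits, these DRM_INFO calls should produce dmesg output roughly like:

	[drm] Capabilities2:
	[drm]   Grow oTable.
	[drm]   IntraSurface copy.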
+
 static void vmw_print_capabilities(uint32_t capabilities)
 {
        DRM_INFO("Capabilities:\n");
@@ -321,7 +342,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
        int ret;
-       struct vmw_dma_buffer *vbo;
+       struct vmw_buffer_object *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
@@ -335,9 +356,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
        if (!vbo)
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
-                             &vmw_sys_ne_placement, false,
-                             &vmw_dmabuf_bo_free);
+       ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+                         &vmw_sys_ne_placement, false,
+                         &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                return ret;
 
@@ -358,7 +379,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 
        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
-               vmw_dmabuf_unreference(&vbo);
+               vmw_bo_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;
 
@@ -460,7 +481,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 
        BUG_ON(dev_priv->pinned_bo != NULL);
 
-       vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+       vmw_bo_unreference(&dev_priv->dummy_query_bo);
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
 
@@ -644,6 +665,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
+       mutex_init(&dev_priv->requested_layout_mutex);
        mutex_init(&dev_priv->global_kms_state_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
@@ -683,6 +705,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        }
 
        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
+
+       if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+               dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2);
+
        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
@@ -751,6 +779,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        }
 
        vmw_print_capabilities(dev_priv->capabilities);
+       if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
+               vmw_print_capabilities2(dev_priv->capabilities2);
 
        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0))
@@ -883,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        if (dev_priv->has_mob) {
                spin_lock(&dev_priv->cap_lock);
-               vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+               vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT);
                dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                spin_unlock(&dev_priv->cap_lock);
        }
@@ -898,9 +928,23 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (ret)
                goto out_no_fifo;
 
+       if (dev_priv->has_dx) {
+               /*
+                * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1
+                * support
+                */
+               if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) {
+                       vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+                                       SVGA3D_DEVCAP_SM41);
+                       dev_priv->has_sm4_1 = vmw_read(dev_priv,
+                                                       SVGA_REG_DEV_CAP);
+               }
+       }
+
        DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
-       DRM_INFO("Atomic: %s\n",
-                (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");
+       DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC)
+                ? "yes." : "no.");
+       DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no.");
 
        snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
                VMWGFX_REPO, VMWGFX_GIT_VERSION);
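Both the DEVCAP_DXCONTEXT and DEVCAP_SM41 probes above use the same SVGA devcap handshake: write the capability index to the DEV_CAP register, then read the same register back to get that capability's value. Isolated as a sketch (hypothetical helper name; real callers also take dev_priv->cap_lock as the DXCONTEXT probe does):

	static u32 vmw_devcap_get(struct vmw_private *dev_priv, u32 cap_index)
	{
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap_index);	/* select */
		return vmw_read(dev_priv, SVGA_REG_DEV_CAP);		/* value */
	}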
index 5fcbe1620d50b34898815be80ec7560115c68997..1abe21758b0d7523a0b66631f615f55e0d07796a 100644 (file)
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
 #include <linux/sync_file.h>
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20180322"
+#define VMWGFX_DRIVER_DATE "20180704"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 1
+#define VMWGFX_DRIVER_MINOR 15
+#define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
 struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
-       bool gb_aware;
+       bool gb_aware; /* user-space is guest-backed aware */
 };
 
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
        struct ttm_buffer_object base;
        struct list_head res_list;
        s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
        unsigned long backup_size;
        bool res_dirty; /* Protected by backup buffer reserved */
        bool backup_dirty; /* Protected by backup buffer reserved */
-       struct vmw_dma_buffer *backup;
+       struct vmw_buffer_object *backup;
        unsigned long backup_offset;
        unsigned long pin_count; /* Protected by resource reserved */
        const struct vmw_res_func *func;
@@ -166,7 +166,7 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
        struct vmw_resource res;
-       uint32_t flags;
+       SVGA3dSurfaceAllFlags flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        struct drm_vmw_size base_size;
@@ -180,6 +180,8 @@ struct vmw_surface {
        SVGA3dTextureFilter autogen_filter;
        uint32_t multisample_count;
        struct list_head view_list;
+       SVGA3dMSPattern multisample_pattern;
+       SVGA3dMSQualityLevel quality_level;
 };
 
 struct vmw_marker_queue {
@@ -304,7 +306,7 @@ struct vmw_sw_context{
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        struct list_head ctx_resource_list; /* For contexts and cotables */
-       struct vmw_dma_buffer *cur_query_bo;
+       struct vmw_buffer_object *cur_query_bo;
        struct list_head res_relocations;
        uint32_t *buf_start;
        struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +317,7 @@ struct vmw_sw_context{
        bool staged_bindings_inuse;
        struct list_head staged_cmd_res;
        struct vmw_resource_val_node *dx_ctx_node;
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
        struct vmw_resource *dx_query_ctx;
        struct vmw_cmdbuf_res_manager *man;
 };
@@ -386,6 +388,7 @@ struct vmw_private {
        uint32_t initial_height;
        u32 *mmio_virt;
        uint32_t capabilities;
+       uint32_t capabilities2;
        uint32_t max_gmr_ids;
        uint32_t max_gmr_pages;
        uint32_t max_mob_pages;
@@ -397,6 +400,7 @@ struct vmw_private {
        spinlock_t cap_lock;
        bool has_dx;
        bool assume_16bpp;
+       bool has_sm4_1;
 
        /*
         * VGA registers.
@@ -411,6 +415,15 @@ struct vmw_private {
 
        uint32_t num_displays;
 
+       /*
+        * Currently requested_layout_mutex is used to protect the gui
+        * positioning state in the display unit. With that use case, this
+        * mutex is currently only taken during the layout ioctl and atomic
+        * check_modeset. Other display unit state could be protected with
+        * this mutex, but that needs careful consideration.
+        */
+       struct mutex requested_layout_mutex;
+
        /*
         * Framebuffer info.
         */
@@ -513,8 +526,8 @@ struct vmw_private {
         * are protected by the cmdbuf mutex.
         */
 
-       struct vmw_dma_buffer *dummy_query_bo;
-       struct vmw_dma_buffer *pinned_bo;
+       struct vmw_buffer_object *dummy_query_bo;
+       struct vmw_buffer_object *pinned_bo;
        uint32_t query_cid;
        uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;
@@ -623,43 +636,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
-                                 struct vmw_dma_buffer **out_buf);
+                                 struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
        struct vmw_private *dev_priv,
        struct ttm_object_file *tfile,
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                          struct vmw_dma_buffer *vmw_bo,
-                          size_t size, struct ttm_placement *placement,
-                          bool interuptable,
-                          void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-                                 struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-                                struct ttm_object_file *tfile,
-                                uint32_t size,
-                                bool shareable,
-                                uint32_t *handle,
-                                struct vmw_dma_buffer **p_dma_buf,
-                                struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                                    struct vmw_dma_buffer *dma_buf,
-                                    uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-                                        struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-                                        uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                                 uint32_t id, struct vmw_dma_buffer **out,
-                                 struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +653,70 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
                                   bool switch_backup,
-                                  struct vmw_dma_buffer *new_backup,
+                                  struct vmw_buffer_object *new_backup,
                                   unsigned long new_backup_offset);
-extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
-                                    struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);
-extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
-extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
-                               struct vmw_fence_obj *fence);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-
+extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
 
 /**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
  */
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
-                                      struct vmw_dma_buffer *bo,
-                                      struct ttm_placement *placement,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+                                  struct vmw_buffer_object *bo,
+                                  struct ttm_placement *placement,
+                                  bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+                             struct vmw_buffer_object *buf,
+                             bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+                                    struct vmw_buffer_object *buf,
+                                    bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+                                      struct vmw_buffer_object *bo,
                                       bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
-                                 bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-                                        struct vmw_dma_buffer *buf,
-                                        bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-                                          struct vmw_dma_buffer *bo,
-                                          bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
-                           struct vmw_dma_buffer *bo,
-                           bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+                       struct vmw_buffer_object *bo,
+                       bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+                      struct vmw_buffer_object *vmw_bo,
+                      size_t size, struct ttm_placement *placement,
+                      bool interruptible,
+                      void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+                                    struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+                            struct ttm_object_file *tfile,
+                            uint32_t size,
+                            bool shareable,
+                            uint32_t *handle,
+                            struct vmw_buffer_object **p_dma_buf,
+                            struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+                                struct vmw_buffer_object *dma_buf,
+                                uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+                             uint32_t id, struct vmw_buffer_object **out,
+                             struct ttm_base_object **base);
+extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+                               struct vmw_fence_obj *fence);
+extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
+extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+                              struct ttm_mem_reg *mem);
+extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +768,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
 
 extern const size_t vmw_tt_size;
@@ -1041,8 +1051,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                          bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-                                    struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+                                    struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
 
@@ -1070,14 +1080,22 @@ extern int vmw_surface_validate(struct vmw_private *dev_priv,
                                struct vmw_surface *srf);
 int vmw_surface_gb_priv_define(struct drm_device *dev,
                               uint32_t user_accounting_size,
-                              uint32_t svga3d_flags,
+                              SVGA3dSurfaceAllFlags svga3d_flags,
                               SVGA3dSurfaceFormat format,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
                               uint32_t array_size,
                               struct drm_vmw_size size,
+                              SVGA3dMSPattern multisample_pattern,
+                              SVGA3dMSQualityLevel quality_level,
                               struct vmw_surface **srf_out);
+extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
+                                          void *data,
+                                          struct drm_file *file_priv);
+extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
+                                             void *data,
+                                             struct drm_file *file_priv);
 
 /*
  * Shader management - vmwgfx_shader.c
@@ -1224,6 +1242,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                    u32 w, u32 h,
                    struct vmw_diff_cpy *diff);
 
+/* Host messaging - vmwgfx_msg.c: */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+                          char *buffer, size_t *length);
+int vmw_host_log(const char *log);
+
 /**
  * Inline helper functions
  */
@@ -1243,9 +1266,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
        return srf;
 }
 
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 {
-       struct vmw_dma_buffer *tmp_buf = *buf;
+       struct vmw_buffer_object *tmp_buf = *buf;
 
        *buf = NULL;
        if (tmp_buf != NULL) {
@@ -1255,7 +1278,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
        }
 }
 
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
 {
        if (ttm_bo_reference(&buf->base))
                return buf;
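Renamed but behaviourally identical to the old vmw_dmabuf_* pair; a minimal usage sketch:

	struct vmw_buffer_object *ref = vmw_bo_reference(vbo);	/* take a ref */

	/* ... use ref ... */
	vmw_bo_unreference(&ref);	/* drops the ref and NULLs the pointer */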
@@ -1302,10 +1326,4 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
 {
        WRITE_ONCE(*addr, value);
 }
-
-/**
- * Add vmw_msg module function
- */
-extern int vmw_host_log(const char *log);
-
 #endif
index c9d5cc237124e27abec4821e98b6b651ae104007..1f134570b7599483e2768cfcbe785409f2b7ba04 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
-       struct vmw_dma_buffer *new_backup;
+       struct vmw_buffer_object *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
-                                struct vmw_dma_buffer **vmw_bo_p);
+                                struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                  struct vmw_dma_buffer *vbo,
+                                  struct vmw_buffer_object *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
-               vmw_dmabuf_unreference(&val->new_backup);
+               vmw_bo_unreference(&val->new_backup);
        }
 }
 
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
        }
 
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-               struct vmw_dma_buffer *dx_query_mob;
+               struct vmw_buffer_object *dx_query_mob;
 
                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                  struct vmw_dma_buffer *vbo,
+                                  struct vmw_buffer_object *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
                        return ret;
 
                if (res->backup) {
-                       struct vmw_dma_buffer *vbo = res->backup;
+                       struct vmw_buffer_object *vbo = res->backup;
 
                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
        }
 
        if (sw_context->dx_query_mob) {
-               struct vmw_dma_buffer *expected_dx_query_mob;
+               struct vmw_buffer_object *expected_dx_query_mob;
 
                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
-               struct vmw_dma_buffer *backup = res->backup;
+               struct vmw_buffer_object *backup = res->backup;
 
                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
-                       struct vmw_dma_buffer *vbo = res->backup;
+                       struct vmw_buffer_object *vbo = res->backup;
 
                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
        struct vmw_private *dev_priv = ctx_res->dev_priv;
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-                                      struct vmw_dma_buffer *new_query_bo,
+                                      struct vmw_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
 {
        struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-                       vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+                       vmw_bo_unreference(&dev_priv->pinned_bo);
                }
 
                if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
-                               vmw_dmabuf_reference(sw_context->cur_query_bo);
+                               vmw_bo_reference(sw_context->cur_query_bo);
                }
        }
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
-                                struct vmw_dma_buffer **vmw_bo_p)
+                                struct vmw_buffer_object **vmw_bo_p)
 {
-       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-                                    NULL);
+       ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        return 0;
 
 out_no_reloc:
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
 }
@@ -1343,15 +1342,14 @@ out_no_reloc:
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
-                                  struct vmw_dma_buffer **vmw_bo_p)
+                                  struct vmw_buffer_object **vmw_bo_p)
 {
-       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-                                    NULL);
+       ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        return 0;
 
 out_no_reloc:
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                SVGA3dCmdDXBindQuery q;
        } *cmd;
 
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        int    ret;
 
 
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
 
        return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 
        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 
        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return 0;
 }
 
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return 0;
 }
 
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo = NULL;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                             header);
 
 out_no_surface:
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        int ret;
 
        struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
 
        return ret;
 }
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
                                     uint32_t *buf_id,
                                     unsigned long backup_offset)
 {
-       struct vmw_dma_buffer *dma_buf;
+       struct vmw_buffer_object *dma_buf;
        int ret;
 
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
        if (val_node->first_usage)
                val_node->no_buffer_needed = true;
 
-       vmw_dmabuf_unreference(&val_node->new_backup);
+       vmw_bo_unreference(&val_node->new_backup);
        val_node->new_backup = dma_buf;
        val_node->new_backup_offset = backup_offset;
 
@@ -3118,6 +3116,32 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
                                 &cmd->body.destSid, NULL);
 }
 
+/**
+ * vmw_cmd_intra_surface_copy - Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
+                                          struct vmw_sw_context *sw_context,
+                                          SVGA3dCmdHeader *header)
+{
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdIntraSurfaceCopy body;
+       } *cmd = container_of(header, typeof(*cmd), header);
+
+       if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
+               return -EINVAL;
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.surface.sid, NULL);
+}
+
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
@@ -3232,9 +3256,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
                    false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
+       VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
                    false, false, false),
-       VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
+       VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
                    false, false, false),
        VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
                    false, false, false),
@@ -3473,6 +3497,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
                    &vmw_cmd_dx_transfer_from_buffer,
                    true, false, true),
+       VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
+                   true, false, true),
 };
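For reference when reading the table above: the three booleans in each VMW_CMD_DEF entry correspond, per the macro's parameters earlier in vmwgfx_execbuf.c, to user_allow, gb_disable and gb_enable, i.e. whether user-space may submit the command and whether the command is valid with guest-backed objects disabled or enabled. The new SVGA_3D_CMD_INTRA_SURFACE_COPY entry (true, false, true) is therefore user-submittable and valid only on guest-backed configurations, consistent with the SVGA_CAP2_INTRA_SURFACE_COPY check in its validator above.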
 
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
@@ -3701,8 +3727,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                               bool interruptible,
                               bool validate_as_mob)
 {
-       struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-                                                 base);
+       struct vmw_buffer_object *vbo =
+               container_of(bo, struct vmw_buffer_object, base);
        struct ttm_operation_ctx ctx = { interruptible, true };
        int ret;
 
@@ -4423,7 +4449,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
-       vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+       vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
        return;
 
@@ -4432,7 +4458,7 @@ out_no_emit:
 out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
-       vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+       vmw_bo_unreference(&dev_priv->pinned_bo);
 }
 
 /**
index 54e300365a5ccd04b2d5d6173fbb379bde860322..b913a56f3426669f21582e271fac9add830bb91d 100644 (file)
@@ -42,7 +42,7 @@ struct vmw_fb_par {
        void *vmalloc;
 
        struct mutex bo_mutex;
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        unsigned bo_size;
        struct drm_framebuffer *set_fb;
        struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
        struct drm_clip_rect clip;
        struct drm_framebuffer *cur_fb;
        u8 *src_ptr, *dst_ptr;
-       struct vmw_dma_buffer *vbo = par->vmw_bo;
+       struct vmw_buffer_object *vbo = par->vmw_bo;
        void *virtual;
 
        if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 
        (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
        (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
-       virtual = vmw_dma_buffer_map_and_cache(vbo);
+       virtual = vmw_bo_map_and_cache(vbo);
        if (!virtual)
                goto out_unreserve;
 
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
  */
 
 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-                           size_t size, struct vmw_dma_buffer **out)
+                           size_t size, struct vmw_buffer_object **out)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        int ret;
 
        (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                goto err_unlock;
        }
 
-       ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+       ret = vmw_bo_init(vmw_priv, vmw_bo, size,
                              &vmw_sys_placement,
                              false,
-                             &vmw_dmabuf_bo_free);
+                             &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */
 
@@ -439,38 +439,13 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
 static int vmwgfx_set_config_internal(struct drm_mode_set *set)
 {
        struct drm_crtc *crtc = set->crtc;
-       struct drm_framebuffer *fb;
-       struct drm_crtc *tmp;
-       struct drm_device *dev = set->crtc->dev;
        struct drm_modeset_acquire_ctx ctx;
        int ret;
 
        drm_modeset_acquire_init(&ctx, 0);
 
 restart:
-       /*
-        * NOTE: ->set_config can also disable other crtcs (if we steal all
-        * connectors from it), hence we need to refcount the fbs across all
-        * crtcs. Atomic modeset will have saner semantics ...
-        */
-       drm_for_each_crtc(tmp, dev)
-               tmp->primary->old_fb = tmp->primary->fb;
-
-       fb = set->fb;
-
        ret = crtc->funcs->set_config(set, &ctx);
-       if (ret == 0) {
-               crtc->primary->crtc = crtc;
-               crtc->primary->fb = fb;
-       }
-
-       drm_for_each_crtc(tmp, dev) {
-               if (tmp->primary->fb)
-                       drm_framebuffer_get(tmp->primary->fb);
-               if (tmp->primary->old_fb)
-                       drm_framebuffer_put(tmp->primary->old_fb);
-               tmp->primary->old_fb = NULL;
-       }
 
        if (ret == -EDEADLK) {
                drm_modeset_backoff(&ctx);
@@ -516,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
        }
 
        if (par->vmw_bo && detach_bo && unref_bo)
-               vmw_dmabuf_unreference(&par->vmw_bo);
+               vmw_bo_unreference(&par->vmw_bo);
 
        return 0;
 }
index 9ed544f8958f322b8fc4f34835e10414921a211d..3d546d4093341fd6ba329c06830f173666e2f707 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
        struct vmw_private *dev_priv = fman->dev_priv;
        struct vmwgfx_wait_cb cb;
        long ret = timeout;
-       unsigned long irq_flags;
 
        if (likely(vmw_fence_obj_signaled(fence)))
                return timeout;
@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);
 
-       spin_lock_irqsave(f->lock, irq_flags);
+       spin_lock(f->lock);
 
        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
        cb.task = current;
        list_add(&cb.base.node, &f->cb_list);
 
-       while (ret > 0) {
+       for (;;) {
                __vmw_fences_update(fman);
-               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
-                       break;
 
+               /*
+                * We can use the barrier free __set_current_state() since
+                * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
+                * fence spinlock.
+                */
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
-               spin_unlock_irqrestore(f->lock, irq_flags);
 
-               ret = schedule_timeout(ret);
+               if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
+                       if (ret == 0 && timeout > 0)
+                               ret = 1;
+                       break;
+               }
 
-               spin_lock_irqsave(f->lock, irq_flags);
-               if (ret > 0 && intr && signal_pending(current))
+               if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
-       }
+                       break;
+               }
 
+               if (ret == 0)
+                       break;
+
+               spin_unlock(f->lock);
+
+               ret = schedule_timeout(ret);
+
+               spin_lock(f->lock);
+       }
+       __set_current_state(TASK_RUNNING);
        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
-       __set_current_state(TASK_RUNNING);
 
 out:
-       spin_unlock_irqrestore(f->lock, irq_flags);
+       spin_unlock(f->lock);
 
        vmw_seqno_waiter_remove(dev_priv);
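The rewritten wait loop above follows the kernel's canonical sleep-wait pattern: publish the sleeping task state before testing the wake-up condition, so a wakeup arriving between the test and the schedule cannot be lost. A minimal sketch of the pattern (condition and timeout are placeholders, not vmwgfx code):

	for (;;) {
		/* Publish the sleeping state first... */
		set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * ...then test the condition; a waker that flips it after
		 * this point will also wake_up_process() the sleeper.
		 */
		if (condition)
			break;

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (timeout == 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

The vmwgfx version can use the barrier-free __set_current_state() instead of set_current_state() because, as the added comment notes, the fence spinlock already orders the state write against DMA_FENCE_FLAG_SIGNALED_BIT and the wakeup.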
 
index 20224dba9d8e02f94ae17f6671eaeee74a392663..c9382933c2b94775a2cc165f73581db46a6ab8e0 100644 (file)
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2011-2012 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2012 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index a1c68e6a689e32fd0dd4d74c805ee4afd0836a99..d0fd147ef75f2276fcd5c17865d65d94728e682c 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 66ffa1d4759c176afec56a6b5658c1f470128c4a..007a0cc7f232219a825f9cad5ff263bee2c936d2 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index f2f9d88131f25ad37d0c018940627630030a4c26..ddb1e9365a3e52ffc36465708ce676dff5158bec 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index c5e8eae0dbe23c5f1f5966349075b4f242a1fc97..172a6ba6539cb39d274bdd445628ca7fade9ddd4 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -56,6 +56,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_HW_CAPS:
                param->value = dev_priv->capabilities;
                break;
+       case DRM_VMW_PARAM_HW_CAPS2:
+               param->value = dev_priv->capabilities2;
+               break;
        case DRM_VMW_PARAM_FIFO_CAPS:
                param->value = dev_priv->fifo.capabilities;
                break;
@@ -113,6 +116,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_DX:
                param->value = dev_priv->has_dx;
                break;
+       case DRM_VMW_PARAM_SM4_1:
+               param->value = dev_priv->has_sm4_1;
+               break;
        default:
                return -EINVAL;
        }
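A user-space probe for the two new parameters might look like the following sketch. It is hypothetical: it assumes libdrm's drmCommandWriteRead() helper and the drm_vmw_getparam_arg layout from the vmwgfx uapi header, and it does no error handling beyond the return code:

	#include <stdint.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"	/* vmwgfx uapi header */

	/* Hypothetical helper: query a single vmwgfx parameter. */
	static int vmw_get_param(int fd, uint32_t param, uint64_t *value)
	{
		struct drm_vmw_getparam_arg arg = { .param = param };
		int ret;

		ret = drmCommandWriteRead(fd, DRM_VMW_GETPARAM, &arg,
					  sizeof(arg));
		if (ret == 0)
			*value = arg.value;
		return ret;
	}

A caller that gets 0 back from vmw_get_param(fd, DRM_VMW_PARAM_SM4_1, &v) with v != 0 can assume SM4.1 support; DRM_VMW_PARAM_HW_CAPS2 likewise exposes the raw capabilities2 bits queried above.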
@@ -122,15 +128,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 
 static u32 vmw_mask_multisample(unsigned int cap, u32 fmt_value)
 {
-       /* If the header is updated, update the format test as well! */
-       BUILD_BUG_ON(SVGA3D_DEVCAP_DXFMT_BC5_UNORM + 1 != SVGA3D_DEVCAP_MAX);
-
-       if (cap >= SVGA3D_DEVCAP_DXFMT_X8R8G8B8 &&
-           cap <= SVGA3D_DEVCAP_DXFMT_BC5_UNORM)
-               fmt_value &= ~(SVGADX_DXFMT_MULTISAMPLE_2 |
-                              SVGADX_DXFMT_MULTISAMPLE_4 |
-                              SVGADX_DXFMT_MULTISAMPLE_8);
-       else if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
+       /*
+        * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
+        * to check the sample count supported by the virtual device. Since there
+        * never was support for multisample counts for backing MOBs, return 0.
+        */
+       if (cap == SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES)
                return 0;
 
        return fmt_value;
@@ -377,8 +380,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
        }
 
        vfb = vmw_framebuffer_to_vfb(fb);
-       if (!vfb->dmabuf) {
-               DRM_ERROR("Framebuffer not dmabuf backed.\n");
+       if (!vfb->bo) {
+               DRM_ERROR("Framebuffer not buffer backed.\n");
                ret = -EINVAL;
                goto out_no_ttm_lock;
        }
index b9239ba067c484da3f6d453ae02d088e376fd685..c3ad4478266b44cdaa2ec2cf1cb29930395d5e49 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 01f2dc9e6f52803a92b1ae7c38ef269c458eb732..23beff5d8e3c37e6904314db295cc9896657a38f 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
        return 0;
 }
 
-static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
-                                   struct vmw_dma_buffer *dmabuf,
-                                   u32 width, u32 height,
-                                   u32 hotspotX, u32 hotspotY)
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+                               struct vmw_buffer_object *bo,
+                               u32 width, u32 height,
+                               u32 hotspotX, u32 hotspotY)
 {
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
        kmap_offset = 0;
        kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-       ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
+       ret = ttm_bo_reserve(&bo->base, true, false, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return -EINVAL;
        }
 
-       ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+       ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0))
                goto err_unreserve;
 
@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
 
        ttm_bo_kunmap(&map);
 err_unreserve:
-       ttm_bo_unreserve(&dmabuf->base);
+       ttm_bo_unreserve(&bo->base);
 
        return ret;
 }
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);
 
-       if (vps->dmabuf)
-               vmw_dmabuf_unreference(&vps->dmabuf);
+       if (vps->bo)
+               vmw_bo_unreference(&vps->bo);
 
        if (fb) {
-               if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
-                       vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-                       vmw_dmabuf_reference(vps->dmabuf);
+               if (vmw_framebuffer_to_vfb(fb)->bo) {
+                       vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+                       vmw_bo_reference(vps->bo);
                } else {
                        vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
                        vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
        }
 
        du->cursor_surface = vps->surf;
-       du->cursor_dmabuf = vps->dmabuf;
+       du->cursor_bo = vps->bo;
 
        if (vps->surf) {
                du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                              vps->surf->snooper.image,
                                              64, 64, hotspot_x,
                                              hotspot_y);
-       } else if (vps->dmabuf) {
-               ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
-                                              plane->state->crtc_w,
-                                              plane->state->crtc_h,
-                                              hotspot_x, hotspot_y);
+       } else if (vps->bo) {
+               ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+                                          plane->state->crtc_w,
+                                          plane->state->crtc_h,
+                                          hotspot_x, hotspot_y);
        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                ret = -EINVAL;
        }
 
-       if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
+       if (!vmw_framebuffer_to_vfb(fb)->bo)
                surface = vmw_framebuffer_to_vfbs(fb)->surface;
 
        if (surface && !surface->snooper.image) {
@@ -535,9 +535,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
                             struct drm_crtc_state *new_state)
 {
        struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
-       int connector_mask = 1 << drm_connector_index(&du->connector);
+       int connector_mask = drm_connector_mask(&du->connector);
        bool has_primary = new_state->plane_mask &
-                          BIT(drm_plane_index(crtc->primary));
+                          drm_plane_mask(crtc->primary);
 
        /* We always want to have an active plane with an active CRTC */
        if (has_primary != new_state->enable)
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
        if (vps->surf)
                (void) vmw_surface_reference(vps->surf);
 
-       if (vps->dmabuf)
-               (void) vmw_dmabuf_reference(vps->dmabuf);
+       if (vps->bo)
+               (void) vmw_bo_reference(vps->bo);
 
        state = &vps->base;
 
@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);
 
-       if (vps->dmabuf)
-               vmw_dmabuf_unreference(&vps->dmabuf);
+       if (vps->bo)
+               vmw_bo_unreference(&vps->bo);
 
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 
 /**
  * vmw_kms_readback - Perform a readback from the screen system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm_file identifying the caller.
  * Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @user_fence_rep: User-space provided structure for fence information.
  * Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                                           struct vmw_framebuffer **out,
                                           const struct drm_mode_fb_cmd2
                                           *mode_cmd,
-                                          bool is_dmabuf_proxy)
+                                          bool is_bo_proxy)
 
 {
        struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
        drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
        vfbs->surface = vmw_surface_reference(surface);
        vfbs->base.user_handle = mode_cmd->handles[0];
-       vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
+       vfbs->is_bo_proxy = is_bo_proxy;
 
        *out = &vfbs->base;
 
@@ -1038,30 +1038,30 @@ out_err1:
 }
 
 /*
- * Dmabuf framebuffer code
+ * Buffer-object framebuffer code
  */
 
-static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
 {
-       struct vmw_framebuffer_dmabuf *vfbd =
+       struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);
 
        drm_framebuffer_cleanup(framebuffer);
-       vmw_dmabuf_unreference(&vfbd->buffer);
+       vmw_bo_unreference(&vfbd->buffer);
        if (vfbd->base.user_obj)
                ttm_base_object_unref(&vfbd->base.user_obj);
 
        kfree(vfbd);
 }
 
-static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
-                                struct drm_file *file_priv,
-                                unsigned flags, unsigned color,
-                                struct drm_clip_rect *clips,
-                                unsigned num_clips)
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+                                   struct drm_file *file_priv,
+                                   unsigned int flags, unsigned int color,
+                                   struct drm_clip_rect *clips,
+                                   unsigned int num_clips)
 {
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-       struct vmw_framebuffer_dmabuf *vfbd =
+       struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);
        struct drm_clip_rect norect;
        int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
                                       true, true, NULL);
                break;
        case vmw_du_screen_object:
-               ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
-                                                 clips, NULL, num_clips,
-                                                 increment, true, NULL, NULL);
+               ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
+                                             clips, NULL, num_clips,
+                                             increment, true, NULL, NULL);
                break;
        case vmw_du_legacy:
-               ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
-                                                 clips, num_clips, increment);
+               ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+                                             clips, num_clips, increment);
                break;
        default:
                ret = -EINVAL;
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
        return ret;
 }
 
-static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
-       .destroy = vmw_framebuffer_dmabuf_destroy,
-       .dirty = vmw_framebuffer_dmabuf_dirty,
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+       .destroy = vmw_framebuffer_bo_destroy,
+       .dirty = vmw_framebuffer_bo_dirty,
 };
 
 /**
- * Pin the dmabuffer in a location suitable for access by the
+ * Pin the buffer object in a location suitable for access by the
  * display system.
  */
 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        struct ttm_placement *placement;
        int ret;
 
-       buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+       buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
                vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
        if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
        switch (dev_priv->active_display_unit) {
        case vmw_du_legacy:
                vmw_overlay_pause_all(dev_priv);
-               ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+               ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
                vmw_overlay_resume_all(dev_priv);
                break;
        case vmw_du_screen_object:
        case vmw_du_screen_target:
-               if (vfb->dmabuf) {
+               if (vfb->bo) {
                        if (dev_priv->capabilities & SVGA_CAP_3D) {
                                /*
                                 * Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
                        placement = &vmw_mob_placement;
                }
 
-               return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
-                                                  false);
+               return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
        default:
                return -EINVAL;
        }
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
 {
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
 
-       buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+       buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
                vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
        if (WARN_ON(!buf))
                return 0;
 
-       return vmw_dmabuf_unpin(dev_priv, buf, false);
+       return vmw_bo_unpin(dev_priv, buf, false);
 }
 
 /**
- * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
  *
  * @dev: DRM device
  * @mode_cmd: parameters for the new surface
- * @dmabuf_mob: MOB backing the DMA buf
+ * @bo_mob: MOB backing the buffer object
  * @srf_out: newly created surface
  *
- * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * When the content FB is a buffer object, we create a surface as a proxy to the
  * same buffer.  This way we can do a surface copy rather than a surface DMA.
  * This is a more efficient approach.
  *
  * RETURNS:
  * 0 on success, error code otherwise
  */
-static int vmw_create_dmabuf_proxy(struct drm_device *dev,
-                                  const struct drm_mode_fb_cmd2 *mode_cmd,
-                                  struct vmw_dma_buffer *dmabuf_mob,
-                                  struct vmw_surface **srf_out)
+static int vmw_create_bo_proxy(struct drm_device *dev,
+                              const struct drm_mode_fb_cmd2 *mode_cmd,
+                              struct vmw_buffer_object *bo_mob,
+                              struct vmw_surface **srf_out)
 {
        uint32_t format;
        struct drm_vmw_size content_base_size = {0};
@@ -1239,15 +1238,17 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
        content_base_size.depth  = 1;
 
        ret = vmw_surface_gb_priv_define(dev,
-                       0, /* kernel visible only */
-                       0, /* flags */
-                       format,
-                       true, /* can be a scanout buffer */
-                       1, /* num of mip levels */
-                       0,
-                       0,
-                       content_base_size,
-                       srf_out);
+                                        0, /* kernel visible only */
+                                        0, /* flags */
+                                        format,
+                                        true, /* can be a scanout buffer */
+                                        1, /* num of mip levels */
+                                        0,
+                                        0,
+                                        content_base_size,
+                                        SVGA3D_MS_PATTERN_NONE,
+                                        SVGA3D_MS_QUALITY_NONE,
+                                        srf_out);
        if (ret) {
                DRM_ERROR("Failed to allocate proxy content buffer\n");
                return ret;
@@ -1258,8 +1259,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
        /* Reserve and switch the backing mob. */
        mutex_lock(&res->dev_priv->cmdbuf_mutex);
        (void) vmw_resource_reserve(res, false, true);
-       vmw_dmabuf_unreference(&res->backup);
-       res->backup = vmw_dmabuf_reference(dmabuf_mob);
+       vmw_bo_unreference(&res->backup);
+       res->backup = vmw_bo_reference(bo_mob);
        res->backup_offset = 0;
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1269,21 +1270,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
 
 
 
-static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
-                                         struct vmw_dma_buffer *dmabuf,
-                                         struct vmw_framebuffer **out,
-                                         const struct drm_mode_fb_cmd2
-                                         *mode_cmd)
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+                                     struct vmw_buffer_object *bo,
+                                     struct vmw_framebuffer **out,
+                                     const struct drm_mode_fb_cmd2
+                                     *mode_cmd)
 
 {
        struct drm_device *dev = dev_priv->dev;
-       struct vmw_framebuffer_dmabuf *vfbd;
+       struct vmw_framebuffer_bo *vfbd;
        unsigned int requested_size;
        struct drm_format_name_buf format_name;
        int ret;
 
        requested_size = mode_cmd->height * mode_cmd->pitches[0];
-       if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+       if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
@@ -1312,20 +1313,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
        }
 
        drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
-       vfbd->base.dmabuf = true;
-       vfbd->buffer = vmw_dmabuf_reference(dmabuf);
+       vfbd->base.bo = true;
+       vfbd->buffer = vmw_bo_reference(bo);
        vfbd->base.user_handle = mode_cmd->handles[0];
        *out = &vfbd->base;
 
        ret = drm_framebuffer_init(dev, &vfbd->base.base,
-                                  &vmw_framebuffer_dmabuf_funcs);
+                                  &vmw_framebuffer_bo_funcs);
        if (ret)
                goto out_err2;
 
        return 0;
 
 out_err2:
-       vmw_dmabuf_unreference(&dmabuf);
+       vmw_bo_unreference(&bo);
        kfree(vfbd);
 out_err1:
        return ret;
@@ -1354,57 +1355,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
  * vmw_kms_new_framebuffer - Create a new framebuffer.
  *
  * @dev_priv: Pointer to device private struct.
- * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
  * @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
- * @only_2d: No presents will occur to this dma buffer based framebuffer. This
- * Helps the code to do some important optimizations.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer object based framebuffer.
+ * This helps the code to do some important optimizations.
  * @mode_cmd: Frame-buffer metadata.
  */
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-                       struct vmw_dma_buffer *dmabuf,
+                       struct vmw_buffer_object *bo,
                        struct vmw_surface *surface,
                        bool only_2d,
                        const struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct vmw_framebuffer *vfb = NULL;
-       bool is_dmabuf_proxy = false;
+       bool is_bo_proxy = false;
        int ret;
 
        /*
         * We cannot use the SurfaceDMA command in an non-accelerated VM,
-        * therefore, wrap the DMA buf in a surface so we can use the
+        * therefore, wrap the buffer object in a surface so we can use the
         * SurfaceCopy command.
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
-           dmabuf && only_2d &&
+           bo && only_2d &&
            mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
-               ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
-                                             dmabuf, &surface);
+               ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+                                         bo, &surface);
                if (ret)
                        return ERR_PTR(ret);
 
-               is_dmabuf_proxy = true;
+               is_bo_proxy = true;
        }
 
        /* Create the new framebuffer depending one what we have */
        if (surface) {
                ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                                      mode_cmd,
-                                                     is_dmabuf_proxy);
+                                                     is_bo_proxy);
 
                /*
-                * vmw_create_dmabuf_proxy() adds a reference that is no longer
+                * vmw_create_bo_proxy() adds a reference that is no longer
                 * needed
                 */
-               if (is_dmabuf_proxy)
+               if (is_bo_proxy)
                        vmw_surface_unreference(&surface);
-       } else if (dmabuf) {
-               ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
-                                                    mode_cmd);
+       } else if (bo) {
+               ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+                                                mode_cmd);
        } else {
                BUG();
        }
@@ -1430,23 +1431,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_framebuffer *vfb = NULL;
        struct vmw_surface *surface = NULL;
-       struct vmw_dma_buffer *bo = NULL;
+       struct vmw_buffer_object *bo = NULL;
        struct ttm_base_object *user_obj;
        int ret;
 
-       /**
-        * This code should be conditioned on Screen Objects not being used.
-        * If screen objects are used, we can allocate a GMR to hold the
-        * requested framebuffer.
-        */
-
-       if (!vmw_kms_validate_mode_vram(dev_priv,
-                                       mode_cmd->pitches[0],
-                                       mode_cmd->height)) {
-               DRM_ERROR("Requested mode exceed bounding box limit.\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
        /*
         * Take a reference on the user object of the resource
         * backing the kms fb. This ensures that user-space handle
@@ -1466,7 +1454,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * End conditioned code.
         */
 
-       /* returns either a dmabuf or surface */
+       /* returns either a bo or surface */
        ret = vmw_user_lookup_handle(dev_priv, tfile,
                                     mode_cmd->handles[0],
                                     &surface, &bo);
@@ -1494,7 +1482,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 err_out:
        /* vmw_user_lookup_handle takes one ref so does new_fb */
        if (bo)
-               vmw_dmabuf_unreference(&bo);
+               vmw_bo_unreference(&bo);
        if (surface)
                vmw_surface_unreference(&surface);
 
@@ -1508,7 +1496,168 @@ err_out:
        return &vfb->base;
 }
 
+/**
+ * vmw_kms_check_display_memory - Validates display memory required for a
+ * topology
+ * @dev: DRM device
+ * @num_rects: number of drm_rect in rects
+ * @rects: array of drm_rect representing the topology to validate, indexed
+ * by crtc index.
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code
+ */
+static int vmw_kms_check_display_memory(struct drm_device *dev,
+                                       uint32_t num_rects,
+                                       struct drm_rect *rects)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_rect bounding_box = {0};
+       u64 total_pixels = 0, pixel_mem, bb_mem;
+       int i;
+
+       for (i = 0; i < num_rects; i++) {
+               /*
+                * Currently this check limits the topology to the maximum
+                * texture/screentarget size. This should change in the future
+                * when user-space supports multiple framebuffers per topology.
+                */
+               if (rects[i].x1 < 0 ||  rects[i].y1 < 0 ||
+                   rects[i].x2 > mode_config->max_width ||
+                   rects[i].y2 > mode_config->max_height) {
+                       DRM_ERROR("Invalid GUI layout.\n");
+                       return -EINVAL;
+               }
 
+               /* Bounding box upper left is at (0,0). */
+               if (rects[i].x2 > bounding_box.x2)
+                       bounding_box.x2 = rects[i].x2;
+
+               if (rects[i].y2 > bounding_box.y2)
+                       bounding_box.y2 = rects[i].y2;
+
+               total_pixels += (u64) drm_rect_width(&rects[i]) *
+                       (u64) drm_rect_height(&rects[i]);
+       }
+
+       /* Virtual svga device primary limits are always in 32-bpp. */
+       pixel_mem = total_pixels * 4;
+
+       /*
+        * For HV10 and below, prim_bb_mem is the VRAM size. When
+        * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size
+        * is the limit on the primary bounding box.
+        */
+       if (pixel_mem > dev_priv->prim_bb_mem) {
+               DRM_ERROR("Combined output size too large.\n");
+               return -EINVAL;
+       }
+
+       /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
+       if (dev_priv->active_display_unit != vmw_du_screen_target ||
+           !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
+               bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
+
+               if (bb_mem > dev_priv->prim_bb_mem) {
+                       DRM_ERROR("Topology is beyond supported limits.\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
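As a worked example of the two limits (illustrative numbers, not from the patch): two 1920x1080 outputs side by side give total_pixels = 2 * 1920 * 1080 = 4,147,200, hence pixel_mem = 4,147,200 * 4 bytes, roughly 15.8 MiB; their bounding box is 3840x1080, so bb_mem works out to the same 15.8 MiB, and both values must fit within prim_bb_mem. Arranging the same two outputs diagonally leaves pixel_mem unchanged but grows the bounding box to 3840x2160, doubling bb_mem, which is precisely the case the SVGA_CAP_NO_BB_RESTRICTION exemption for screen targets relaxes.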
+
+/**
+ * vmw_kms_check_topology - Validates topology in drm_atomic_state
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code
+ */
+static int vmw_kms_check_topology(struct drm_device *dev,
+                                 struct drm_atomic_state *state)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+       struct drm_rect *rects;
+       struct drm_crtc *crtc;
+       uint32_t i;
+       int ret = 0;
+
+       rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
+                       GFP_KERNEL);
+       if (!rects)
+               return -ENOMEM;
+
+       mutex_lock(&dev_priv->requested_layout_mutex);
+
+       drm_for_each_crtc(crtc, dev) {
+               struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+               struct drm_crtc_state *crtc_state = crtc->state;
+
+               i = drm_crtc_index(crtc);
+
+               if (crtc_state && crtc_state->enable) {
+                       rects[i].x1 = du->gui_x;
+                       rects[i].y1 = du->gui_y;
+                       rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
+                       rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+               }
+       }
+
+       /* Determine change to topology due to new atomic state */
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+                                     new_crtc_state, i) {
+               struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+               struct drm_connector *connector;
+               struct drm_connector_state *conn_state;
+               struct vmw_connector_state *vmw_conn_state;
+
+               if (!new_crtc_state->enable && old_crtc_state->enable) {
+                       rects[i].x1 = 0;
+                       rects[i].y1 = 0;
+                       rects[i].x2 = 0;
+                       rects[i].y2 = 0;
+                       continue;
+               }
+
+               if (!du->pref_active) {
+                       ret = -EINVAL;
+                       goto clean;
+               }
+
+               /*
+                * For vmwgfx, each crtc has only one connector attached and it
+                * does not change, so there is no real need to check
+                * crtc->connector_mask and iterate over it.
+                */
+               connector = &du->connector;
+               conn_state = drm_atomic_get_connector_state(state, connector);
+               if (IS_ERR(conn_state)) {
+                       ret = PTR_ERR(conn_state);
+                       goto clean;
+               }
+
+               vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+               vmw_conn_state->gui_x = du->gui_x;
+               vmw_conn_state->gui_y = du->gui_y;
+
+               rects[i].x1 = du->gui_x;
+               rects[i].y1 = du->gui_y;
+               rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
+               rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
+       }
+
+       ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
+                                          rects);
+
+clean:
+       mutex_unlock(&dev_priv->requested_layout_mutex);
+       kfree(rects);
+       return ret;
+}
 
 /**
  * vmw_kms_atomic_check_modeset - validate state object for modeset changes
@@ -1520,36 +1669,39 @@ err_out:
  * us to assign a value to mode->crtc_clock so that
  * drm_calc_timestamping_constants() won't throw an error message
  *
- * RETURNS
+ * Returns:
  * Zero for success or -errno
  */
 static int
 vmw_kms_atomic_check_modeset(struct drm_device *dev,
                             struct drm_atomic_state *state)
 {
-       struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       int i;
-
-       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-               unsigned long requested_bb_mem = 0;
+       struct drm_crtc_state *crtc_state;
+       bool need_modeset = false;
+       int i, ret;
 
-               if (dev_priv->active_display_unit == vmw_du_screen_target) {
-                       if (crtc->primary->fb) {
-                               int cpp = crtc->primary->fb->pitches[0] /
-                                         crtc->primary->fb->width;
+       ret = drm_atomic_helper_check(dev, state);
+       if (ret)
+               return ret;
 
-                               requested_bb_mem += crtc->mode.hdisplay * cpp *
-                                                   crtc->mode.vdisplay;
-                       }
+       if (!state->allow_modeset)
+               return ret;
 
-                       if (requested_bb_mem > dev_priv->prim_bb_mem)
-                               return -EINVAL;
-               }
+       /*
+        * The legacy path does not set allow_modeset properly, e.g. via
+        * @drm_atomic_helper_update_plane. This would result in unnecessary
+        * calls to vmw_kms_check_topology, hence this extra set of checks.
+        */
+       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+               if (drm_atomic_crtc_needs_modeset(crtc_state))
+                       need_modeset = true;
        }
 
-       return drm_atomic_helper_check(dev, state);
+       if (need_modeset)
+               return vmw_kms_check_topology(dev, state);
+
+       return ret;
 }
 
 static const struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1841,40 +1993,49 @@ void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
 }
 
-
-/*
- * Small shared kms functions.
+/**
+ * vmw_du_update_layout - Update the display unit with topology from resolution
+ * plugin and generate DRM uevent
+ * @dev_priv: device private
+ * @num_rects: number of drm_rect in rects
+ * @rects: topology to update
  */
-
-static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
-                        struct drm_vmw_rect *rects)
+static int vmw_du_update_layout(struct vmw_private *dev_priv,
+                               unsigned int num_rects, struct drm_rect *rects)
 {
        struct drm_device *dev = dev_priv->dev;
        struct vmw_display_unit *du;
        struct drm_connector *con;
+       struct drm_connector_list_iter conn_iter;
 
-       mutex_lock(&dev->mode_config.mutex);
-
-#if 0
-       {
-               unsigned int i;
-
-               DRM_INFO("%s: new layout ", __func__);
-               for (i = 0; i < num; i++)
-                       DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
-                                rects[i].w, rects[i].h);
-               DRM_INFO("\n");
+       /*
+        * Currently only gui_x/y is protected with requested_layout_mutex.
+        */
+       mutex_lock(&dev_priv->requested_layout_mutex);
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(con, &conn_iter) {
+               du = vmw_connector_to_du(con);
+               if (num_rects > du->unit) {
+                       du->pref_width = drm_rect_width(&rects[du->unit]);
+                       du->pref_height = drm_rect_height(&rects[du->unit]);
+                       du->pref_active = true;
+                       du->gui_x = rects[du->unit].x1;
+                       du->gui_y = rects[du->unit].y1;
+               } else {
+                       du->pref_width = 800;
+                       du->pref_height = 600;
+                       du->pref_active = false;
+                       du->gui_x = 0;
+                       du->gui_y = 0;
+               }
        }
-#endif
+       drm_connector_list_iter_end(&conn_iter);
+       mutex_unlock(&dev_priv->requested_layout_mutex);
 
+       mutex_lock(&dev->mode_config.mutex);
        list_for_each_entry(con, &dev->mode_config.connector_list, head) {
                du = vmw_connector_to_du(con);
-               if (num > du->unit) {
-                       du->pref_width = rects[du->unit].w;
-                       du->pref_height = rects[du->unit].h;
-                       du->pref_active = true;
-                       du->gui_x = rects[du->unit].x;
-                       du->gui_y = rects[du->unit].y;
+               if (num_rects > du->unit) {
                        drm_object_property_set_value
                          (&con->base, dev->mode_config.suggested_x_property,
                           du->gui_x);
@@ -1882,9 +2043,6 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
                          (&con->base, dev->mode_config.suggested_y_property,
                           du->gui_y);
                } else {
-                       du->pref_width = 800;
-                       du->pref_height = 600;
-                       du->pref_active = false;
                        drm_object_property_set_value
                          (&con->base, dev->mode_config.suggested_x_property,
                           0);
@@ -1894,8 +2052,8 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
                }
                con->status = vmw_du_connector_detect(con, true);
        }
-
        mutex_unlock(&dev->mode_config.mutex);
+
        drm_sysfs_hotplug_event(dev);
 
        return 0;
@@ -2110,7 +2268,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
                drm_mode_probed_add(connector, mode);
        }
 
-       drm_mode_connector_list_update(connector);
+       drm_connector_list_update(connector);
        /* Move the preferred mode first, to help apps pick the right mode. */
        drm_mode_sort(&connector->modes);
 
@@ -2195,7 +2353,25 @@ vmw_du_connector_atomic_get_property(struct drm_connector *connector,
        return 0;
 }
 
-
+/**
+ * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Update the preferred topology of the display units as per the ioctl
+ * request. The topology is expressed as an array of drm_vmw_rect,
+ * e.g.
+ * [0 0 640 480] [640 0 800 600] [0 480 640 480]
+ *
+ * NOTE:
+ * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
+ * Besides the device limits on topology, x + w and y + h (lower right) cannot
+ * be greater than INT_MAX, so a topology beyond these limits is rejected.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
@@ -2204,15 +2380,12 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                (struct drm_vmw_update_layout_arg *)data;
        void __user *user_rects;
        struct drm_vmw_rect *rects;
+       struct drm_rect *drm_rects;
        unsigned rects_size;
-       int ret;
-       int i;
-       u64 total_pixels = 0;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_vmw_rect bounding_box = {0};
+       int ret, i;
 
        if (!arg->num_outputs) {
-               struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+               struct drm_rect def_rect = {0, 0, 800, 600};
                vmw_du_update_layout(dev_priv, 1, &def_rect);
                return 0;
        }
@@ -2231,52 +2404,29 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                goto out_free;
        }
 
-       for (i = 0; i < arg->num_outputs; ++i) {
-               if (rects[i].x < 0 ||
-                   rects[i].y < 0 ||
-                   rects[i].x + rects[i].w > mode_config->max_width ||
-                   rects[i].y + rects[i].h > mode_config->max_height) {
-                       DRM_ERROR("Invalid GUI layout.\n");
-                       ret = -EINVAL;
-                       goto out_free;
-               }
-
-               /*
-                * bounding_box.w and bunding_box.h are used as
-                * lower-right coordinates
-                */
-               if (rects[i].x + rects[i].w > bounding_box.w)
-                       bounding_box.w = rects[i].x + rects[i].w;
-
-               if (rects[i].y + rects[i].h > bounding_box.h)
-                       bounding_box.h = rects[i].y + rects[i].h;
+       drm_rects = (struct drm_rect *)rects;
 
-               total_pixels += (u64) rects[i].w * (u64) rects[i].h;
-       }
-
-       if (dev_priv->active_display_unit == vmw_du_screen_target) {
-               /*
-                * For Screen Targets, the limits for a toplogy are:
-                *      1. Bounding box (assuming 32bpp) must be < prim_bb_mem
-                *      2. Total pixels (assuming 32bpp) must be < prim_bb_mem
-                */
-               u64 bb_mem    = (u64) bounding_box.w * bounding_box.h * 4;
-               u64 pixel_mem = total_pixels * 4;
+       for (i = 0; i < arg->num_outputs; i++) {
+               struct drm_vmw_rect curr_rect;
 
-               if (bb_mem > dev_priv->prim_bb_mem) {
-                       DRM_ERROR("Topology is beyond supported limits.\n");
-                       ret = -EINVAL;
+               /* Verify the user-space rects for overflow, as the kernel uses drm_rect */
+               if ((rects[i].x + rects[i].w > INT_MAX) ||
+                   (rects[i].y + rects[i].h > INT_MAX)) {
+                       ret = -ERANGE;
                        goto out_free;
                }
 
-               if (pixel_mem > dev_priv->prim_bb_mem) {
-                       DRM_ERROR("Combined output size too large\n");
-                       ret = -EINVAL;
-                       goto out_free;
-               }
+               curr_rect = rects[i];
+               drm_rects[i].x1 = curr_rect.x;
+               drm_rects[i].y1 = curr_rect.y;
+               drm_rects[i].x2 = curr_rect.x + curr_rect.w;
+               drm_rects[i].y2 = curr_rect.y + curr_rect.h;
        }
 
-       vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+       ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
+
+       if (ret == 0)
+               vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
 
 out_free:
        kfree(rects);
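
To make the new conversion path concrete: the loop above takes each user-supplied drm_vmw_rect, which stores an origin plus extent (x, y, w, h), and rewrites it in place as a corner-based drm_rect (x1, y1, x2, y2); note that drm_rects simply casts the same allocation. Any rect whose lower-right corner would overflow an int is rejected. A standalone C sketch of that arithmetic, using local stand-ins for the two structs rather than the real uapi and kernel headers:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for illustration only. */
struct vmw_rect { int32_t x, y; uint32_t w, h; };  /* like drm_vmw_rect */
struct krect    { int x1, y1, x2, y2; };           /* like drm_rect    */

/* Convert origin+extent to corner form; the kernel returns -ERANGE on
 * overflow, this sketch just returns -1. */
static int rect_to_corners(const struct vmw_rect *in, struct krect *out)
{
	/* Widen to 64 bit so the comparison itself cannot wrap. */
	if ((int64_t)in->x + in->w > INT_MAX ||
	    (int64_t)in->y + in->h > INT_MAX)
		return -1;

	out->x1 = in->x;
	out->y1 = in->y;
	out->x2 = in->x + (int32_t)in->w;
	out->y2 = in->y + (int32_t)in->h;
	return 0;
}

int main(void)
{
	/* Second rect of the example topology in the kernel-doc above. */
	struct vmw_rect in = { 640, 0, 800, 600 };
	struct krect out;

	if (rect_to_corners(&in, &out) == 0)
		printf("[%d %d]..[%d %d]\n", out.x1, out.y1, out.x2, out.y2);
	return 0;
}

So [640 0 800 600] becomes the corner pair [640 0]..[1440 600] before vmw_kms_check_display_memory() sees it.
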
@@ -2322,9 +2472,10 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
        } else {
                list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
                                    head) {
-                       if (crtc->primary->fb != &framebuffer->base)
-                               continue;
-                       units[num_units++] = vmw_crtc_to_du(crtc);
+                       struct drm_plane *plane = crtc->primary;
+
+                       if (plane->state->fb == &framebuffer->base)
+                               units[num_units++] = vmw_crtc_to_du(crtc);
                }
        }
 
@@ -2422,7 +2573,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
  * interrupted by a signal.
  */
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  bool interruptible,
                                  bool validate_as_mob,
                                  bool for_cpu_blit)
@@ -2454,7 +2605,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_buffer_prepare.
  */
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
 {
        if (buf)
                ttm_bo_unreserve(&buf->base);
@@ -2477,7 +2628,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
  */
 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_file *file_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  struct vmw_fence_obj **out_fence,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep)
@@ -2489,7 +2640,7 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
                                         file_priv ? &handle : NULL);
        if (buf)
-               vmw_fence_single_bo(&buf->base, fence);
+               vmw_bo_fence_single(&buf->base, fence);
        if (file_priv)
                vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                            ret, user_fence_rep, fence,
@@ -2517,7 +2668,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
        struct vmw_resource *res = ctx->res;
 
        vmw_kms_helper_buffer_revert(ctx->buf);
-       vmw_dmabuf_unreference(&ctx->buf);
+       vmw_bo_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2562,7 +2713,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
                if (ret)
                        goto out_unreserve;
 
-               ctx->buf = vmw_dmabuf_reference(res->backup);
+               ctx->buf = vmw_bo_reference(res->backup);
        }
        ret = vmw_resource_validate(res);
        if (ret)
@@ -2595,7 +2746,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                             out_fence, NULL);
 
-       vmw_dmabuf_unreference(&ctx->buf);
+       vmw_bo_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2806,6 +2957,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
                                struct drm_crtc *crtc)
 {
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+       struct drm_plane *plane = crtc->primary;
        struct vmw_framebuffer *vfb;
 
        mutex_lock(&dev_priv->global_kms_state_mutex);
@@ -2813,7 +2965,7 @@ void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
        if (!du->is_implicit)
                goto out_unlock;
 
-       vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+       vfb = vmw_framebuffer_to_vfb(plane->state->fb);
        WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
                     dev_priv->implicit_fb != vfb);
 
index 6b7c012719f131033c873040309b3ff3912a6005..31311298ec0ba18f0c31b96f0338a6db40e06247 100644 (file)
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
 #define vmw_framebuffer_to_vfbs(x) \
        container_of(x, struct vmw_framebuffer_surface, base.base)
 #define vmw_framebuffer_to_vfbd(x) \
-       container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+       container_of(x, struct vmw_framebuffer_bo, base.base)
 
 /**
  * Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
        struct drm_framebuffer base;
        int (*pin)(struct vmw_framebuffer *fb);
        int (*unpin)(struct vmw_framebuffer *fb);
-       bool dmabuf;
+       bool bo;
        struct ttm_base_object *user_obj;
        uint32_t user_handle;
 };
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
 struct vmw_framebuffer_surface {
        struct vmw_framebuffer base;
        struct vmw_surface *surface;
-       struct vmw_dma_buffer *buffer;
+       struct vmw_buffer_object *buffer;
        struct list_head head;
-       bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+       bool is_bo_proxy;  /* true if this is a proxy surface for a DMA buf */
 };
 
 
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
        struct vmw_framebuffer base;
-       struct vmw_dma_buffer *buffer;
+       struct vmw_buffer_object *buffer;
 };
 
 
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
  *
  * @base DRM plane object
  * @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
  * @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
  * @pinned pin count for STDU display surface
  */
 struct vmw_plane_state {
        struct drm_plane_state base;
        struct vmw_surface *surf;
-       struct vmw_dma_buffer *dmabuf;
+       struct vmw_buffer_object *bo;
 
        int content_fb_type;
-       unsigned long dmabuf_size;
+       unsigned long bo_size;
 
        int pinned;
 
@@ -192,6 +192,24 @@ struct vmw_connector_state {
        struct drm_connector_state base;
 
        bool is_implicit;
+
+       /**
+        * @gui_x:
+        *
+        * vmwgfx connector property representing the x position of this display
+        * unit (connector is synonymous with display unit) in the overall
+        * topology. This is what the device expects as xRoot when creating the
+        * screen.
+        */
+       int gui_x;
+
+       /**
+        * @gui_y:
+        *
+        * vmwgfx connector property representing the y position of this display
+        * unit (connector is synonymous with display unit) in the overall
+        * topology. This is what the device expects as yRoot when creating the
+        * screen.
+        */
+       int gui_y;
 };
 
 /**
@@ -209,7 +227,7 @@ struct vmw_display_unit {
        struct drm_plane cursor;
 
        struct vmw_surface *cursor_surface;
-       struct vmw_dma_buffer *cursor_dmabuf;
+       struct vmw_buffer_object *cursor_bo;
        size_t cursor_age;
 
        int cursor_x;
@@ -243,7 +261,7 @@ struct vmw_display_unit {
 
 struct vmw_validation_ctx {
        struct vmw_resource *res;
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
 };
 
 #define vmw_crtc_to_du(x) \
@@ -291,14 +309,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         struct vmw_kms_dirty *dirty);
 
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  bool interruptible,
                                  bool validate_as_mob,
                                  bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_file *file_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  struct vmw_fence_obj **out_fence,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep);
@@ -316,7 +334,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
                     uint32_t num_clips);
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-                       struct vmw_dma_buffer *dmabuf,
+                       struct vmw_buffer_object *bo,
                        struct vmw_surface *surface,
                        bool only_2d,
                        const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +402,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
  */
 int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
 int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                               struct vmw_framebuffer *framebuffer,
-                               unsigned flags, unsigned color,
-                               struct drm_clip_rect *clips,
-                               unsigned num_clips, int increment);
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+                           struct vmw_framebuffer *framebuffer,
+                           unsigned int flags, unsigned int color,
+                           struct drm_clip_rect *clips,
+                           unsigned int num_clips, int increment);
 int vmw_kms_update_proxy(struct vmw_resource *res,
                         const struct drm_clip_rect *clips,
                         unsigned num_clips,
@@ -408,14 +426,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
                                 unsigned num_clips, int inc,
                                 struct vmw_fence_obj **out_fence,
                                 struct drm_crtc *crtc);
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                               struct vmw_framebuffer *framebuffer,
-                               struct drm_clip_rect *clips,
-                               struct drm_vmw_rect *vclips,
-                               unsigned num_clips, int increment,
-                               bool interruptible,
-                               struct vmw_fence_obj **out_fence,
-                               struct drm_crtc *crtc);
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+                           struct vmw_framebuffer *framebuffer,
+                           struct drm_clip_rect *clips,
+                           struct drm_vmw_rect *vclips,
+                           unsigned int num_clips, int increment,
+                           bool interruptible,
+                           struct vmw_fence_obj **out_fence,
+                           struct drm_crtc *crtc);
 int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         struct drm_file *file_priv,
                         struct vmw_framebuffer *vfb,
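
The gui_x/gui_y fields documented in the vmw_connector_state hunk above follow the standard DRM pattern for driver-private state: embed the core drm_connector_state as the first member and recover the wrapper with container_of(). A userspace-compilable sketch of the pattern; the helper name here is illustrative, the driver defines its own cast macros:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_connector_state { int dummy; };

struct vmw_connector_state {
	struct drm_connector_state base;
	int gui_x;  /* xRoot of this display unit in the overall topology */
	int gui_y;  /* yRoot, likewise */
};

/* Illustrative helper: recover the driver state from the core pointer. */
static struct vmw_connector_state *
to_vmw_connector_state(struct drm_connector_state *state)
{
	return container_of(state, struct vmw_connector_state, base);
}
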
index 4a5907e3f5602a09217fc5391b74c320debe8fae..72357811719122f70f36ea420786b0333a787e0a 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -438,7 +438,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
                goto err_free_connector;
        }
 
-       (void) drm_mode_connector_attach_encoder(connector, encoder);
+       (void) drm_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;
 
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 }
 
 
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                               struct vmw_framebuffer *framebuffer,
-                               unsigned flags, unsigned color,
-                               struct drm_clip_rect *clips,
-                               unsigned num_clips, int increment)
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+                           struct vmw_framebuffer *framebuffer,
+                           unsigned int flags, unsigned int color,
+                           struct drm_clip_rect *clips,
+                           unsigned int num_clips, int increment)
 {
        size_t fifo_size;
        int i;
index efd1ffd68185a77e07626dd2fc2385434f4f9194..e53bc639a7549a5d4f7bff2e26997de7e1f6b481 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index d07c585e3c1df8849ea0d5f422a53e53e4deddb9..7ed179d30ec51f073e16d0bb8eaab0ee784116a8 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2012-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -225,7 +225,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
                ret = ttm_bo_reserve(bo, false, true, NULL);
                BUG_ON(ret != 0);
 
-               vmw_fence_single_bo(bo, NULL);
+               vmw_bo_fence_single(bo, NULL);
                ttm_bo_unreserve(bo);
        }
 
@@ -362,7 +362,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);
 
-       vmw_fence_single_bo(bo, NULL);
+       vmw_bo_fence_single(bo, NULL);
        ttm_bo_unreserve(bo);
 
        ttm_bo_unref(&batch->otable_bo);
@@ -620,7 +620,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
                vmw_fifo_commit(dev_priv, sizeof(*cmd));
        }
        if (bo) {
-               vmw_fence_single_bo(bo, NULL);
+               vmw_bo_fence_single(bo, NULL);
                ttm_bo_unreserve(bo);
        }
        vmw_fifo_resource_dec(dev_priv);
index 21d746bdc922bc55ce27d25cd4d5c2ad935d595e..8b9270f314091a82f27d659baf9305cc37aa1782 100644 (file)
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /*
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -31,6 +31,7 @@
 #include <linux/frame.h>
 #include <asm/hypervisor.h>
 #include <drm/drmP.h>
+#include "vmwgfx_drv.h"
 #include "vmwgfx_msg.h"
 
 
@@ -234,7 +235,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
                if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
                    (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
-                       DRM_ERROR("Failed to get reply size\n");
+                       DRM_ERROR("Failed to get reply size for host message.\n");
                        return -EINVAL;
                }
 
@@ -245,7 +246,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                reply_len = ebx;
                reply     = kzalloc(reply_len + 1, GFP_KERNEL);
                if (!reply) {
-                       DRM_ERROR("Cannot allocate memory for reply\n");
+                       DRM_ERROR("Cannot allocate memory for host message reply.\n");
                        return -ENOMEM;
                }
 
@@ -338,7 +339,8 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
 
        msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
        if (!msg) {
-               DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+               DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
+                         guest_info_param);
                return -ENOMEM;
        }
 
@@ -374,7 +376,7 @@ out_msg:
 out_open:
        *length = 0;
        kfree(msg);
-       DRM_ERROR("Failed to get %s", guest_info_param);
+       DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
 
        return -EINVAL;
 }
@@ -403,7 +405,7 @@ int vmw_host_log(const char *log)
 
        msg = kasprintf(GFP_KERNEL, "log %s", log);
        if (!msg) {
-               DRM_ERROR("Cannot allocate memory for log message\n");
+               DRM_ERROR("Cannot allocate memory for host log message.\n");
                return -ENOMEM;
        }
 
@@ -422,7 +424,7 @@ out_msg:
        vmw_close_channel(&channel);
 out_open:
        kfree(msg);
-       DRM_ERROR("Failed to send log\n");
+       DRM_ERROR("Failed to send host log message.\n");
 
        return -EINVAL;
 }
index 8545488aa0cfbe1bf1b1d14514d6794c0077834a..4907e50fb20a23a6d6ca6b88bf0b52354564c7b1 100644 (file)
@@ -1,16 +1,29 @@
-/*
- * Copyright (C) 2016, VMware, Inc.
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
+ **************************************************************************
  *
  * Based on code from vmware.c and vmmouse.c.
  * Author:
index 222c9c2123a1ef5e761128d7c6c9ceb11e5131e7..9f1b9d289bec57d9377fec288a284b803ae53d85 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -38,7 +38,7 @@
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
 
 struct vmw_stream {
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        bool claimed;
        bool paused;
        struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
  * -ERESTARTSYS if interrupted by a signal.
  */
 static int vmw_overlay_send_put(struct vmw_private *dev_priv,
-                               struct vmw_dma_buffer *buf,
+                               struct vmw_buffer_object *buf,
                                struct drm_vmw_control_stream_arg *arg,
                                bool interruptible)
 {
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
  * used with GMRs instead of being locked to vram.
  */
 static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
-                                  struct vmw_dma_buffer *buf,
+                                  struct vmw_buffer_object *buf,
                                   bool pin, bool inter)
 {
        if (!pin)
-               return vmw_dmabuf_unpin(dev_priv, buf, inter);
+               return vmw_bo_unpin(dev_priv, buf, inter);
 
        if (dev_priv->active_display_unit == vmw_du_legacy)
-               return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+               return vmw_bo_pin_in_vram(dev_priv, buf, inter);
 
-       return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+       return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 
 /**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
        }
 
        if (!pause) {
-               vmw_dmabuf_unreference(&stream->buf);
+               vmw_bo_unreference(&stream->buf);
                stream->paused = false;
        } else {
                stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted.
  */
 static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
-                                    struct vmw_dma_buffer *buf,
+                                    struct vmw_buffer_object *buf,
                                     struct drm_vmw_control_stream_arg *arg,
                                     bool interruptible)
 {
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
        }
 
        if (stream->buf != buf)
-               stream->buf = vmw_dmabuf_reference(buf);
+               stream->buf = vmw_bo_reference(buf);
        stream->saved = *arg;
        /* stream is no longer stopped/paused */
        stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
            (struct drm_vmw_control_stream_arg *)data;
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        struct vmw_resource *res;
        int ret;
 
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
-       ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+       ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
        if (ret)
                goto out_unlock;
 
        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-       vmw_dmabuf_unreference(&buf);
+       vmw_bo_unreference(&buf);
 
 out_unlock:
        mutex_unlock(&overlay->mutex);
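
The ioctl body above preserves the reference discipline across the dmabuf-to-bo rename: a successful vmw_user_bo_lookup() returns with a reference held, and every exit path balances it with vmw_bo_unreference(), which also clears the caller's pointer. A generic sketch of that discipline (the names are illustrative, not the driver's):

#include <stdlib.h>

struct obj { int refcount; };

/* Lookup returns with one reference held, or NULL on failure. */
static struct obj *obj_lookup(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcount = 1;
	return o;
}

/* Drop a reference and clear the caller's pointer, as
 * vmw_bo_unreference() does. */
static void obj_unref(struct obj **po)
{
	if (*po && --(*po)->refcount == 0)
		free(*po);
	*po = NULL;
}
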
index 0d42a46521fc1cc5c8083722fe03239d64ae3d87..0861c821a7fe399540726ec93e54baab5d383152 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -40,7 +40,6 @@
  */
 
 static int vmw_prime_map_attach(struct dma_buf *dma_buf,
-                               struct device *target_dev,
                                struct dma_buf_attachment *attach)
 {
        return -ENOSYS;
@@ -72,17 +71,6 @@ static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
 }
 
-static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-               unsigned long page_num)
-{
-       return NULL;
-}
-
-static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-               unsigned long page_num, void *addr)
-{
-
-}
 static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
                unsigned long page_num)
 {
@@ -109,9 +97,7 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops =  {
        .unmap_dma_buf = vmw_prime_unmap_dma_buf,
        .release = NULL,
        .map = vmw_prime_dmabuf_kmap,
-       .map_atomic = vmw_prime_dmabuf_kmap_atomic,
        .unmap = vmw_prime_dmabuf_kunmap,
-       .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
        .mmap = vmw_prime_dmabuf_mmap,
        .vmap = vmw_prime_dmabuf_vmap,
        .vunmap = vmw_prime_dmabuf_vunmap,
index dce798053a9676c0307c5cba0832f3acd7ee9456..e99f6cdbb091341613108341e38871e2bcd720a5 100644 (file)
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2009-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 6b3a942b18df49bcd5032e0614d828478c45bbbb..92003ea5a2196a6cbcac38a9e9b4add15a4e6bcb 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -27,7 +27,6 @@
 
 #include "vmwgfx_drv.h"
 #include <drm/vmwgfx_drm.h>
-#include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
-struct vmw_user_dma_buffer {
-       struct ttm_prime_object prime;
-       struct vmw_dma_buffer dma;
-};
-
-struct vmw_bo_user_rep {
-       uint32_t handle;
-       uint64_t map_handle;
-};
-
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
-{
-       return container_of(bo, struct vmw_dma_buffer, base);
-}
-
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-       return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
-}
-
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 {
        kref_get(&res->kref);
@@ -116,7 +92,7 @@ static void vmw_resource_release(struct kref *kref)
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
-               vmw_dmabuf_unreference(&res->backup);
+               vmw_bo_unreference(&res->backup);
        }
 
        if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +263,7 @@ out_bad_resource:
 }
 
 /**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a bo.
  *
 * The pointers pointed at by out_surf and out_buf need to be NULL.
  */
@@ -295,7 +271,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
-                          struct vmw_dma_buffer **out_buf)
+                          struct vmw_buffer_object **out_buf)
 {
        struct vmw_resource *res;
        int ret;
@@ -311,512 +287,10 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
        }
 
        *out_surf = NULL;
-       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+       ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
 }
 
-/**
- * Buffer management.
- */
-
-/**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
-                                 bool user)
-{
-       static size_t struct_size, user_struct_size;
-       size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
-       if (unlikely(struct_size == 0)) {
-               size_t backend_size = ttm_round_pot(vmw_tt_size);
-
-               struct_size = backend_size +
-                       ttm_round_pot(sizeof(struct vmw_dma_buffer));
-               user_struct_size = backend_size +
-                       ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
-       }
-
-       if (dev_priv->map_mode == vmw_dma_alloc_coherent)
-               page_array_size +=
-                       ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
-       return ((user) ? user_struct_size : struct_size) +
-               page_array_size;
-}
-
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       vmw_dma_buffer_unmap(vmw_bo);
-       kfree(vmw_bo);
-}
-
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-       vmw_dma_buffer_unmap(&vmw_user_bo->dma);
-       ttm_prime_object_kfree(vmw_user_bo, prime);
-}
-
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                   struct vmw_dma_buffer *vmw_bo,
-                   size_t size, struct ttm_placement *placement,
-                   bool interruptible,
-                   void (*bo_free) (struct ttm_buffer_object *bo))
-{
-       struct ttm_bo_device *bdev = &dev_priv->bdev;
-       size_t acc_size;
-       int ret;
-       bool user = (bo_free == &vmw_user_dmabuf_destroy);
-
-       BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
-
-       acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
-       memset(vmw_bo, 0, sizeof(*vmw_bo));
-
-       INIT_LIST_HEAD(&vmw_bo->res_list);
-
-       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-                         ttm_bo_type_device, placement,
-                         0, interruptible, acc_size,
-                         NULL, NULL, bo_free);
-       return ret;
-}
-
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo;
-       struct ttm_base_object *base = *p_base;
-       struct ttm_buffer_object *bo;
-
-       *p_base = NULL;
-
-       if (unlikely(base == NULL))
-               return;
-
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
-                                  prime.base);
-       bo = &vmw_user_bo->dma.base;
-       ttm_bo_unref(&bo);
-}
-
-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
-                                           enum ttm_ref_type ref_type)
-{
-       struct vmw_user_dma_buffer *user_bo;
-       user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
-
-       switch (ref_type) {
-       case TTM_REF_SYNCCPU_WRITE:
-               ttm_bo_synccpu_write_release(&user_bo->dma.base);
-               break;
-       default:
-               BUG();
-       }
-}
-
-/**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
- *
- * @dev_priv: Pointer to a struct device private.
- * @tfile: Pointer to a struct ttm_object_file on which to register the user
- * object.
- * @size: Size of the dma buffer.
- * @shareable: Boolean whether the buffer is shareable with other open files.
- * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
- * should be assigned.
- */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-                         struct ttm_object_file *tfile,
-                         uint32_t size,
-                         bool shareable,
-                         uint32_t *handle,
-                         struct vmw_dma_buffer **p_dma_buf,
-                         struct ttm_base_object **p_base)
-{
-       struct vmw_user_dma_buffer *user_bo;
-       struct ttm_buffer_object *tmp;
-       int ret;
-
-       user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-       if (unlikely(!user_bo)) {
-               DRM_ERROR("Failed to allocate a buffer.\n");
-               return -ENOMEM;
-       }
-
-       ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
-                             (dev_priv->has_mob) ?
-                             &vmw_sys_placement :
-                             &vmw_vram_sys_placement, true,
-                             &vmw_user_dmabuf_destroy);
-       if (unlikely(ret != 0))
-               return ret;
-
-       tmp = ttm_bo_reference(&user_bo->dma.base);
-       ret = ttm_prime_object_init(tfile,
-                                   size,
-                                   &user_bo->prime,
-                                   shareable,
-                                   ttm_buffer_type,
-                                   &vmw_user_dmabuf_release,
-                                   &vmw_user_dmabuf_ref_obj_release);
-       if (unlikely(ret != 0)) {
-               ttm_bo_unref(&tmp);
-               goto out_no_base_object;
-       }
-
-       *p_dma_buf = &user_bo->dma;
-       if (p_base) {
-               *p_base = &user_bo->prime.base;
-               kref_get(&(*p_base)->refcount);
-       }
-       *handle = user_bo->prime.base.hash.key;
-
-out_no_base_object:
-       return ret;
-}
-
-/**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
- * buffer object.
- *
- * @bo: Pointer to the buffer object being accessed
- * @tfile: Identifying the caller.
- */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-                                 struct ttm_object_file *tfile)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo;
-
-       if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
-               return -EPERM;
-
-       vmw_user_bo = vmw_user_dma_buffer(bo);
-
-       /* Check that the caller has opened the object. */
-       if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
-               return 0;
-
-       DRM_ERROR("Could not grant buffer access.\n");
-       return -EPERM;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
- * access, idling previous GPU operations on the buffer and optionally
- * blocking it for further command submissions.
- *
- * @user_bo: Pointer to the buffer object being grabbed for CPU access
- * @tfile: Identifying the caller.
- * @flags: Flags indicating how the grab should be performed.
- *
- * A blocking grab will be automatically released when @tfile is closed.
- */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
-                                       struct ttm_object_file *tfile,
-                                       uint32_t flags)
-{
-       struct ttm_buffer_object *bo = &user_bo->dma.base;
-       bool existed;
-       int ret;
-
-       if (flags & drm_vmw_synccpu_allow_cs) {
-               bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
-               long lret;
-
-               lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
-                                                          nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
-               if (!lret)
-                       return -EBUSY;
-               else if (lret < 0)
-                       return lret;
-               return 0;
-       }
-
-       ret = ttm_bo_synccpu_write_grab
-               (bo, !!(flags & drm_vmw_synccpu_dontblock));
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                TTM_REF_SYNCCPU_WRITE, &existed, false);
-       if (ret != 0 || existed)
-               ttm_bo_synccpu_write_release(&user_bo->dma.base);
-
-       return ret;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
- * and unblock command submission on the buffer if blocked.
- *
- * @handle: Handle identifying the buffer object.
- * @tfile: Identifying the caller.
- * @flags: Flags indicating the type of release.
- */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
-                                          struct ttm_object_file *tfile,
-                                          uint32_t flags)
-{
-       if (!(flags & drm_vmw_synccpu_allow_cs))
-               return ttm_ref_object_base_unref(tfile, handle,
-                                                TTM_REF_SYNCCPU_WRITE);
-
-       return 0;
-}
-
-/**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
- * functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- *
- * This function checks the ioctl arguments for validity and calls the
- * relevant synccpu functions.
- */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv)
-{
-       struct drm_vmw_synccpu_arg *arg =
-               (struct drm_vmw_synccpu_arg *) data;
-       struct vmw_dma_buffer *dma_buf;
-       struct vmw_user_dma_buffer *user_bo;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct ttm_base_object *buffer_base;
-       int ret;
-
-       if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
-           || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
-                              drm_vmw_synccpu_dontblock |
-                              drm_vmw_synccpu_allow_cs)) != 0) {
-               DRM_ERROR("Illegal synccpu flags.\n");
-               return -EINVAL;
-       }
-
-       switch (arg->op) {
-       case drm_vmw_synccpu_grab:
-               ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
-                                            &buffer_base);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
-                                      dma);
-               ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
-               vmw_dmabuf_unreference(&dma_buf);
-               ttm_base_object_unref(&buffer_base);
-               if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
-                            ret != -EBUSY)) {
-                       DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
-                                 (unsigned int) arg->handle);
-                       return ret;
-               }
-               break;
-       case drm_vmw_synccpu_release:
-               ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
-                                                     arg->flags);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
-                                 (unsigned int) arg->handle);
-                       return ret;
-               }
-               break;
-       default:
-               DRM_ERROR("Invalid synccpu operation.\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       union drm_vmw_alloc_dmabuf_arg *arg =
-           (union drm_vmw_alloc_dmabuf_arg *)data;
-       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
-       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-       struct vmw_dma_buffer *dma_buf;
-       uint32_t handle;
-       int ret;
-
-       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-                                   req->size, false, &handle, &dma_buf,
-                                   NULL);
-       if (unlikely(ret != 0))
-               goto out_no_dmabuf;
-
-       rep->handle = handle;
-       rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
-       rep->cur_gmr_id = handle;
-       rep->cur_gmr_offset = 0;
-
-       vmw_dmabuf_unreference(&dma_buf);
-
-out_no_dmabuf:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-
-       return ret;
-}
-
-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
-{
-       struct drm_vmw_unref_dmabuf_arg *arg =
-           (struct drm_vmw_unref_dmabuf_arg *)data;
-
-       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-                                        arg->handle,
-                                        TTM_REF_USAGE);
-}
-
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                          uint32_t handle, struct vmw_dma_buffer **out,
-                          struct ttm_base_object **p_base)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo;
-       struct ttm_base_object *base;
-
-       base = ttm_base_object_lookup(tfile, handle);
-       if (unlikely(base == NULL)) {
-               pr_err("Invalid buffer object handle 0x%08lx\n",
-                      (unsigned long)handle);
-               return -ESRCH;
-       }
-
-       if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
-               ttm_base_object_unref(&base);
-               pr_err("Invalid buffer object handle 0x%08lx\n",
-                      (unsigned long)handle);
-               return -EINVAL;
-       }
-
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
-                                  prime.base);
-       (void)ttm_bo_reference(&vmw_user_bo->dma.base);
-       if (p_base)
-               *p_base = base;
-       else
-               ttm_base_object_unref(&base);
-       *out = &vmw_user_bo->dma;
-
-       return 0;
-}
-
-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                             struct vmw_dma_buffer *dma_buf,
-                             uint32_t *handle)
-{
-       struct vmw_user_dma_buffer *user_bo;
-
-       if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
-               return -EINVAL;
-
-       user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-
-       *handle = user_bo->prime.base.hash.key;
-       return ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                 TTM_REF_USAGE, NULL, false);
-}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
-                   struct drm_device *dev,
-                   struct drm_mode_create_dumb *args)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_dma_buffer *dma_buf;
-       int ret;
-
-       args->pitch = args->width * ((args->bpp + 7) / 8);
-       args->size = args->pitch * args->height;
-
-       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-                                   args->size, false, &args->handle,
-                                   &dma_buf, NULL);
-       if (unlikely(ret != 0))
-               goto out_no_dmabuf;
-
-       vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- * @offset: The address space offset returned.
- *
- * This is a driver callback for the core drm dumb_map_offset functionality.
- */
-int vmw_dumb_map_offset(struct drm_file *file_priv,
-                       struct drm_device *dev, uint32_t handle,
-                       uint64_t *offset)
-{
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_dma_buffer *out_buf;
-       int ret;
-
-       ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
-       if (ret != 0)
-               return -EINVAL;
-
-       *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
-       vmw_dmabuf_unreference(&out_buf);
-       return 0;
-}
-
-/**
- * vmw_dumb_destroy - Destroy a dumb buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @handle: Handle identifying the dumb buffer.
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
-                    struct drm_device *dev,
-                    uint32_t handle)
-{
-       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-                                        handle, TTM_REF_USAGE);
-}
-
 /**
  * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
  *
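
Among the functions removed above, vmw_dumb_create() (relocated with the rest of the buffer-object code in this series; the hunks here only show the removal side) derives the dumb-buffer layout from width, height and bpp: round bpp up to whole bytes, multiply by width for the scanline pitch, then multiply by height for the allocation size. A worked example of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t width = 1024, height = 768, bpp = 32;

	/* Same formulas as the removed vmw_dumb_create(). */
	uint32_t pitch = width * ((bpp + 7) / 8);   /* 1024 * 4 = 4096 bytes */
	uint64_t size  = (uint64_t)pitch * height;  /* 4096 * 768 = 3 MiB    */

	printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
	return 0;
}

The (bpp + 7) / 8 rounding matters for depths that are not a multiple of 8, e.g. bpp = 12 yields 2 bytes per pixel rather than 1.
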
@@ -829,7 +303,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 {
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
-       struct vmw_dma_buffer *backup;
+       struct vmw_buffer_object *backup;
        int ret;
 
        if (likely(res->backup)) {
@@ -841,16 +315,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
        if (unlikely(!backup))
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+       ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
-                             &vmw_dmabuf_bo_free);
+                             &vmw_bo_bo_free);
        if (unlikely(ret != 0))
-               goto out_no_dmabuf;
+               goto out_no_bo;
 
        res->backup = backup;
 
-out_no_dmabuf:
+out_no_bo:
        return ret;
 }
 
@@ -919,7 +393,7 @@ out_bind_failed:
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
-                           struct vmw_dma_buffer *new_backup,
+                           struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
 {
        struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +405,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
-                       vmw_dmabuf_unreference(&res->backup);
+                       vmw_bo_unreference(&res->backup);
                }
 
                if (new_backup) {
-                       res->backup = vmw_dmabuf_reference(new_backup);
+                       res->backup = vmw_bo_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
@@ -959,6 +433,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  *                             for a resource and in that case, allocate
  *                             one, reserve and validate it.
  *
+ * @ticket:         The ww acquire context to use, or NULL if trylocking.
  * @res:            The resource for which to allocate a backup buffer.
  * @interruptible:  Whether any sleeps during allocation should be
  *                  performed while interruptible.
@@ -966,7 +441,8 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  *                  reserved and validated backup buffer.
  */
 static int
-vmw_resource_check_buffer(struct vmw_resource *res,
+vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
+                         struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
 {
@@ -985,7 +461,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
+       ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
@@ -1003,11 +479,11 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        return 0;
 
 out_no_validate:
-       ttm_eu_backoff_reservation(NULL, &val_list);
+       ttm_eu_backoff_reservation(ticket, &val_list);
 out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
-               vmw_dmabuf_unreference(&res->backup);
+               vmw_bo_unreference(&res->backup);
 
        return ret;
 }
@@ -1050,10 +526,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
  * vmw_resource_backoff_reservation - Unreserve and unreference a
  *                                    backup buffer
 *
+ * @ticket:         The ww acquire ctx used for reservation.
  * @val_buf:        Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
+                                struct ttm_validate_buffer *val_buf)
 {
        struct list_head val_list;
 
@@ -1062,7 +540,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 
        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
-       ttm_eu_backoff_reservation(NULL, &val_list);
+       ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_unref(&val_buf->bo);
 }
 
@@ -1070,10 +548,12 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
  * vmw_resource_do_evict - Evict a resource, and transfer its data
  *                         to a backup buffer.
  *
+ * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
  * @res:            The resource to evict.
  * @interruptible:  Whether to wait interruptible.
  */
-static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
+                                struct vmw_resource *res, bool interruptible)
 {
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
@@ -1083,7 +563,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 
        val_buf.bo = NULL;
        val_buf.shared = false;
-       ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
+       ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1098,7 +578,7 @@ static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
        res->backup_dirty = true;
        res->res_dirty = false;
 out_no_unbind:
-       vmw_resource_backoff_reservation(&val_buf);
+       vmw_resource_backoff_reservation(ticket, &val_buf);
 
        return ret;
 }
@@ -1152,7 +632,8 @@ int vmw_resource_validate(struct vmw_resource *res)
 
                write_unlock(&dev_priv->resource_lock);
 
-               ret = vmw_resource_do_evict(evict_res, true);
+               /* Trylock backup buffers with a NULL ticket. */
+               ret = vmw_resource_do_evict(NULL, evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
@@ -1171,7 +652,7 @@ int vmw_resource_validate(struct vmw_resource *res)
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
-               vmw_dmabuf_unreference(&res->backup);
+               vmw_bo_unreference(&res->backup);
        }
 
        return 0;
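
Note the NULL ticket on the eviction call above: the reworked vmw_resource_check_buffer() forwards the ww_acquire_ctx to ttm_eu_reserve_buffers(), and per the new kernel-doc a NULL ticket means the backup buffers are only trylocked, so the eviction loop never blocks on a contended reservation and simply requeues the resource on the LRU list when the trylock fails. The same "NULL context means trylock" convention, sketched with a plain pthread mutex instead of the kernel's ww_mutex machinery:

#include <errno.h>
#include <pthread.h>

struct ticket { int unused; };  /* stand-in for struct ww_acquire_ctx */

/* Reserve @lock: block when an acquire context is supplied, trylock
 * when @t is NULL, mirroring the convention documented above. */
static int reserve(pthread_mutex_t *lock, const struct ticket *t)
{
	if (!t)
		return pthread_mutex_trylock(lock) ? -EBUSY : 0;
	return pthread_mutex_lock(lock) ? -EINVAL : 0;
}
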
@@ -1180,109 +661,39 @@ out_no_validate:
        return ret;
 }
 
-/**
- * vmw_fence_single_bo - Utility function to fence a single TTM buffer
- *                       object without unreserving it.
- *
- * @bo:             Pointer to the struct ttm_buffer_object to fence.
- * @fence:          Pointer to the fence. If NULL, this function will
- *                  insert a fence into the command stream..
- *
- * Contrary to the ttm_eu version of this function, it takes only
- * a single buffer object instead of a list, and it also doesn't
- * unreserve the buffer object, which needs to be done separately.
- */
-void vmw_fence_single_bo(struct ttm_buffer_object *bo,
-                        struct vmw_fence_obj *fence)
-{
-       struct ttm_bo_device *bdev = bo->bdev;
-
-       struct vmw_private *dev_priv =
-               container_of(bdev, struct vmw_private, bdev);
-
-       if (fence == NULL) {
-               vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-               reservation_object_add_excl_fence(bo->resv, &fence->base);
-               dma_fence_put(&fence->base);
-       } else
-               reservation_object_add_excl_fence(bo->resv, &fence->base);
-}
 
 /**
- * vmw_resource_move_notify - TTM move_notify_callback
+ * vmw_resource_unbind_list - Unbind all resources bound to the given buffer object
  *
- * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
- *       region the move is taking place.
+ * @vbo: Pointer to the current backing MOB.
  *
  * Evicts the Guest Backed hardware resource if the backup
  * buffer is being moved out of MOB memory.
- * Note that this function should not race with the resource
- * validation code as long as it accesses only members of struct
- * resource that remain static while bo::res is !NULL and
- * while we have @bo reserved. struct resource::backup is *not* a
- * static member. The resource validation code will take care
- * to set @bo::res to NULL, while having @bo reserved when the
- * buffer is no longer bound to the resource, so @bo:res can be
- * used to determine whether there is a need to unbind and whether
- * it is safe to unbind.
+ * Note that this function will not race with the resource
+ * validation code, since resource validation and eviction
+ * both require the backup buffer to be reserved.
  */
-void vmw_resource_move_notify(struct ttm_buffer_object *bo,
-                             struct ttm_mem_reg *mem)
+void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
 {
-       struct vmw_dma_buffer *dma_buf;
-
-       if (mem == NULL)
-               return;
-
-       if (bo->destroy != vmw_dmabuf_bo_free &&
-           bo->destroy != vmw_user_dmabuf_destroy)
-               return;
-
-       dma_buf = container_of(bo, struct vmw_dma_buffer, base);
-
-       /*
-        * Kill any cached kernel maps before move. An optimization could
-        * be to do this iff source or destination memory type is VRAM.
-        */
-       vmw_dma_buffer_unmap(dma_buf);
 
-       if (mem->mem_type != VMW_PL_MOB) {
-               struct vmw_resource *res, *n;
-               struct ttm_validate_buffer val_buf;
+       struct vmw_resource *res, *next;
+       struct ttm_validate_buffer val_buf = {
+               .bo = &vbo->base,
+               .shared = false
+       };
 
-               val_buf.bo = bo;
-               val_buf.shared = false;
+       lockdep_assert_held(&vbo->base.resv->lock.base);
+       list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
+               if (!res->func->unbind)
+                       continue;
 
-               list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
-
-                       if (unlikely(res->func->unbind == NULL))
-                               continue;
-
-                       (void) res->func->unbind(res, true, &val_buf);
-                       res->backup_dirty = true;
-                       res->res_dirty = false;
-                       list_del_init(&res->mob_head);
-               }
-
-               (void) ttm_bo_wait(bo, false, false);
+               (void) res->func->unbind(res, true, &val_buf);
+               res->backup_dirty = true;
+               res->res_dirty = false;
+               list_del_init(&res->mob_head);
        }
-}
-
-
-/**
- * vmw_resource_swap_notify - swapout notify callback.
- *
- * @bo: The buffer object to be swapped out.
- */
-void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
-{
-       if (bo->destroy != vmw_dmabuf_bo_free &&
-           bo->destroy != vmw_user_dmabuf_destroy)
-               return;
 
-       /* Kill any cached kernel maps before swapout */
-       vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+       (void) ttm_bo_wait(&vbo->base, false, false);
 }
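
Callers of vmw_resource_unbind_list() are expected to hold the buffer reservation that the lockdep assertion above checks. A hedged sketch of that calling convention, using the four-argument ttm_bo_reserve() seen elsewhere in this patch:

        /* Reserve the buffer object, unbind its resources, then release. */
        ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
        if (ret == 0) {
                vmw_resource_unbind_list(vbo);
                ttm_bo_unreserve(&vbo->base);
        }
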
 
 
@@ -1294,7 +705,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
  * Read back cached states from the device if they exist.  This function
  * assumes binding_mutex is held.
  */
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 {
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
@@ -1344,7 +755,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
 void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
 {
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;
 
@@ -1353,7 +764,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 
        mutex_lock(&dev_priv->binding_mutex);
 
-       dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+       dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
@@ -1368,7 +779,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 
                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-               vmw_fence_single_bo(bo, fence);
+               vmw_bo_fence_single(bo, fence);
 
                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);
@@ -1405,6 +816,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
+       struct ww_acquire_ctx ticket;
 
        do {
                write_lock(&dev_priv->resource_lock);
@@ -1418,7 +830,8 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);
 
-               ret = vmw_resource_do_evict(evict_res, false);
+               /* Lock backup buffers with a ticket, waiting if necessary. */
+               ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
@@ -1481,7 +894,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                goto out_no_reserve;
 
        if (res->pin_count == 0) {
-               struct vmw_dma_buffer *vbo = NULL;
+               struct vmw_buffer_object *vbo = NULL;
 
                if (res->backup) {
                        vbo = res->backup;
@@ -1539,7 +952,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
 
        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
-               struct vmw_dma_buffer *vbo = res->backup;
+               struct vmw_buffer_object *vbo = res->backup;
 
                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
index ac05968a832bcdb99a40fd2a4bf2d50d854e97fe..a8c1c5ebd71d39a0f2235d08ecd3e552ff0fcf8f 100644 (file)
@@ -1,7 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
  *
- * Copyright © 2012-2014 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2014 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 3d667e903beb7bd76d13d8048a26a1763e835709..ad0de7f0cd60f07006e58fe4a544b19b973ccb87 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2011-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
        SVGAFifoCmdBlitScreenToGMRFB body;
 };
 
-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
 };
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
        struct vmw_display_unit base;
 
        unsigned long buffer_size; /**< Size of allocated buffer */
-       struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+       struct vmw_buffer_object *buffer; /**< Backing store buffer */
 
        bool defined;
 };
@@ -109,7 +109,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
  */
 static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
                               struct vmw_screen_object_unit *sou,
-                              uint32_t x, uint32_t y,
+                              int x, int y,
                               struct drm_display_mode *mode)
 {
        size_t fifo_size;
@@ -139,13 +139,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
                (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
        cmd->obj.size.width = mode->hdisplay;
        cmd->obj.size.height = mode->vdisplay;
-       if (sou->base.is_implicit) {
-               cmd->obj.root.x = x;
-               cmd->obj.root.y = y;
-       } else {
-               cmd->obj.root.x = sou->base.gui_x;
-               cmd->obj.root.y = sou->base.gui_y;
-       }
+       cmd->obj.root.x = x;
+       cmd->obj.root.y = y;
        sou->base.set_gui_x = cmd->obj.root.x;
        sou->base.set_gui_y = cmd->obj.root.y;
 
@@ -222,12 +217,11 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
        struct vmw_plane_state *vps;
        int ret;
 
-
-       sou      = vmw_crtc_to_sou(crtc);
+       sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);
-       ps       = crtc->primary->state;
-       fb       = ps->fb;
-       vps      = vmw_plane_state_to_vps(ps);
+       ps = crtc->primary->state;
+       fb = ps->fb;
+       vps = vmw_plane_state_to_vps(ps);
 
        vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
 
@@ -240,11 +234,25 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
        }
 
        if (vfb) {
-               sou->buffer = vps->dmabuf;
-               sou->buffer_size = vps->dmabuf_size;
+               struct drm_connector_state *conn_state;
+               struct vmw_connector_state *vmw_conn_state;
+               int x, y;
+
+               sou->buffer = vps->bo;
+               sou->buffer_size = vps->bo_size;
+
+               if (sou->base.is_implicit) {
+                       x = crtc->x;
+                       y = crtc->y;
+               } else {
+                       conn_state = sou->base.connector.state;
+                       vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+
+                       x = vmw_conn_state->gui_x;
+                       y = vmw_conn_state->gui_y;
+               }
 
-               ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
-                                         &crtc->mode);
+               ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
                if (ret)
                        DRM_ERROR("Failed to define Screen Object %dx%d\n",
                                  crtc->x, crtc->y);
@@ -408,10 +416,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
        struct drm_crtc *crtc = plane->state->crtc ?
                plane->state->crtc : old_state->crtc;
 
-       if (vps->dmabuf)
-               vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
-       vmw_dmabuf_unreference(&vps->dmabuf);
-       vps->dmabuf_size = 0;
+       if (vps->bo)
+               vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+       vmw_bo_unreference(&vps->bo);
+       vps->bo_size = 0;
 
        vmw_du_plane_cleanup_fb(plane, old_state);
 }
@@ -440,8 +448,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 
 
        if (!new_fb) {
-               vmw_dmabuf_unreference(&vps->dmabuf);
-               vps->dmabuf_size = 0;
+               vmw_bo_unreference(&vps->bo);
+               vps->bo_size = 0;
 
                return 0;
        }
@@ -449,22 +457,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
        size = new_state->crtc_w * new_state->crtc_h * 4;
        dev_priv = vmw_priv(crtc->dev);
 
-       if (vps->dmabuf) {
-               if (vps->dmabuf_size == size) {
+       if (vps->bo) {
+               if (vps->bo_size == size) {
                        /*
                         * Note that this might temporarily up the pin-count
                         * to 2, until cleanup_fb() is called.
                         */
-                       return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+                       return vmw_bo_pin_in_vram(dev_priv, vps->bo,
                                                      true);
                }
 
-               vmw_dmabuf_unreference(&vps->dmabuf);
-               vps->dmabuf_size = 0;
+               vmw_bo_unreference(&vps->bo);
+               vps->bo_size = 0;
        }
 
-       vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
-       if (!vps->dmabuf)
+       vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+       if (!vps->bo)
                return -ENOMEM;
 
        vmw_svga_enable(dev_priv);
@@ -473,22 +481,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
         * resume the overlays, this is preferred to failing to alloc.
         */
        vmw_overlay_pause_all(dev_priv);
-       ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
+       ret = vmw_bo_init(dev_priv, vps->bo, size,
                              &vmw_vram_ne_placement,
-                             false, &vmw_dmabuf_bo_free);
+                             false, &vmw_bo_bo_free);
        vmw_overlay_resume_all(dev_priv);
        if (ret) {
-               vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+               vps->bo = NULL; /* vmw_bo_init frees on error */
                return ret;
        }
 
-       vps->dmabuf_size = size;
+       vps->bo_size = size;
 
        /*
         * TTM already thinks the buffer is pinned, but make sure the
         * pin_count is upped.
         */
-       return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+       return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
 }
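
For clarity, the pin lifetime implied by the two plane hooks, assembled from the lines above as a hedged sketch (prepare_fb() pins the buffer object, cleanup_fb() unpins it, so reusing a same-size buffer may briefly hold two pins):

        /* prepare_fb(): pin the backing buffer object in VRAM. */
        ret = vmw_bo_pin_in_vram(dev_priv, vps->bo, true);

        /* cleanup_fb(): drop that pin and the reference. */
        vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
        vmw_bo_unreference(&vps->bo);
        vps->bo_size = 0;
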
 
 
@@ -512,10 +520,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                vclips.w = crtc->mode.hdisplay;
                vclips.h = crtc->mode.vdisplay;
 
-               if (vfb->dmabuf)
-                       ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
-                                                         &vclips, 1, 1, true,
-                                                         &fence, crtc);
+               if (vfb->bo)
+                       ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+                                                     &vclips, 1, 1, true,
+                                                     &fence, crtc);
                else
                        ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
                                                           &vclips, NULL, 0, 0,
@@ -527,8 +535,6 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                 */
                if (ret != 0)
                        DRM_ERROR("Failed to update screen.\n");
-
-               crtc->primary->fb = plane->state->fb;
        } else {
                /*
                 * When disabling a plane, CRTC and FB should always be NULL
@@ -697,7 +703,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
                goto err_free_connector;
        }
 
-       (void) drm_mode_connector_attach_encoder(connector, encoder);
+       (void) drm_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;
 
@@ -777,11 +783,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
        return 0;
 }
 
-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
                                  struct vmw_framebuffer *framebuffer)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+       struct vmw_buffer_object *buf =
+               container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        int depth = framebuffer->base.format->depth;
        struct {
@@ -972,13 +978,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
  *
  * @dirty: The closure structure.
  *
  * Commits a previously built command buffer of readback clips.
  */
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
@@ -986,20 +992,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
        }
 
        vmw_fifo_commit(dirty->dev_priv,
-                       sizeof(struct vmw_kms_sou_dmabuf_blit) *
+                       sizeof(struct vmw_kms_sou_bo_blit) *
                        dirty->num_hits);
 }
 
 /**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
  *
  * @dirty: The closure structure
  *
  * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
  */
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
 {
-       struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+       struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
 
        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1014,10 +1020,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }
 
 /**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
  *
  * @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
  * @clips: Array of clip rects.
  * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  * be NULL.
@@ -1027,12 +1033,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
  * @out_fence: If non-NULL, will return a ref-counted pointer to a
  * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  * case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform the dirty operation on that crtc only.
  *
  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  * interrupted.
  */
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                                struct vmw_framebuffer *framebuffer,
                                struct drm_clip_rect *clips,
                                struct drm_vmw_rect *vclips,
@@ -1041,8 +1047,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
                                struct vmw_fence_obj **out_fence,
                                struct drm_crtc *crtc)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+       struct vmw_buffer_object *buf =
+               container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;
@@ -1052,14 +1058,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+       ret = do_bo_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;
 
        dirty.crtc = crtc;
-       dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
-       dirty.clip = vmw_sou_dmabuf_clip;
-       dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+       dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+       dirty.clip = vmw_sou_bo_clip;
+       dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
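
For orientation, the dirty-closure pattern used above, restated as a sketch (all identifiers are from this diff; that vmw_kms_helper_dirty() reserves fifo_reserve_size bytes, calls clip() once per intersecting rectangle, and calls fifo_commit() once at the end is inferred from the callbacks and is an assumption):

        /* Wire a bo-dirty closure and hand it to the dirty helper. */
        struct vmw_kms_dirty dirty = {
                .crtc = crtc,
                .clip = vmw_sou_bo_clip,
                .fifo_commit = vmw_sou_bo_fifo_commit,
                .fifo_reserve_size =
                        sizeof(struct vmw_kms_sou_bo_blit) * num_clips,
        };

        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
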
@@ -1118,12 +1124,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
 
 /**
  * vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm_file identifying the caller.
  * Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @user_fence_rep: User-space provided structure for fence information.
  * Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
@@ -1141,8 +1147,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         uint32_t num_clips,
                         struct drm_crtc *crtc)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+       struct vmw_buffer_object *buf =
+               container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;
 
@@ -1151,7 +1157,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+       ret = do_bo_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;
 
index 73b8e9a163685677c938a98126a7ce402bcaf6d8..fe4842ca3b6ed21d82d782861e54aa27afe4f1c1 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              SVGA3dShaderType type,
                              uint8_t num_input_sig,
                              uint8_t num_output_sig,
-                             struct vmw_dma_buffer *byte_code,
+                             struct vmw_buffer_object *byte_code,
                              void (*res_free) (struct vmw_resource *res))
 {
        struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 
        res->backup_size = size;
        if (byte_code) {
-               res->backup = vmw_dmabuf_reference(byte_code);
+               res->backup = vmw_bo_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
@@ -306,7 +306,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
 
-       vmw_fence_single_bo(val_buf->bo, fence);
+       vmw_bo_fence_single(val_buf->bo, fence);
 
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
@@ -537,7 +537,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
 
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
-       vmw_fence_single_bo(val_buf->bo, fence);
+       vmw_bo_fence_single(val_buf->bo, fence);
 
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 }
 
 static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
-                                struct vmw_dma_buffer *buffer,
+                                struct vmw_buffer_object *buffer,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:
 
 
 static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-                                            struct vmw_dma_buffer *buffer,
+                                            struct vmw_buffer_object *buffer,
                                             size_t shader_size,
                                             size_t offset,
                                             SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_dma_buffer *buffer = NULL;
+       struct vmw_buffer_object *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;
 
        if (buffer_handle != SVGA3D_INVALID_ID) {
-               ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
+               ret = vmw_user_bo_lookup(tfile, buffer_handle,
                                             &buffer, NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 
        ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
-       vmw_dmabuf_unreference(&buffer);
+       vmw_bo_unreference(&buffer);
        return ret;
 }
 
@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          struct list_head *list)
 {
        struct ttm_operation_ctx ctx = { false, true };
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
        if (unlikely(!buf))
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
-                             true, vmw_dmabuf_bo_free);
+       ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+                             true, vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out;
 
@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                                 res, list);
        vmw_resource_unreference(&res);
 no_reserve:
-       vmw_dmabuf_unreference(&buf);
+       vmw_bo_unreference(&buf);
 out:
        return ret;
 }
index a0cb310665cc50548680ff6e729ea79dfc7269be..6ebc5affde1476587636d5312514fa32d3e5f3dd 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index d3573c37c436575f4fed5fbe20b4cac050172ad8..e9b6b7baa00946f35cb0a6eb2df09ce75c82a6a1 100644 (file)
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 268738387b5e32925b35a3d44854c036d1169548..b80c7252f2fd136d154359912614e964bf966517 100644 (file)
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
 /**************************************************************************
- * Copyright © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index 67331f01ef32e72ea46f600ebce3479112eeb48e..93f6b96ca7bbbe179dfa74957dd854c04b06b17c 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /******************************************************************************
  *
- * COPYRIGHT © 2014-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * COPYRIGHT (C) 2014-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -44,7 +44,7 @@
 enum stdu_content_type {
        SAME_AS_DISPLAY = 0,
        SEPARATE_SURFACE,
-       SEPARATE_DMA
+       SEPARATE_BO
 };
 
 /**
@@ -58,7 +58,7 @@ enum stdu_content_type {
  * @bottom: Bottom side of bounding box.
  * @fb_left: Left side of the framebuffer/content bounding box
  * @fb_top: Top of the framebuffer/content bounding box
- * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
 struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
        s32 fb_left, fb_top;
        u32 pitch;
        union {
-               struct vmw_dma_buffer *buf;
+               struct vmw_buffer_object *buf;
                u32 sid;
        };
 };
@@ -178,13 +178,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
        cmd->body.height = mode->vdisplay;
        cmd->body.flags  = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
        cmd->body.dpi    = 0;
-       if (stdu->base.is_implicit) {
-               cmd->body.xRoot  = crtc_x;
-               cmd->body.yRoot  = crtc_y;
-       } else {
-               cmd->body.xRoot  = stdu->base.gui_x;
-               cmd->body.yRoot  = stdu->base.gui_y;
-       }
+       cmd->body.xRoot  = crtc_x;
+       cmd->body.yRoot  = crtc_y;
+
        stdu->base.set_gui_x = cmd->body.xRoot;
        stdu->base.set_gui_y = cmd->body.yRoot;
 
@@ -374,11 +370,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct vmw_private *dev_priv;
        struct vmw_screen_target_display_unit *stdu;
-       int ret;
-
+       struct drm_connector_state *conn_state;
+       struct vmw_connector_state *vmw_conn_state;
+       int x, y, ret;
 
-       stdu     = vmw_crtc_to_stdu(crtc);
+       stdu = vmw_crtc_to_stdu(crtc);
        dev_priv = vmw_priv(crtc->dev);
+       conn_state = stdu->base.connector.state;
+       vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
 
        if (stdu->defined) {
                ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
@@ -397,8 +396,16 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
        if (!crtc->state->enable)
                return;
 
+       if (stdu->base.is_implicit) {
+               x = crtc->x;
+               y = crtc->y;
+       } else {
+               x = vmw_conn_state->gui_x;
+               y = vmw_conn_state->gui_y;
+       }
+
        vmw_svga_enable(dev_priv);
-       ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, crtc->x, crtc->y);
+       ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
 
        if (ret)
                DRM_ERROR("Failed to define Screen Target of size %dx%d\n",
@@ -414,6 +421,7 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
 static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
 {
+       struct drm_plane_state *plane_state = crtc->primary->state;
        struct vmw_private *dev_priv;
        struct vmw_screen_target_display_unit *stdu;
        struct vmw_framebuffer *vfb;
@@ -422,7 +430,7 @@ static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
 
        stdu     = vmw_crtc_to_stdu(crtc);
        dev_priv = vmw_priv(crtc->dev);
-       fb       = crtc->primary->fb;
+       fb       = plane_state->fb;
 
        vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
 
@@ -507,14 +515,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
 
 
 /**
- * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
  *
  * @dirty: The closure structure.
  *
  * Encodes a surface DMA command cliprect and updates the bounding box
  * for the DMA.
  */
-static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -542,14 +550,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }
 
 /**
- * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
  *
  * @dirty: The closure structure.
  *
  * Fills in the missing fields in a DMA command, and optionally encodes
  * a screen target update command, depending on transfer direction.
  */
-static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -593,13 +601,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
 
 
 /**
- * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
  *
  * @dirty: The closure structure.
  *
  * This function calculates the bounding box for all the incoming clips.
  */
-static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -623,14 +631,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
 
 /**
- * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from a buffer object
  *
  * @dirty: The closure structure.
  *
  * For the special case when we cannot create a proxy surface in a
  * 2D VM, we have to do a CPU blit ourselves.
  */
-static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -651,7 +659,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        if (width == 0 || height == 0)
                return;
 
-       /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+       /* Assume we are blitting from Guest (bo) to Host (display_srf) */
        dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
        dst_bo = &stdu->display_srf->res.backup->base;
        dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -711,13 +719,13 @@ out_cleanup:
 }
 
 /**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
  * framebuffer and the screen target system.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm-file identifying the caller. May be
  * set to NULL, but then @user_fence_rep must also be set to NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
  * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  * be NULL.
@@ -746,8 +754,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                     bool interruptible,
                     struct drm_crtc *crtc)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+       struct vmw_buffer_object *buf =
+               container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_stdu_dirty ddirty;
        int ret;
        bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -769,8 +777,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
        ddirty.fb_left = ddirty.fb_top = S32_MAX;
        ddirty.pitch = vfb->base.pitches[0];
        ddirty.buf = buf;
-       ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
-       ddirty.base.clip = vmw_stdu_dmabuf_clip;
+       ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+       ddirty.base.clip = vmw_stdu_bo_clip;
        ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
                num_clips * sizeof(SVGA3dCopyBox) +
                sizeof(SVGA3dCmdSurfaceDMASuffix);
@@ -779,8 +787,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 
        if (cpu_blit) {
-               ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
-               ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
+               ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+               ddirty.base.clip = vmw_stdu_bo_cpu_clip;
                ddirty.base.fifo_reserve_size = 0;
        }
 
@@ -926,7 +934,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       if (vfbs->is_dmabuf_proxy) {
+       if (vfbs->is_bo_proxy) {
                ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
                if (ret)
                        goto out_finish;
@@ -1074,7 +1082,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
  * @new_state: info on the new plane state, including the FB
  *
  * This function allocates a new display surface if the content is
- * backed by a DMA.  The display surface is pinned here, and it'll
+ * backed by a buffer object.  The display surface is pinned here, and it'll
  * be unpinned in .cleanup_fb()
  *
  * Returns 0 on success
@@ -1104,13 +1112,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
        }
 
        vfb = vmw_framebuffer_to_vfb(new_fb);
-       new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+       new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
 
        if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
            new_vfbs->surface->base_size.height == vdisplay)
                new_content_type = SAME_AS_DISPLAY;
-       else if (vfb->dmabuf)
-               new_content_type = SEPARATE_DMA;
+       else if (vfb->bo)
+               new_content_type = SEPARATE_BO;
        else
                new_content_type = SEPARATE_SURFACE;
 
@@ -1123,10 +1131,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
                display_base_size.depth  = 1;
 
                /*
-                * If content buffer is a DMA buf, then we have to construct
-                * surface info
+                * If content buffer is a buffer object, then we have to
+                * construct surface info
                 */
-               if (new_content_type == SEPARATE_DMA) {
+               if (new_content_type == SEPARATE_BO) {
 
                        switch (new_fb->format->cpp[0]*8) {
                        case 32:
@@ -1149,6 +1157,9 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
                        content_srf.flags             = 0;
                        content_srf.mip_levels[0]     = 1;
                        content_srf.multisample_count = 0;
+                       content_srf.multisample_pattern =
+                               SVGA3D_MS_PATTERN_NONE;
+                       content_srf.quality_level = SVGA3D_MS_QUALITY_NONE;
                } else {
                        content_srf = *new_vfbs->surface;
                }
@@ -1177,6 +1188,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
                                 content_srf.multisample_count,
                                 0,
                                 display_base_size,
+                                content_srf.multisample_pattern,
+                                content_srf.quality_level,
                                 &vps->surf);
                        if (ret != 0) {
                                DRM_ERROR("Couldn't allocate STDU surface.\n");
@@ -1211,12 +1224,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
        vps->content_fb_type = new_content_type;
 
        /*
-        * This should only happen if the DMA buf is too large to create a
+        * This should only happen if the buffer object is too large to create a
         * proxy surface for.
-        * If we are a 2D VM with a DMA buffer then we have to use CPU blit
+        * If we are a 2D VM with a buffer object then we have to use CPU blit
         * so cache these mappings
         */
-       if (vps->content_fb_type == SEPARATE_DMA &&
+       if (vps->content_fb_type == SEPARATE_BO &&
            !(dev_priv->capabilities & SVGA_CAP_3D))
                vps->cpp = new_fb->pitches[0] / new_fb->width;
 
@@ -1275,7 +1288,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                if (ret)
                        DRM_ERROR("Failed to bind surface to STDU.\n");
 
-               if (vfb->dmabuf)
+               if (vfb->bo)
                        ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
                                               &vclips, 1, 1, true, false,
                                               crtc);
@@ -1285,8 +1298,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                                                         1, 1, NULL, crtc);
                if (ret)
                        DRM_ERROR("Failed to update STDU.\n");
-
-               crtc->primary->fb = plane->state->fb;
        } else {
                crtc = old_state->crtc;
                stdu = vmw_crtc_to_stdu(crtc);
@@ -1487,7 +1498,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
                goto err_free_connector;
        }
 
-       (void) drm_mode_connector_attach_encoder(connector, encoder);
+       (void) drm_connector_attach_encoder(connector, encoder);
        encoder->possible_crtcs = (1 << unit);
        encoder->possible_clones = 0;
 
index b236c48bf265b864cd378b883d8284d59d41f951..e125233e074bf82a1128543ca5c8f9fd2b28604c 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
 #include "vmwgfx_binding.h"
 #include "device_include/svga3d_surfacedefs.h"
 
+#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+       (svga3d_flags & ((uint64_t)U32_MAX))
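
A quick worked example of the new helper macros (illustrative only, not part of the patch):

        /* Round-trip a 64-bit flag word through the helpers above. */
        uint64_t flags = SVGA3D_FLAGS_64(0x1u, 0x80000000u);
        uint32_t upper = SVGA3D_FLAGS_UPPER_32(flags); /* 0x1 */
        uint32_t lower = SVGA3D_FLAGS_LOWER_32(flags); /* 0x80000000 */
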
 
 /**
  * struct vmw_user_surface - User-space visible surface resource
@@ -81,7 +85,16 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
                                 bool readback,
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_surface_destroy(struct vmw_resource *res);
-
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+                              struct drm_vmw_gb_surface_create_ext_req *req,
+                              struct drm_vmw_gb_surface_create_rep *rep,
+                              struct drm_file *file_priv);
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+                                 struct drm_vmw_surface_arg *req,
+                                 struct drm_vmw_gb_surface_ref_ext_rep *rep,
+                                 struct drm_file *file_priv);
 
 static const struct vmw_user_resource_conv user_surface_conv = {
        .object_type = VMW_RES_SURFACE,
@@ -224,7 +237,12 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
        cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
        cmd->header.size = cmd_len;
        cmd->body.sid = srf->res.id;
-       cmd->body.surfaceFlags = srf->flags;
+       /*
+        * Downcast surfaceFlags; it was upcast when received from
+        * user-space, since the driver stores it internally as 64 bit.
+        * The legacy surface define supports only 32-bit flags.
+        */
+       cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
        cmd->body.format = srf->format;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                cmd->body.face[i].numMipLevels = srf->mip_levels[i];
@@ -468,7 +486,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
 
-       vmw_fence_single_bo(val_buf->bo, fence);
+       vmw_bo_fence_single(val_buf->bo, fence);
 
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
@@ -760,7 +778,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        srf = &user_srf->srf;
        res = &srf->res;
 
-       srf->flags = req->flags;
+       /* The driver stores these flags internally as 64 bit */
+       srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;
 
@@ -785,6 +804,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        srf->base_size = *srf->sizes;
        srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
        srf->multisample_count = 0;
+       srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+       srf->quality_level = SVGA3D_MS_QUALITY_NONE;
 
        cur_bo_offset = 0;
        cur_offset = srf->offsets;
@@ -842,12 +863,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        if (dev_priv->has_mob && req->shareable) {
                uint32_t backup_handle;
 
-               ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
-                                           res->backup_size,
-                                           true,
-                                           &backup_handle,
-                                           &res->backup,
-                                           &user_srf->backup_base);
+               ret = vmw_user_bo_alloc(dev_priv, tfile,
+                                       res->backup_size,
+                                       true,
+                                       &backup_handle,
+                                       &res->backup,
+                                       &user_srf->backup_base);
                if (unlikely(ret != 0)) {
                        vmw_resource_unreference(&res);
                        goto out_unlock;
@@ -990,7 +1011,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
        user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;
 
-       rep->flags = srf->flags;
+       /* Downcast of flags when sending back to user space */
+       rep->flags = (uint32_t)srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
@@ -1031,6 +1053,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBSurface_v2 body;
        } *cmd2;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineGBSurface_v3 body;
+       } *cmd3;
 
        if (likely(res->id != -1))
                return 0;
@@ -1047,7 +1073,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
                goto out_no_fifo;
        }
 
-       if (srf->array_size > 0) {
+       if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+               cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
+               cmd_len = sizeof(cmd3->body);
+               submit_len = sizeof(*cmd3);
+       } else if (srf->array_size > 0) {
                /* has_dx checked on creation time. */
                cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
                cmd_len = sizeof(cmd2->body);
@@ -1060,6 +1090,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 
        cmd = vmw_fifo_reserve(dev_priv, submit_len);
        cmd2 = (typeof(cmd2))cmd;
+       cmd3 = (typeof(cmd3))cmd;
        if (unlikely(!cmd)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "creation.\n");
@@ -1067,12 +1098,27 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
                goto out_no_fifo;
        }
 
-       if (srf->array_size > 0) {
+       if (dev_priv->has_sm4_1 && srf->array_size > 0) {
+               cmd3->header.id = cmd_id;
+               cmd3->header.size = cmd_len;
+               cmd3->body.sid = srf->res.id;
+               cmd3->body.surfaceFlags = srf->flags;
+               cmd3->body.format = srf->format;
+               cmd3->body.numMipLevels = srf->mip_levels[0];
+               cmd3->body.multisampleCount = srf->multisample_count;
+               cmd3->body.multisamplePattern = srf->multisample_pattern;
+               cmd3->body.qualityLevel = srf->quality_level;
+               cmd3->body.autogenFilter = srf->autogen_filter;
+               cmd3->body.size.width = srf->base_size.width;
+               cmd3->body.size.height = srf->base_size.height;
+               cmd3->body.size.depth = srf->base_size.depth;
+               cmd3->body.arraySize = srf->array_size;
+       } else if (srf->array_size > 0) {
                cmd2->header.id = cmd_id;
                cmd2->header.size = cmd_len;
                cmd2->body.sid = srf->res.id;
                cmd2->body.surfaceFlags = srf->flags;
-               cmd2->body.format = cpu_to_le32(srf->format);
+               cmd2->body.format = srf->format;
                cmd2->body.numMipLevels = srf->mip_levels[0];
                cmd2->body.multisampleCount = srf->multisample_count;
                cmd2->body.autogenFilter = srf->autogen_filter;
@@ -1085,7 +1131,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
                cmd->header.size = cmd_len;
                cmd->body.sid = srf->res.id;
                cmd->body.surfaceFlags = srf->flags;
-               cmd->body.format = cpu_to_le32(srf->format);
+               cmd->body.format = srf->format;
                cmd->body.numMipLevels = srf->mip_levels[0];
                cmd->body.multisampleCount = srf->multisample_count;
                cmd->body.autogenFilter = srf->autogen_filter;
@@ -1210,7 +1256,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);
 
-       vmw_fence_single_bo(val_buf->bo, fence);
+       vmw_bo_fence_single(val_buf->bo, fence);
 
        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);
@@ -1256,194 +1302,55 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 
 /**
  * vmw_gb_surface_define_ioctl - Ioctl function implementing
- *                               the user surface define functionality.
+ * the user surface define functionality.
  *
- * @dev:            Pointer to a struct drm_device.
- * @data:           Pointer to data copied from / to user-space.
- * @file_priv:      Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
  */
 int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_user_surface *user_srf;
-       struct vmw_surface *srf;
-       struct vmw_resource *res;
-       struct vmw_resource *tmp;
        union drm_vmw_gb_surface_create_arg *arg =
            (union drm_vmw_gb_surface_create_arg *)data;
-       struct drm_vmw_gb_surface_create_req *req = &arg->req;
        struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       int ret;
-       uint32_t size;
-       uint32_t backup_handle = 0;
-
-       if (req->multisample_count != 0)
-               return -EINVAL;
-
-       if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
-               return -EINVAL;
+       struct drm_vmw_gb_surface_create_ext_req req_ext;
 
-       if (unlikely(vmw_user_surface_size == 0))
-               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       128;
-
-       size = vmw_user_surface_size + 128;
-
-       /* Define a surface based on the parameters. */
-       ret = vmw_surface_gb_priv_define(dev,
-                       size,
-                       req->svga3d_flags,
-                       req->format,
-                       req->drm_surface_flags & drm_vmw_surface_flag_scanout,
-                       req->mip_levels,
-                       req->multisample_count,
-                       req->array_size,
-                       req->base_size,
-                       &srf);
-       if (unlikely(ret != 0))
-               return ret;
-
-       user_srf = container_of(srf, struct vmw_user_surface, srf);
-       if (drm_is_primary_client(file_priv))
-               user_srf->master = drm_master_get(file_priv->master);
+       req_ext.base = arg->req;
+       req_ext.version = drm_vmw_gb_surface_v1;
+       req_ext.svga3d_flags_upper_32_bits = 0;
+       req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+       req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
+       req_ext.must_be_zero = 0;
 
-       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-       if (unlikely(ret != 0))
-               return ret;
-
-       res = &user_srf->srf.res;
-
-
-       if (req->buffer_handle != SVGA3D_INVALID_ID) {
-               ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-                                            &res->backup,
-                                            &user_srf->backup_base);
-               if (ret == 0) {
-                       if (res->backup->base.num_pages * PAGE_SIZE <
-                           res->backup_size) {
-                               DRM_ERROR("Surface backup buffer is too small.\n");
-                               vmw_dmabuf_unreference(&res->backup);
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       } else {
-                               backup_handle = req->buffer_handle;
-                       }
-               }
-       } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
-               ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
-                                           res->backup_size,
-                                           req->drm_surface_flags &
-                                           drm_vmw_surface_flag_shareable,
-                                           &backup_handle,
-                                           &res->backup,
-                                           &user_srf->backup_base);
-
-       if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&res);
-               goto out_unlock;
-       }
-
-       tmp = vmw_resource_reference(res);
-       ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
-                                   req->drm_surface_flags &
-                                   drm_vmw_surface_flag_shareable,
-                                   VMW_RES_SURFACE,
-                                   &vmw_user_surface_base_release, NULL);
-
-       if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&tmp);
-               vmw_resource_unreference(&res);
-               goto out_unlock;
-       }
-
-       rep->handle      = user_srf->prime.base.hash.key;
-       rep->backup_size = res->backup_size;
-       if (res->backup) {
-               rep->buffer_map_handle =
-                       drm_vma_node_offset_addr(&res->backup->base.vma_node);
-               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
-               rep->buffer_handle = backup_handle;
-       } else {
-               rep->buffer_map_handle = 0;
-               rep->buffer_size = 0;
-               rep->buffer_handle = SVGA3D_INVALID_ID;
-       }
-
-       vmw_resource_unreference(&res);
-
-out_unlock:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-       return ret;
+       return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
 }
 
 /**
  * vmw_gb_surface_reference_ioctl - Ioctl function implementing
- *                                  the user surface reference functionality.
+ * the user surface reference functionality.
  *
- * @dev:            Pointer to a struct drm_device.
- * @data:           Pointer to data copied from / to user-space.
- * @file_priv:      Pointer to a drm file private structure.
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
  */
 int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_gb_surface_reference_arg *arg =
            (union drm_vmw_gb_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_surface *srf;
-       struct vmw_user_surface *user_srf;
-       struct ttm_base_object *base;
-       uint32_t backup_handle;
-       int ret = -EINVAL;
+       struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
+       int ret;
+
+       ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);
 
-       ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
-                                          req->handle_type, &base);
        if (unlikely(ret != 0))
                return ret;
 
-       user_srf = container_of(base, struct vmw_user_surface, prime.base);
-       srf = &user_srf->srf;
-       if (!srf->res.backup) {
-               DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
-               goto out_bad_resource;
-       }
-
-       mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
-       ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
-                                       &backup_handle);
-       mutex_unlock(&dev_priv->cmdbuf_mutex);
-
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could not add a reference to a GB surface "
-                         "backup buffer.\n");
-               (void) ttm_ref_object_base_unref(tfile, base->hash.key,
-                                                TTM_REF_USAGE);
-               goto out_bad_resource;
-       }
-
-       rep->creq.svga3d_flags = srf->flags;
-       rep->creq.format = srf->format;
-       rep->creq.mip_levels = srf->mip_levels[0];
-       rep->creq.drm_surface_flags = 0;
-       rep->creq.multisample_count = srf->multisample_count;
-       rep->creq.autogen_filter = srf->autogen_filter;
-       rep->creq.array_size = srf->array_size;
-       rep->creq.buffer_handle = backup_handle;
-       rep->creq.base_size = srf->base_size;
-       rep->crep.handle = user_srf->prime.base.hash.key;
-       rep->crep.backup_size = srf->res.backup_size;
-       rep->crep.buffer_handle = backup_handle;
-       rep->crep.buffer_map_handle =
-               drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
-       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
-
-out_bad_resource:
-       ttm_base_object_unref(&base);
+       rep->creq = rep_ext.creq.base;
+       rep->crep = rep_ext.crep;
 
        return ret;
 }
@@ -1461,6 +1368,8 @@ out_bad_resource:
  * @multisample_count:
  * @array_size: Surface array size.
  * @size: width, height, depth of the surface requested
+ * @multisample_pattern: Multisampling pattern when MSAA is supported
+ * @quality_level: Precision settings
  * @user_srf_out: allocated user_srf.  Set to NULL on failure.
  *
  * GB surfaces allocated by this function will not have a user mode handle, and
@@ -1470,13 +1379,15 @@ out_bad_resource:
  */
 int vmw_surface_gb_priv_define(struct drm_device *dev,
                               uint32_t user_accounting_size,
-                              uint32_t svga3d_flags,
+                              SVGA3dSurfaceAllFlags svga3d_flags,
                               SVGA3dSurfaceFormat format,
                               bool for_scanout,
                               uint32_t num_mip_levels,
                               uint32_t multisample_count,
                               uint32_t array_size,
                               struct drm_vmw_size size,
+                              SVGA3dMSPattern multisample_pattern,
+                              SVGA3dMSQualityLevel quality_level,
                               struct vmw_surface **srf_out)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
@@ -1487,7 +1398,8 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        };
        struct vmw_surface *srf;
        int ret;
-       u32 num_layers;
+       u32 num_layers = 1;
+       u32 sample_count = 1;
 
        *srf_out = NULL;
 
@@ -1562,19 +1474,23 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
        srf->array_size        = array_size;
        srf->multisample_count = multisample_count;
+       srf->multisample_pattern = multisample_pattern;
+       srf->quality_level = quality_level;
 
        if (array_size)
                num_layers = array_size;
        else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
                num_layers = SVGA3D_MAX_SURFACE_FACES;
-       else
-               num_layers = 1;
+
+       if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
+               sample_count = srf->multisample_count;
 
        srf->res.backup_size   =
-               svga3dsurface_get_serialized_size(srf->format,
-                                                 srf->base_size,
-                                                 srf->mip_levels[0],
-                                                 num_layers);
+               svga3dsurface_get_serialized_size_extended(srf->format,
+                                                          srf->base_size,
+                                                          srf->mip_levels[0],
+                                                          num_layers,
+                                                          sample_count);
 
        if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
                srf->res.backup_size += sizeof(SVGA3dDXSOState);
@@ -1599,3 +1515,266 @@ out_unlock:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
 }
+
+/**
+ * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       union drm_vmw_gb_surface_create_ext_arg *arg =
+           (union drm_vmw_gb_surface_create_ext_arg *)data;
+       struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
+       struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
+
+       return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @data: Pointer to data copied from / to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
+                                  struct drm_file *file_priv)
+{
+       union drm_vmw_gb_surface_reference_ext_arg *arg =
+           (union drm_vmw_gb_surface_reference_ext_arg *)data;
+       struct drm_vmw_surface_arg *req = &arg->req;
+       struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;
+
+       return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
+}
+
+/**
+ * vmw_gb_surface_define_internal - Ioctl function implementing
+ * the user surface define functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Request argument from user-space.
+ * @rep: Response argument to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_define_internal(struct drm_device *dev,
+                              struct drm_vmw_gb_surface_create_ext_req *req,
+                              struct drm_vmw_gb_surface_create_rep *rep,
+                              struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_surface *user_srf;
+       struct vmw_surface *srf;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       int ret;
+       uint32_t size;
+       uint32_t backup_handle = 0;
+       SVGA3dSurfaceAllFlags svga3d_flags_64 =
+               SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
+                               req->base.svga3d_flags);
+
+       if (!dev_priv->has_sm4_1) {
+               /*
+                * If SM4_1 is not supported then we cannot send 64-bit
+                * flags to the device.
+                */
+               if (req->svga3d_flags_upper_32_bits != 0)
+                       return -EINVAL;
+
+               if (req->base.multisample_count != 0)
+                       return -EINVAL;
+
+               if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
+                       return -EINVAL;
+
+               if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
+                       return -EINVAL;
+       }
+
+       if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
+           req->base.multisample_count == 0)
+               return -EINVAL;
+
+       if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+               return -EINVAL;
+
+       if (unlikely(vmw_user_surface_size == 0))
+               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+                       128;
+
+       size = vmw_user_surface_size + 128;
+
+       /* Define a surface based on the parameters. */
+       ret = vmw_surface_gb_priv_define(dev,
+                                        size,
+                                        svga3d_flags_64,
+                                        req->base.format,
+                                        req->base.drm_surface_flags &
+                                        drm_vmw_surface_flag_scanout,
+                                        req->base.mip_levels,
+                                        req->base.multisample_count,
+                                        req->base.array_size,
+                                        req->base.base_size,
+                                        req->multisample_pattern,
+                                        req->quality_level,
+                                        &srf);
+       if (unlikely(ret != 0))
+               return ret;
+
+       user_srf = container_of(srf, struct vmw_user_surface, srf);
+       if (drm_is_primary_client(file_priv))
+               user_srf->master = drm_master_get(file_priv->master);
+
+       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       res = &user_srf->srf.res;
+
+       if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
+               ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
+                                        &res->backup,
+                                        &user_srf->backup_base);
+               if (ret == 0) {
+                       if (res->backup->base.num_pages * PAGE_SIZE <
+                           res->backup_size) {
+                               DRM_ERROR("Surface backup buffer too small.\n");
+                               vmw_bo_unreference(&res->backup);
+                               ret = -EINVAL;
+                               goto out_unlock;
+                       } else {
+                               backup_handle = req->base.buffer_handle;
+                       }
+               }
+       } else if (req->base.drm_surface_flags &
+                  drm_vmw_surface_flag_create_buffer)
+               ret = vmw_user_bo_alloc(dev_priv, tfile,
+                                       res->backup_size,
+                                       req->base.drm_surface_flags &
+                                       drm_vmw_surface_flag_shareable,
+                                       &backup_handle,
+                                       &res->backup,
+                                       &user_srf->backup_base);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       tmp = vmw_resource_reference(res);
+       ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+                                   req->base.drm_surface_flags &
+                                   drm_vmw_surface_flag_shareable,
+                                   VMW_RES_SURFACE,
+                                   &vmw_user_surface_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       rep->handle      = user_srf->prime.base.hash.key;
+       rep->backup_size = res->backup_size;
+       if (res->backup) {
+               rep->buffer_map_handle =
+                       drm_vma_node_offset_addr(&res->backup->base.vma_node);
+               rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
+               rep->buffer_handle = backup_handle;
+       } else {
+               rep->buffer_map_handle = 0;
+               rep->buffer_size = 0;
+               rep->buffer_handle = SVGA3D_INVALID_ID;
+       }
+
+       vmw_resource_unreference(&res);
+
+out_unlock:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
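
The extended request splits the 64-bit surface flags into two 32-bit halves; SVGA3D_FLAGS_64() above recombines them, and the reference path further down splits them again with SVGA3D_FLAGS_LOWER_32()/SVGA3D_FLAGS_UPPER_32(). A minimal sketch of equivalent helpers, written as hypothetical stand-ins for the vmwgfx macros:

        #include <stdint.h>

        /* Hypothetical stand-ins for the SVGA3D_FLAGS_* macros used above. */
        static inline uint64_t flags_64(uint32_t upper, uint32_t lower)
        {
                return ((uint64_t)upper << 32) | lower;  /* SVGA3D_FLAGS_64 */
        }

        static inline uint32_t flags_lower_32(uint64_t flags)
        {
                return (uint32_t)flags;                  /* SVGA3D_FLAGS_LOWER_32 */
        }

        static inline uint32_t flags_upper_32(uint64_t flags)
        {
                return (uint32_t)(flags >> 32);          /* SVGA3D_FLAGS_UPPER_32 */
        }
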
+
+/**
+ * vmw_gb_surface_reference_internal - Ioctl function implementing
+ * the user surface reference functionality.
+ *
+ * @dev: Pointer to a struct drm_device.
+ * @req: Pointer to user-space request surface arg.
+ * @rep: Pointer to response to user-space.
+ * @file_priv: Pointer to a drm file private structure.
+ */
+static int
+vmw_gb_surface_reference_internal(struct drm_device *dev,
+                                 struct drm_vmw_surface_arg *req,
+                                 struct drm_vmw_gb_surface_ref_ext_rep *rep,
+                                 struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_surface *srf;
+       struct vmw_user_surface *user_srf;
+       struct ttm_base_object *base;
+       uint32_t backup_handle;
+       int ret = -EINVAL;
+
+       ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
+                                          req->handle_type, &base);
+       if (unlikely(ret != 0))
+               return ret;
+
+       user_srf = container_of(base, struct vmw_user_surface, prime.base);
+       srf = &user_srf->srf;
+       if (!srf->res.backup) {
+               DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
+               goto out_bad_resource;
+       }
+
+       mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
+       ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a GB surface "
+                         "backup buffer.\n");
+               (void) ttm_ref_object_base_unref(tfile, base->hash.key,
+                                                TTM_REF_USAGE);
+               goto out_bad_resource;
+       }
+
+       rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
+       rep->creq.base.format = srf->format;
+       rep->creq.base.mip_levels = srf->mip_levels[0];
+       rep->creq.base.drm_surface_flags = 0;
+       rep->creq.base.multisample_count = srf->multisample_count;
+       rep->creq.base.autogen_filter = srf->autogen_filter;
+       rep->creq.base.array_size = srf->array_size;
+       rep->creq.base.buffer_handle = backup_handle;
+       rep->creq.base.base_size = srf->base_size;
+       rep->crep.handle = user_srf->prime.base.hash.key;
+       rep->crep.backup_size = srf->res.backup_size;
+       rep->crep.buffer_handle = backup_handle;
+       rep->crep.buffer_map_handle =
+               drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+       rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
+
+       rep->creq.version = drm_vmw_gb_surface_v1;
+       rep->creq.svga3d_flags_upper_32_bits =
+               SVGA3D_FLAGS_UPPER_32(srf->flags);
+       rep->creq.multisample_pattern = srf->multisample_pattern;
+       rep->creq.quality_level = srf->quality_level;
+       rep->creq.must_be_zero = 0;
+
+out_bad_resource:
+       ttm_base_object_unref(&base);
+
+       return ret;
+}
similarity index 99%
rename from drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
rename to drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 21111fd091f953117471302804df4cf80dac26f0..31786b200afc470d73d4f661a4e9358959d686f8 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
@@ -798,7 +798,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
 
-       return vmw_user_dmabuf_verify_access(bo, tfile);
+       return vmw_user_bo_verify_access(bo, tfile);
 }
 
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -852,7 +852,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_mem_reg *mem)
 {
-       vmw_resource_move_notify(bo, mem);
+       vmw_bo_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
 }
 
@@ -864,7 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
  */
 static void vmw_swap_notify(struct ttm_buffer_object *bo)
 {
-       vmw_resource_swap_notify(bo);
+       vmw_bo_swap_notify(bo);
        (void) ttm_bo_wait(bo, false, false);
 }
 
index e771091d2cd3b962d99cad883b093b29f9131b62..7b1e5a5cbd2c7758aadf70f784f4ce9c629ca1db 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index b4162fd78600f06e71aee12fa54c818dc183a766..ebc1d83c34b40a5f7f0c8908dbcff6282b5db2e2 100644 (file)
@@ -1,7 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2012-2016 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright 2012-2016 VMware, Inc., Palo Alto, CA., USA
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
index b3786c1a4e80fdedb4bc1da0a4c046184d4ff250..6b6d5ab82ec3fadf4b46d4e3c3771f9c8d56e143 100644 (file)
@@ -623,7 +623,7 @@ static int displback_initwait(struct xen_drm_front_info *front_info)
        if (ret < 0)
                return ret;
 
-       DRM_INFO("Have %d conector(s)\n", cfg->num_connectors);
+       DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
        /* Create event channels for all connectors and publish */
        ret = xen_drm_front_evtchnl_create_all(front_info);
        if (ret < 0)
index 2c2479b571ae6baf56c7f736c8f587c51227da0f..5693b4a4b02b1a951d897f30bd60bf2e74a6297d 100644 (file)
@@ -126,12 +126,12 @@ struct xen_drm_front_drm_info {
 
 static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
 {
-       return (u64)fb;
+       return (uintptr_t)fb;
 }
 
 static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
 {
-       return (u64)gem_obj;
+       return (uintptr_t)gem_obj;
 }
 
 int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
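
Casting through uintptr_t first keeps the conversion well-defined and silences the pointer-to-integer-of-different-size warning a direct (u64) cast produces on 32-bit builds. The same idiom in a self-contained sketch:

        #include <stdint.h>

        /* Widen a pointer to a fixed 64-bit cookie without a cast warning
         * on 32-bit targets: pointer -> uintptr_t -> uint64_t. */
        static uint64_t ptr_to_cookie(void *p)
        {
                return (uintptr_t)p;
        }
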
index 8099cb343ae3bef389a2718c1fc34f50c737c73f..d333b67cc1a06b4cbfe838ddf3ab4245c1de3ae1 100644 (file)
@@ -122,7 +122,7 @@ static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
 }
 
 #define xen_page_to_vaddr(page) \
-               ((phys_addr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+               ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
 
 static int backend_unmap(struct xen_drm_front_shbuf *buf)
 {
index 13ea90f7a185acb222a7a8e9190b4e0de5ac33c5..78655269d84343d683fc2278740ff21e075c3d1d 100644 (file)
@@ -272,7 +272,7 @@ static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
 
        hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
        hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
 
@@ -326,7 +326,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
        drm_connector_helper_add(&hdmi->connector,
                                 &zx_hdmi_connector_helper_funcs);
 
-       drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
+       drm_connector_attach_encoder(&hdmi->connector, encoder);
 
        return 0;
 }
index d1931f5ea0b2e401fcc621e6a22587f1a4c110f5..ae8c53b4b261b70eeb6f08b4f721cbd660212b5f 100644 (file)
@@ -446,7 +446,7 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
 
 static void zx_plane_destroy(struct drm_plane *plane)
 {
-       drm_plane_helper_disable(plane);
+       drm_plane_helper_disable(plane, NULL);
        drm_plane_cleanup(plane);
 }
 
index 0de1a71ca4e082b7deddd010319f648237f05d30..b73afb212fb246f8e5e7c9b5e1def38f8d1df8c8 100644 (file)
@@ -297,7 +297,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
                           DRM_MODE_CONNECTOR_Composite);
        drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index 3e7e33cd3dfa46ec226b2fbf50bc925550d1c124..23d1ff4355a0304c9a1923d0e87a774b9f128414 100644 (file)
@@ -109,7 +109,7 @@ static int zx_vga_connector_get_modes(struct drm_connector *connector)
         */
        zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
 
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
 
@@ -175,7 +175,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
 
        drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
 
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
+       ret = drm_connector_attach_encoder(connector, encoder);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
                goto clean_connector;
index f1d5f76e9c33d8f31fa4e6f79f37e27e4f43c35c..d88073e7d22dddd94b33e2210fb20181dea6d245 100644 (file)
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
                return err;
        }
 
+       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+               goto skip_iommu;
+
        host->group = iommu_group_get(&pdev->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
index e2f4a4d93d2012f3d21ea59271732c84a80189a0..527a1cddb14fd5d2a23fa96559ed0e33c54bd690 100644 (file)
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
 
-               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+                   unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
index c73bd003f845d4ad7efe935fc6bdfe6c149b6404..474b00e19697d90e7a60ee60fb8f8badd330f81e 100644 (file)
@@ -122,6 +122,8 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
        case V4L2_PIX_FMT_NV16:
        case V4L2_PIX_FMT_NV61:
                return IPUV3_COLORSPACE_YUV;
+       case V4L2_PIX_FMT_XRGB32:
+       case V4L2_PIX_FMT_XBGR32:
        case V4L2_PIX_FMT_RGB32:
        case V4L2_PIX_FMT_BGR32:
        case V4L2_PIX_FMT_RGB24:
@@ -190,6 +192,8 @@ int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
                return (24 * pixel_stride) >> 3;
        case V4L2_PIX_FMT_BGR32:
        case V4L2_PIX_FMT_RGB32:
+       case V4L2_PIX_FMT_XBGR32:
+       case V4L2_PIX_FMT_XRGB32:
                return (32 * pixel_stride) >> 3;
        default:
                break;
index e68e4734f052fe2a3e01720a450080a67ff995b3..a9d2501500a19750d0249b82d52ff67553507b74 100644 (file)
@@ -188,6 +188,12 @@ static int v4l2_pix_fmt_to_drm_fourcc(u32 pixelformat)
        case V4L2_PIX_FMT_RGB32:
                /* R G B A <=> [32:0] A:B:G:R */
                return DRM_FORMAT_XBGR8888;
+       case V4L2_PIX_FMT_XBGR32:
+               /* B G R X <=> [32:0] X:R:G:B */
+               return DRM_FORMAT_XRGB8888;
+       case V4L2_PIX_FMT_XRGB32:
+               /* X R G B <=> [32:0] B:G:R:X */
+               return DRM_FORMAT_BGRX8888;
        case V4L2_PIX_FMT_UYVY:
                return DRM_FORMAT_UYVY;
        case V4L2_PIX_FMT_YUYV:
@@ -269,9 +275,20 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
 
 void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
 {
+       u32 ilo, sly;
+
+       if (stride < 0) {
+               stride = -stride;
+               ilo = 0x100000 - (stride / 8);
+       } else {
+               ilo = stride / 8;
+       }
+
+       sly = (stride * 2) - 1;
+
        ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
-       ipu_ch_param_write_field(ch, IPU_FIELD_ILO, stride / 8);
-       ipu_ch_param_write_field(ch, IPU_FIELD_SLY, (stride * 2) - 1);
+       ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
+       ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
 };
 EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
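
The 0x100000 - (stride / 8) expression is the two's complement of the line offset in the 20-bit ILO field, which is how a negative interlace stride is represented. A small sketch of the encoding, assuming the same 8-byte units as the code above:

        #include <stdint.h>

        /* Encode a (possibly negative) interlace line offset into the 20-bit
         * ILO field; 0x100000 - n is the 20-bit two's complement of n. */
        static uint32_t encode_ilo(int stride)
        {
                if (stride < 0)
                        return 0x100000 - ((-stride) / 8);
                return stride / 8;
        }
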
 
@@ -776,6 +793,8 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
                break;
        case V4L2_PIX_FMT_RGB32:
        case V4L2_PIX_FMT_BGR32:
+       case V4L2_PIX_FMT_XRGB32:
+       case V4L2_PIX_FMT_XBGR32:
                offset = image->rect.left * 4 +
                        image->rect.top * pix->bytesperline;
                break;
index 5450a2db12192dc9b90e6d34da17e3c5f6bb911c..954eefe144e2d49764f9b345b2045738804c0e85 100644 (file)
@@ -224,14 +224,18 @@ static int ipu_csi_set_testgen_mclk(struct ipu_csi *csi, u32 pixel_clk,
  * Find the CSI data format and data width for the given V4L2 media
  * bus pixel format code.
  */
-static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
+static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
+                               enum v4l2_mbus_type mbus_type)
 {
        switch (mbus_code) {
        case MEDIA_BUS_FMT_BGR565_2X8_BE:
        case MEDIA_BUS_FMT_BGR565_2X8_LE:
        case MEDIA_BUS_FMT_RGB565_2X8_BE:
        case MEDIA_BUS_FMT_RGB565_2X8_LE:
-               cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+               if (mbus_type == V4L2_MBUS_CSI2)
+                       cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
+               else
+                       cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
                cfg->mipi_dt = MIPI_DT_RGB565;
                cfg->data_width = IPU_CSI_DATA_WIDTH_8;
                break;
@@ -247,6 +251,12 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
                cfg->mipi_dt = MIPI_DT_RGB555;
                cfg->data_width = IPU_CSI_DATA_WIDTH_8;
                break;
+       case MEDIA_BUS_FMT_RGB888_1X24:
+       case MEDIA_BUS_FMT_BGR888_1X24:
+               cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
+               cfg->mipi_dt = MIPI_DT_RGB888;
+               cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+               break;
        case MEDIA_BUS_FMT_UYVY8_2X8:
                cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
                cfg->mipi_dt = MIPI_DT_YUV422;
@@ -318,13 +328,17 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
 /*
  * Fill a CSI bus config struct from mbus_config and mbus_framefmt.
  */
-static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
+static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
                                 struct v4l2_mbus_config *mbus_cfg,
                                 struct v4l2_mbus_framefmt *mbus_fmt)
 {
+       int ret;
+
        memset(csicfg, 0, sizeof(*csicfg));
 
-       mbus_code_to_bus_cfg(csicfg, mbus_fmt->code);
+       ret = mbus_code_to_bus_cfg(csicfg, mbus_fmt->code, mbus_cfg->type);
+       if (ret < 0)
+               return ret;
 
        switch (mbus_cfg->type) {
        case V4L2_MBUS_PARALLEL:
@@ -356,6 +370,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
                /* will never get here, keep compiler quiet */
                break;
        }
+
+       return 0;
 }
 
 int ipu_csi_init_interface(struct ipu_csi *csi,
@@ -365,8 +381,11 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
        struct ipu_csi_bus_config cfg;
        unsigned long flags;
        u32 width, height, data = 0;
+       int ret;
 
-       fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+       ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+       if (ret < 0)
+               return ret;
 
        /* set default sensor frame width and height */
        width = mbus_fmt->width;
@@ -587,11 +606,14 @@ int ipu_csi_set_mipi_datatype(struct ipu_csi *csi, u32 vc,
        struct ipu_csi_bus_config cfg;
        unsigned long flags;
        u32 temp;
+       int ret;
 
        if (vc > 3)
                return -EINVAL;
 
-       mbus_code_to_bus_cfg(&cfg, mbus_fmt->code);
+       ret = mbus_code_to_bus_cfg(&cfg, mbus_fmt->code, V4L2_MBUS_CSI2);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&csi->lock, flags);
 
index 524a717ab28e4937cf120a18f942e2a04f3c216d..f4081962784ccee322282a5099590c42dcbcccc9 100644 (file)
@@ -226,6 +226,12 @@ static const struct ipu_image_pixfmt image_convert_formats[] = {
        }, {
                .fourcc = V4L2_PIX_FMT_BGR32,
                .bpp    = 32,
+       }, {
+               .fourcc = V4L2_PIX_FMT_XRGB32,
+               .bpp    = 32,
+       }, {
+               .fourcc = V4L2_PIX_FMT_XBGR32,
+               .bpp    = 32,
        }, {
                .fourcc = V4L2_PIX_FMT_YUYV,
                .bpp    = 16,
index f858cc72011d183fa11892fb152e0d9b705c3059..3942ee61bd1c17e57867a7d8e5f521b5f8eae9b8 100644 (file)
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev)
        }
        hdev->io_started = false;
 
+       clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
        if (!hdev->driver) {
                id = hid_match_device(hdev, hdrv);
                if (id == NULL) {
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
        struct hid_device *hdev = to_hid_device(dev);
 
        if (hdev->driver == hdrv &&
-           !hdrv->match(hdev, hid_ignore_special_drivers))
+           !hdrv->match(hdev, hid_ignore_special_drivers) &&
+           !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                return device_reprobe(dev);
 
        return 0;
index 8469b6964ff64e45f7807641ef8eda8197f8f81f..b48100236df890cdd1bbffa0daac97257357a38d 100644 (file)
@@ -1154,6 +1154,8 @@ copy_rest:
                        goto out;
                if (list->tail > list->head) {
                        len = list->tail - list->head;
+                       if (len > count)
+                               len = count;
 
                        if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
                                ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
                        list->head += len;
                } else {
                        len = HID_DEBUG_BUFSIZE - list->head;
+                       if (len > count)
+                               len = count;
 
                        if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
                                ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
                        }
                        list->head = 0;
                        ret += len;
-                       goto copy_rest;
+                       count -= len;
+                       if (count > 0)
+                               goto copy_rest;
                }
 
        }
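
Both branches now clamp len to the caller's remaining count, so a read() with a small buffer can no longer be overrun, and the wrap-around branch only loops back while count is still positive. A self-contained sketch of the clamped two-segment ring-buffer drain, using hypothetical names:

        #include <stddef.h>
        #include <string.h>

        /* Drain a ring buffer of size bufsz into out, copying at most count
         * bytes; handles the wrapped (tail < head) case in two segments. */
        static size_t ring_drain(const char *buf, size_t bufsz, size_t *head,
                                 size_t tail, char *out, size_t count)
        {
                size_t copied = 0;

                while (count && *head != tail) {
                        size_t len = (tail > *head) ? tail - *head
                                                    : bufsz - *head;

                        if (len > count)
                                len = count;
                        memcpy(out + copied, buf + *head, len);
                        *head = (*head + len) % bufsz;
                        copied += len;
                        count -= len;
                }
                return copied;
        }
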
index 7b8e17b03cb864a7bc0ab0cbd114594b01f28c5c..6bf4da7ad63a51f3b9aa6713552c96be6042bba2 100644 (file)
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, hammer_devices);
index a85634fe033f01bd6f9a2b41c67d027c0b55ccc1..c7981ddd8776377faa9a238b8e58d6162054b9c6 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE        0x5028
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index cb86cc834201c89f660daa3509722f6fa72cb98c..0422ec2b13d208d98acdf22c5eb97b6393c5f530 100644 (file)
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
 
 static int steam_client_ll_parse(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_parse_report(hdev, steam->hdev->dev_rdesc,
                        steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
 
 static int steam_client_ll_open(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
        int ret;
 
        ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
 
 static void steam_client_ll_close(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
                                size_t count, unsigned char report_type,
                                int reqtype)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
                        report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
                ret = PTR_ERR(steam->client_hdev);
                goto client_hdev_fail;
        }
-       hid_set_drvdata(steam->client_hdev, steam);
+       steam->client_hdev->driver_data = steam;
 
        /*
         * With the real steam controller interface, do not connect hidraw.
index c1652bb7bd156e298514bcc63ce818ea49e6d7c6..eae0cb3ddec668e8d2f82b1d571bb826b0fb1dd5 100644 (file)
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
                return;
        }
 
-       if ((ret_size > size) || (ret_size <= 2)) {
+       if ((ret_size > size) || (ret_size < 2)) {
                dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
                        __func__, size, ret_size);
                return;
index 582e449be9feeeebd5924fea4a28aab4a5c2e8a2..a2c53ea3b5edfce82eeb2ef4b4e2392f5c7fb98f 100644 (file)
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
        kfree(ishtp_dev);
 }
 
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
 
 /* 50ms to get resume response */
 #define WAIT_FOR_RESUME_ACK_MS         50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
  * in that case a simple resume message is enough, others we need
  * a reset sequence.
  */
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
 {
        struct pci_dev *pdev = to_pci_dev(ish_resume_device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
  *
  * Return: 0 to the pm core
  */
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
        return 0;
 }
 
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
 /**
  * ish_resume() - ISH resume callback
  * @device:    device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
  *
  * Return: 0 to the pm core
  */
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
        return 0;
 }
 
-static const struct dev_pm_ops ish_pm_ops = {
-       .suspend = ish_suspend,
-       .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS       (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS       NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
 
 static struct pci_driver ish_driver = {
        .name = KBUILD_MODNAME,
        .id_table = ish_pci_tbl,
        .probe = ish_probe,
        .remove = ish_remove,
-       .driver.pm = ISHTP_ISH_PM_OPS,
+       .driver.pm = &ish_pm_ops,
 };
 
 module_pci_driver(ish_driver);
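
SIMPLE_DEV_PM_OPS() together with __maybe_unused replaces the old #ifdef CONFIG_PM scaffolding: the callbacks always compile (the attribute suppresses the unused-function warning), and the sleep ops expand to an empty table when CONFIG_PM_SLEEP is off. The pattern for a hypothetical driver foo:

        #include <linux/device.h>
        #include <linux/pm.h>

        static int __maybe_unused foo_suspend(struct device *dev)
        {
                /* quiesce the hypothetical device */
                return 0;
        }

        static int __maybe_unused foo_resume(struct device *dev)
        {
                /* bring the hypothetical device back up */
                return 0;
        }

        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
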
index e3ce233f8bdcc5bdcae97ffa217f65e022938b0e..23872d08308cdb5857d53b5bcdf907e20d74c345 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/hiddev.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/nospec.h>
 #include "usbhid.h"
 
 #ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
                if (uref->field_index >= report->maxfield)
                        goto inval;
+               uref->field_index = array_index_nospec(uref->field_index,
+                                                      report->maxfield);
 
                field = report->field[uref->field_index];
                if (uref->usage_index >= field->maxusage)
                        goto inval;
+               uref->usage_index = array_index_nospec(uref->usage_index,
+                                                      field->maxusage);
 
                uref->usage_code = field->usage[uref->usage_index].hid;
 
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
                        if (uref->field_index >= report->maxfield)
                                goto inval;
+                       uref->field_index = array_index_nospec(uref->field_index,
+                                                              report->maxfield);
 
                        field = report->field[uref->field_index];
 
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                if (finfo.field_index >= report->maxfield)
                        break;
+               finfo.field_index = array_index_nospec(finfo.field_index,
+                                                      report->maxfield);
 
                field = report->field[finfo.field_index];
                memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                if (cinfo.index >= hid->maxcollection)
                        break;
+               cinfo.index = array_index_nospec(cinfo.index,
+                                                hid->maxcollection);
 
                cinfo.type = hid->collection[cinfo.index].type;
                cinfo.usage = hid->collection[cinfo.index].usage;
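
array_index_nospec() from <linux/nospec.h> clamps an index that has already passed its bounds check so it cannot be used out of range under speculative execution (Spectre v1). The pattern, sketched on a hypothetical table lookup:

        #include <linux/errno.h>
        #include <linux/nospec.h>

        /* Bounds-check first, then clamp before the dependent array load. */
        static int table_lookup(const int *table, unsigned int nr,
                                unsigned int idx)
        {
                if (idx >= nr)
                        return -EINVAL;
                idx = array_index_nospec(idx, nr);
                return table[idx];
        }
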
index c101369b51de88b927fdf2295f3bb664ed415899..d6797535fff97217b477cf7c2009397dcce2d1ec 100644 (file)
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
                }
        }
 
+       /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+       if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+           hdev->product == 0x0358 &&
+           WACOM_PEN_FIELD(field) &&
+           wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+               field->logical_maximum = 43200;
+       }
+
        switch (usage->hid) {
        case HID_GD_X:
                features->x_max = field->logical_maximum;
index 0bb44d0088edb5f8bd8da44a23c57095db14bb68..ad7afa74d3657d902cf655ef6f1467854625b98b 100644 (file)
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                        if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
                                features->device_type |= WACOM_DEVICETYPE_PAD;
 
-                       features->x_max = 4096;
-                       features->y_max = 4096;
+                       if (features->type == INTUOSHT2) {
+                               features->x_max = features->x_max / 10;
+                               features->y_max = features->y_max / 10;
+                       }
+                       else {
+                               features->x_max = 4096;
+                               features->y_max = 4096;
+                       }
                }
                else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
                        features->device_type |= WACOM_DEVICETYPE_PAD;
index bf3bb7e1adab8579cf7647a24cdba12c9c828050..9d3ef879dc51e1aa08848649cfaec435a6f882fa 100644 (file)
@@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
                },
        },
+       {
+               .ident = "Dell XPS13 9333",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+               },
+       },
        { }
 };
 
index 155d4d1d1585af4aa7debc37163072af4979bd02..f9d1349c328698aa510e57d10c6b7b475999d16f 100644 (file)
@@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev)
         * The temperature is already monitored if the respective bit in <mask>
         * is set.
         */
-       for (i = 0; i < 32; i++) {
+       for (i = 0; i < 31; i++) {
                if (!(data->temp_mask & BIT(i + 1)))
                        continue;
                if (!reg_temp_alternate[i])
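
The bound drops from 32 to 31 because the body tests BIT(i + 1): with i == 31 that would address bit 32, one past the top of a 32-bit temp_mask. A sketch of the corrected iteration over bits 1..31, with hypothetical names:

        #include <stdint.h>

        /* Visit the set bits 1..31 of a 32-bit mask; a bound of 32 would
         * shift by 32, which is undefined for a 32-bit value. */
        static void for_each_temp_bit(uint32_t mask, void (*visit)(int bit))
        {
                int i;

                for (i = 0; i < 31; i++)
                        if (mask & (UINT32_C(1) << (i + 1)))
                                visit(i + 1);
        }
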
index 4a34f311e1ff4df2cd6cdfcff7ea1662c95a06eb..6ec65adaba49569ab7b9775f856859a0fcfbd967 100644 (file)
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
        if (bit_adap->getscl == NULL)
                adap->quirks = &i2c_bit_quirk_no_clk_stretch;
 
-       /* Bring bus to a known state. Looks like STOP if bus is not free yet */
-       setscl(bit_adap, 1);
-       udelay(bit_adap->udelay);
-       setsda(bit_adap, 1);
+       /*
+        * We tried forcing SCL/SDA to an initial state here. But that caused a
+        * regression, sadly. Check Bugzilla #200045 for details.
+        */
 
        ret = add_adapter(adap);
        if (ret < 0)
index 44cffad43701f4839096bbde5c5937ee22cce135..c4d176f5ed793c76c78c412d081c21bc8dff2327 100644 (file)
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
        .name                   = "cht_wc_ext_chrg_irq_chip",
 };
 
-static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+static const char * const bq24190_suppliers[] = {
+       "tcpm-source-psy-i2c-fusb302" };
 
 static const struct property_entry bq24190_props[] = {
        PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
index 75d6ab177055efa2119635eb56845388ac3a0848..7379043711dfa89455abd65bce74ac08707073f9 100644 (file)
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
        /*
         * It's not always possible to have 1 to 2 ratio when d=7, so fall back
         * to minimal possible clkh in this case.
+        *
+        * Note:
+        * CLKH is not allowed to be 0; in that case the I2C clock is not
+        * generated at all.
         */
-       if (clk >= clkl + d) {
+       if (clk > clkl + d) {
                clkh = clk - clkl - d;
                clkl -= d;
        } else {
-               clkh = 0;
+               clkh = 1;
                clkl = clk - (d << 1);
        }
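
A quick numeric check of the fallback, assuming d == 7, clk == 50 and clkl == 43 (the boundary case clk == clkl + d): the old >= condition produced clkh = 50 - 43 - 7 = 0, i.e. no SCL high phase at all, while the new strict comparison routes that case to the else branch, giving clkh = 1 and clkl = 50 - 14 = 36.
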
 
index 005e6e0330c278276a0d602fcfebdc3429218cfd..66f85bbf35917161cc36e4ffb308d78b8401c0cb 100644 (file)
@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
         * required for an I2C bus.
         */
        if (pdata->scl_is_open_drain)
-               gflags = GPIOD_OUT_LOW;
+               gflags = GPIOD_OUT_HIGH;
        else
-               gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+               gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
        priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
        if (IS_ERR(priv->scl))
                return PTR_ERR(priv->scl);
index 0207e194f84bb4e667d4ebec548987f40caffdd1..498c5e89164988beb8d82e5f4bc53aac82b137ac 100644 (file)
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
                goto err_desc;
        }
 
+       reinit_completion(&dma->cmd_complete);
        txdesc->callback = i2c_imx_dma_callback;
        txdesc->callback_param = i2c_imx;
        if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
         * The first byte must be transmitted by the CPU.
         */
        imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        if (result)
                return result;
 
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
        i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
                        "gpio");
        rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+       rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
        if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
            PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
index 5e310efd94464897d9db7becf89294a8b2adc4f6..3c1c817f6968e43fdafd73ff6f53d381c2528b96 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 /* register offsets */
 #define ID_ARBLOST     (1 << 3)
 #define ID_NACK                (1 << 4)
 /* persistent flags */
+#define ID_P_NO_RXDMA  (1 << 30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED        (1 << 31)
-#define ID_P_MASK      ID_P_PM_BLOCKED
+#define ID_P_MASK      (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
 
 enum rcar_i2c_type {
        I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
        struct dma_chan *dma_rx;
        struct scatterlist sg;
        enum dma_data_direction dma_direction;
+
+       struct reset_control *rstc;
 };
 
 #define rcar_i2c_priv_to_dev(p)                ((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
        dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
                         sg_dma_len(&priv->sg), priv->dma_direction);
 
+       /* Gen3 can only do one RXDMA per transfer and we just completed it */
+       if (priv->devtype == I2C_RCAR_GEN3 &&
+           priv->dma_direction == DMA_FROM_DEVICE)
+               priv->flags |= ID_P_NO_RXDMA;
+
        priv->dma_direction = DMA_NONE;
 }
 
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
        unsigned char *buf;
        int len;
 
-       /* Do not use DMA if it's not available or for messages < 8 bytes */
-       if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+       /* Do various checks to see if DMA is feasible at all */
+       if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+           (read && priv->flags & ID_P_NO_RXDMA))
                return;
 
        if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
        }
 }
 
+/* I2C is a special case: we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+       int i, ret;
+
+       ret = reset_control_reset(priv->rstc);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < LOOP_TIMEOUT; i++) {
+               ret = reset_control_status(priv->rstc);
+               if (ret == 0)
+                       return 0;
+               udelay(1);
+       }
+
+       return -ETIMEDOUT;
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
                                struct i2c_msg *msgs,
                                int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        pm_runtime_get_sync(dev);
 
+       /* Gen3 needs a reset before allowing RXDMA once */
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->flags |= ID_P_NO_RXDMA;
+               if (!IS_ERR(priv->rstc)) {
+                       ret = rcar_i2c_do_reset(priv);
+                       if (ret == 0)
+                               priv->flags &= ~ID_P_NO_RXDMA;
+               }
+       }
+
        rcar_i2c_init(priv);
 
        ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out_pm_put;
 
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+               if (!IS_ERR(priv->rstc)) {
+                       ret = reset_control_status(priv->rstc);
+                       if (ret < 0)
+                               priv->rstc = ERR_PTR(-ENOTSUPP);
+               }
+       }
+
        /* Stay always active when multi-master to keep arbitration working */
        if (of_property_read_bool(dev->of_node, "multi-master"))
                priv->flags |= ID_P_PM_BLOCKED;
index e866c481bfc325d3c42e733faa88d133b3388f0d..fce52bdab2b715a7123e34b153e2206662c67bf9 100644 (file)
@@ -127,7 +127,7 @@ enum stu300_error {
 
 /*
  * The number of address send attempts tried before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
  */
 #define NUM_ADDR_RESEND_ATTEMPTS 12
 
index 5fccd1f1bca85d28bcc249fa6b76f4297bf504bb..797def5319f1325adacf1974c0b44cdb3a7ca4a6 100644 (file)
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
 {
        u32 cnfg;
 
+       /*
+        * NACK interrupt is generated before the I2C controller generates
+        * the STOP condition on the bus. So wait for 2 clock periods
+        * before disabling the controller so that the STOP condition has
+        * been delivered properly.
+        */
+       udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
        cnfg = i2c_readl(i2c_dev, I2C_CNFG);
        if (cnfg & I2C_CNFG_PACKET_MODE_EN)
                i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
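
For scale, with a 100 kHz bus clock the expression evaluates to DIV_ROUND_UP(2 * 1000000, 100000) = 20, i.e. udelay(20): two 10 us SCL periods pass before the controller is disabled, long enough for the STOP condition to reach the bus.
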
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
        if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
                return 0;
 
-       /*
-        * NACK interrupt is generated before the I2C controller generates
-        * the STOP condition on the bus. So wait for 2 clock periods
-        * before resetting the controller so that the STOP condition has
-        * been delivered properly.
-        */
-       if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
-               udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
        tegra_i2c_init(i2c_dev);
        if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
index 31d16ada6e7d9a789240cc62f50a7fcde840bb2e..301285c54603fda6ded7653ad2446c8421cb2ee2 100644 (file)
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 
                val = !val;
                bri->set_scl(adap, val);
-               ndelay(RECOVERY_NDELAY);
+
+               /*
+                * If we can set SDA, we will always create STOP here to ensure
+                * the additional pulses will do no harm. This is achieved by
+                * letting SDA follow SCL half a cycle later.
+                */
+               ndelay(RECOVERY_NDELAY / 2);
+               if (bri->set_sda)
+                       bri->set_sda(adap, val);
+               ndelay(RECOVERY_NDELAY / 2);
        }
 
        /* check if recovery actually succeeded */
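With SDA under software control, each recovery iteration now drives SDA to the same level half a RECOVERY_NDELAY after SCL, so every released cycle ends with SDA rising while SCL is already high, which is by definition a STOP condition. A timing sketch of the intended waveform (assuming set_sda is available):

/*
 * SDA trails SCL by RECOVERY_NDELAY / 2 in each toggle:
 *
 * SCL: ____/----\____/----\__
 * SDA: ______/----\____/----\
 *            ^ SDA rises while SCL is high -> STOP
 */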
index f3f683041e7f9199ad5799ef5c8fd83f59fc9856..51970bae3c4a5a4d08f03ae558816fd9c264996b 100644 (file)
@@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
 
        status = i2c_transfer(adapter, msg, num);
        if (status < 0)
-               return status;
-       if (status != num)
-               return -EIO;
+               goto cleanup;
+       if (status != num) {
+               status = -EIO;
+               goto cleanup;
+       }
+       status = 0;
 
        /* Check PEC if last message is a read */
        if (i && (msg[num-1].flags & I2C_M_RD)) {
                status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
                if (status < 0)
-                       return status;
+                       goto cleanup;
        }
 
        if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                        break;
                }
 
+cleanup:
        if (msg[0].flags & I2C_M_DMA_SAFE)
                kfree(msg[0].buf);
        if (msg[1].flags & I2C_M_DMA_SAFE)
                kfree(msg[1].buf);
 
-       return 0;
+       return status;
 }
 
 /**
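The rewrite funnels every exit through one cleanup label, so the I2C_M_DMA_SAFE bounce buffers are freed on error paths too instead of leaking when i2c_transfer() fails or returns a short count. The idiom, stated generically (function and buffer names hypothetical):

/* Sketch of the single-exit cleanup idiom used above. */
static int xfer_with_cleanup(struct i2c_adapter *adap, struct i2c_msg *msg,
                             int num, u8 *bounce_buf)
{
        int status;

        status = i2c_transfer(adap, msg, num);
        if (status < 0)
                goto cleanup;           /* error: buffer is still freed */
        if (status != num) {
                status = -EIO;
                goto cleanup;
        }
        status = 0;

cleanup:
        kfree(bounce_buf);              /* one exit frees in every case */
        return status;
}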
index 7e3d82cff3d5f2537608c0a21d9bf277767e7bd8..c149c9c360fc4f265ce1e406e1dd8ba7ae5615d8 100644 (file)
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
        if (src < 0)
                return IRQ_NONE;
 
-       if (!(src & data->chip_info->enabled_events))
+       if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
                return IRQ_NONE;
 
        if (src & MMA8452_INT_DRDY) {
index f9c0624505a2993e3a48d9a581faa8a26e9287de..42618fe4f83ed82d0f50b92e884dd59a11a1df09 100644 (file)
@@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
        }
 
        irq_type = irqd_get_trigger_type(desc);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
        if (irq_type == IRQF_TRIGGER_RISING)
                st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
        else if (irq_type == IRQF_TRIGGER_FALLING)
index 34d42a2504c92bf43ee1216f1855e9febfd03161..df5b2a0da96c4a9c311ddd6da57f990f6d1f821f 100644 (file)
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
                        "%s: failed to get lux\n", __func__);
                return lux_val;
        }
+       if (lux_val == 0)
+               return -ERANGE;
 
        ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
                        lux_val;
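Returning -ERANGE for a zero lux reading guards the division immediately below it; without the check, calibrating a sensor in the dark would divide by zero. Reduced to its essence (parameter names hypothetical):

/* Sketch: reject a zero reading before it becomes a divisor. */
static int cal_gain_trim(int target, int gain_trim, int lux_val)
{
        if (lux_val == 0)               /* dark sensor: avoid divide-by-zero */
                return -ERANGE;
        return (target * gain_trim) / lux_val;
}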
index 5ec3e41b65f2b8f991626a4522d6263d66ca2a27..fe87d27779d96b99ce4f847c9a9e02a4a1a87aa7 100644 (file)
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
        }
        comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
 
-       *val = comp_humidity;
-       *val2 = 1024;
+       *val = comp_humidity * 1000 / 1024;
 
-       return IIO_VAL_FRACTIONAL;
+       return IIO_VAL_INT;
 }
 
 static int bmp280_read_raw(struct iio_dev *indio_dev,
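comp_humidity comes out of the compensation formula in 1/1024ths of %RH (Q22.10 fixed point), and the hunk pre-scales it to the milli-percent integer that IIO expects for processed relative humidity, assuming that unit convention. A worked example:

/* Worked example: comp_humidity = 47104 (Q22.10, i.e. 47104/1024 = 46 %RH)
 * 47104 * 1000 / 1024 = 46000  ->  reported as 46000 m%RH = 46.000 %RH
 */
*val = comp_humidity * 1000 / 1024;
return IIO_VAL_INT;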
index 3e90b6a1d9d2d6a203d13d945e9322a9ec154fe8..cc06e8404e9bf07c6d0acccd81490a0ba78a94f7 100644 (file)
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        struct ib_flow_attr               *flow_attr;
        struct ib_qp                      *qp;
        struct ib_uflow_resources         *uflow_res;
+       struct ib_uverbs_flow_spec_hdr    *kern_spec;
        int err = 0;
-       void *kern_spec;
        void *ib_spec;
        int i;
 
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                if (!kern_flow_attr)
                        return -ENOMEM;
 
-               memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
-               err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+               *kern_flow_attr = cmd.flow_attr;
+               err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
                                         cmd.flow_attr.size);
                if (err)
                        goto err_free_attr;
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                goto err_uobj;
        }
 
+       if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+               err = -EINVAL;
+               goto err_put;
+       }
+
        flow_attr = kzalloc(struct_size(flow_attr, flows,
                                cmd.flow_attr.num_of_specs), GFP_KERNEL);
        if (!flow_attr) {
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        flow_attr->flags = kern_flow_attr->flags;
        flow_attr->size = sizeof(*flow_attr);
 
-       kern_spec = kern_flow_attr + 1;
+       kern_spec = kern_flow_attr->flow_specs;
        ib_spec = flow_attr + 1;
        for (i = 0; i < flow_attr->num_of_specs &&
-            cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
-            cmd.flow_attr.size >=
-            ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
-               err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
-                                          uflow_res);
+                       cmd.flow_attr.size >= sizeof(*kern_spec) &&
+                       cmd.flow_attr.size >= kern_spec->size;
+            i++) {
+               err = kern_spec_to_ib_spec(
+                               file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
+                               ib_spec, uflow_res);
                if (err)
                        goto err_free;
 
                flow_attr->size +=
                        ((union ib_flow_spec *) ib_spec)->size;
-               cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
-               kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+               cmd.flow_attr.size -= kern_spec->size;
+               kern_spec = ((void *)kern_spec) + kern_spec->size;
                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
        }
        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
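Typing kern_spec as a header pointer lets the loop validate each variable-length spec against the remaining buffer before dereferencing it, replacing the repeated void-pointer casts. The walk, condensed under the assumption of a header carrying its own size (struct and function names here are hypothetical):

/* Sketch: walk variable-sized records via a typed, self-sizing header. */
struct spec_hdr { u16 type; u16 size; /* payload of size - 4 bytes follows */ };

static int walk_specs(struct spec_hdr *hdr, size_t left)
{
        while (left >= sizeof(*hdr) && left >= hdr->size) {
                /* ... translate one spec here ... */
                left -= hdr->size;
                hdr = (void *)hdr + hdr->size;  /* advance by declared size */
        }
        return left ? -EINVAL : 0;              /* leftover bytes are an error */
}

The final check in the hunk (`cmd.flow_attr.size || i != num_of_specs`) plays the same role as the leftover test here.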
index 3ae2339dd27a9f5b6c4d104674d096f9f22b5b67..2094d136513d6c5f144663ad9e74192fd85a191a 100644 (file)
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (ret)
                return ret;
 
-       if (!file->ucontext &&
-           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
-               return -EINVAL;
-
        if (extended) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                goto out;
        }
 
+       /*
+        * Must be after the ib_dev check: once RCU has cleared ib_dev,
+        * ib_dev == NULL implies ucontext == NULL.
+        */
+       if (!file->ucontext &&
+           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (!verify_command_mask(ib_dev, command, extended)) {
                ret = -EOPNOTSUPP;
                goto out;
index 0b56828c1319b1b350385dfd5be2c981d26d6ffa..9d6beb948535bec89545e9f9b25f7b83976a654e 100644 (file)
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
 
 /* Completion queues */
 
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller)
 {
        struct ib_cq *cq;
 
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
                cq->res.type = RDMA_RESTRACK_CQ;
+               cq->res.kern_name = caller;
                rdma_restrack_add(&cq->res);
        }
 
        return cq;
 }
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
 
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
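Renaming the export to __ib_create_cq() and recording a caller string in the restrack entry lets resource tracking attribute kernel-created CQs to the module that made them. Upstream presumably pairs this with an ib_create_cq() wrapper macro supplying KBUILD_MODNAME; a sketch of that shape (not part of the hunk shown):

/* Sketch of the expected wrapper in the header (assumption, not shown here): */
#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
        __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), \
                       (cq_attr), KBUILD_MODNAME)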
index 1445918e32392f28ae4ce9ea74e7df0feeddf371..7b76e6f81aeb477181afedc2f44fec990ce3090f 100644 (file)
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-       if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+       if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
                return -ENOMEM;
 
        mhp->mpl[mhp->mpl_len++] = addr;
index 1a1a47ac53c6f049285a30028ba4cde5bd21d5af..f15c931020810cdbc6125898af42146337974cc8 100644 (file)
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
        lockdep_assert_held(&qp->s_lock);
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
index b7b671017e594298c8bea18d029fbd1154a8cfe9..e254dcec6f647067a0efce4cee6b47e9f76dbf9c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        int middle = 0;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
index 1ab332f1866e878580ddb683f71ac83e13d35cbb..70d39fc450a1e112b2f97b4e499cbf96623d19ad 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        u32 lid;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
index 873e48ea923fc42acc9cb2d5d3d7055dd07a1790..c4ab2d5b4502ee1e905ef2c193495f56e479eaf9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
        __must_hold(&qp->s_lock)
 {
-       struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+       struct verbs_txreq *tx = NULL;
 
        write_seqlock(&dev->txwait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
index 729244c3086ce7eb7d28da104bb4f7f4363c96bf..1c19bbc764b2d6f93134fe7775f55569d7b70b84 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        if (unlikely(!tx)) {
                /* call slow path to get the lock */
                tx = __get_txreq(dev, qp);
-               if (IS_ERR(tx))
+               if (!tx)
                        return tx;
        }
        tx->qp = qp;
index ed1f253faf977c5bf3b4f15f4a1ea8992e817d88..c7c85c22e4e3291a343319ffcdb2e00034d7cc5f 100644 (file)
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
        }
 
        if (flags & IB_MR_REREG_ACCESS) {
-               if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
-                       return -EPERM;
+               if (ib_access_writable(mr_access_flags) &&
+                   !mmr->umem->writable) {
+                       err = -EPERM;
+                       goto release_mpt_entry;
+               }
 
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));
index e52dd21519b45ff00268ae33c21816a8b5a96b53..b3ba9a222550750f9c92a1ea8d1cf23b93e05d12 100644 (file)
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
        if (!mcounters->hw_cntrs_hndl) {
                mcounters->hw_cntrs_hndl = mlx5_fc_create(
                        to_mdev(ibcounters->device)->mdev, false);
-               if (!mcounters->hw_cntrs_hndl) {
-                       ret = -ENOMEM;
+               if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+                       ret = PTR_ERR(mcounters->hw_cntrs_hndl);
                        goto free;
                }
                hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                        return ERR_PTR(-ENOMEM);
 
                err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-               if (err) {
-                       kfree(ucmd);
-                       return ERR_PTR(err);
-               }
+               if (err)
+                       goto free_ucmd;
        }
 
-       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-               return ERR_PTR(-ENOMEM);
+       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        if (domain != IB_FLOW_DOMAIN_USER ||
            flow_attr->port > dev->num_ports ||
            (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-                                 IB_FLOW_ATTR_FLAGS_EGRESS)))
-               return ERR_PTR(-EINVAL);
+                                 IB_FLOW_ATTR_FLAGS_EGRESS))) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        if (is_egress &&
            (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-               return ERR_PTR(-EINVAL);
+            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-       if (!dst)
-               return ERR_PTR(-ENOMEM);
+       if (!dst) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        mutex_lock(&dev->flow_db->lock);
 
@@ -3637,8 +3643,8 @@ destroy_ft:
 unlock:
        mutex_unlock(&dev->flow_db->lock);
        kfree(dst);
+free_ucmd:
        kfree(ucmd);
-       kfree(handler);
        return ERR_PTR(err);
 }
 
@@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                             MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-       if (MLX5_VPORT_MANAGER(mdev) &&
+       if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
index 0af7b7905550baddb5084d99293e9a36196eb6b3..f5de5adc9b1a4143b7d2c82f6e7fe59df1157f93 100644 (file)
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-       if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-               return ERR_PTR(-EINVAL);
+       if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
-       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-               return ERR_PTR(-EINVAL);
+       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
-       if (buf_size < desc_size)
-               return ERR_PTR(-EINVAL);
+       if (buf_size < desc_size) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        in.type = init_attr->srq_type;
 
        if (pd->uobject)
index f7ac8fc9b531d7550fb0b41233b55e0bec51b4ff..f07b8df96f43954e67d4dfc32148e96a751e6974 100644 (file)
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+               if (rdma_protocol_iwarp(&dev->ibdev, 1))
+                       return -EINVAL;
+
                if (attr_mask & IB_QP_PATH_MTU) {
                        if (attr->path_mtu < IB_MTU_256 ||
                            attr->path_mtu > IB_MTU_4096) {
index f30eeba3f772c5a8e0433cdc6b6fcaa47076583c..8be27238a86e4ee1f160b4058e9517ef58708d26 100644 (file)
@@ -645,6 +645,9 @@ next_wqe:
                } else {
                        goto exit;
                }
+               if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+                   qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+                       rxe_run_task(&qp->comp.task, 1);
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
@@ -709,6 +712,7 @@ next_wqe:
 
        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+               kfree_skb(skb);
                goto err;
        }
 
@@ -740,7 +744,6 @@ next_wqe:
        goto next_wqe;
 
 err:
-       kfree_skb(skb);
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
        __rxe_do_task(&qp->comp.task);
index cf30523c6ef64c956e5ebf77c730c6bb146c4a1f..6c7326c93721c495c4e61a73cac2dfaf9a5bc8fc 100644 (file)
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
  * inactive, or if the tool type is changed, a new tracking id is
  * assigned to the slot. The tool type is only reported if the
  * corresponding absbit field is set.
+ *
+ * Returns true if contact is active.
  */
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active)
 {
        struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
        int id;
 
        if (!mt)
-               return;
+               return false;
 
        slot = &mt->slots[mt->slot];
        slot->frame = mt->frame;
 
        if (!active) {
                input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
-               return;
+               return false;
        }
 
        id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
-       if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+       if (id < 0)
                id = input_mt_new_trkid(mt);
 
        input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
        input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+       return true;
 }
 EXPORT_SYMBOL(input_mt_report_slot_state);
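With the new bool return, a multitouch driver can fold the "is this contact worth reporting?" decision into the slot-state call itself. A sketch of a typical caller under that assumption (slot, x, y, active come from the driver's report path):

input_mt_slot(dev, slot);
if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, active)) {
        /* contact is active: position data is meaningful */
        input_report_abs(dev, ABS_MT_POSITION_X, x);
        input_report_abs(dev, ABS_MT_POSITION_Y, y);
}
input_mt_sync_frame(dev);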
 
index 48e36acbeb496db7f5033029158a645f8d3cdb27..cd620e009bada3a8f8c1e70b99be25100bea9c44 100644 (file)
@@ -125,7 +125,7 @@ static const struct xpad_device {
        u8 mapping;
        u8 xtype;
 } xpad_device[] = {
-       { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+       { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
index f6e643b589b616c61d1751005fedb3288f0ad82c..e8dae6195b30500934f23738f995201521e05160 100644 (file)
@@ -45,7 +45,7 @@ struct event_dev {
 static irqreturn_t events_interrupt(int irq, void *dev_id)
 {
        struct event_dev *edev = dev_id;
-       unsigned type, code, value;
+       unsigned int type, code, value;
 
        type = __raw_readl(edev->addr + REG_READ);
        code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
 }
 
 static void events_import_bits(struct event_dev *edev,
-                       unsigned long bits[], unsigned type, size_t count)
+                       unsigned long bits[], unsigned int type, size_t count)
 {
        void __iomem *addr = edev->addr;
        int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
 
                for (j = 0; j < ARRAY_SIZE(val); j++) {
                        int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
                        val[j] = __raw_readl(edev->addr + REG_DATA + offset);
                }
 
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
        struct input_dev *input_dev;
        struct event_dev *edev;
        struct resource *res;
-       unsigned keymapnamelen;
+       unsigned int keymapnamelen;
        void __iomem *addr;
        int irq;
        int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
        for (i = 0; i < keymapnamelen; i++)
                edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
 
-       pr_debug("events_probe() keymap=%s\n", edev->name);
+       pr_debug("%s: keymap=%s\n", __func__, edev->name);
 
        input_dev->name = edev->name;
        input_dev->id.bustype = BUS_HOST;
index c25606e006938743d64498429cf3d0b69768d7fb..ca59a2be9bc5344f65389ea7372a7740b74b5343 100644 (file)
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
          To compile this driver as a module, choose M here: the
          module will be called rave-sp-pwrbutton.
 
+config INPUT_SC27XX_VIBRA
+       tristate "Spreadtrum sc27xx vibrator support"
+       depends on MFD_SC27XX_PMIC || COMPILE_TEST
+       select INPUT_FF_MEMLESS
+       help
+         This option enables support for the Spreadtrum sc27xx vibrator driver.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sc27xx_vibra.
+
 endif
index 72cde28649e2c0bc4fec14f6898445d2f79880dc..9d0f9d1ff68f41a5ec7f13101bb11176e8fd8729 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON)    += retu-pwrbutton.o
 obj-$(CONFIG_INPUT_AXP20X_PEK)         += axp20x-pek.o
 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER)        += rotary_encoder.o
 obj-$(CONFIG_INPUT_RK805_PWRKEY)       += rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA)       += sc27xx-vibra.o
 obj-$(CONFIG_INPUT_SGI_BTNS)           += sgi_btns.o
 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY)      += sirfsoc-onkey.o
 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY)   += soc_button_array.o
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644 (file)
index 0000000..295251a
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL                GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN      BIT(9)
+#define LDO_VIBR_PD            BIT(8)
+
+struct vibra_info {
+       struct input_dev        *input_dev;
+       struct work_struct      play_work;
+       struct regmap           *regmap;
+       u32                     base;
+       u32                     strength;
+       bool                    enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+       if (on) {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, 0);
+               info->enabled = true;
+       } else {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+                                  LDO_VIBR_PD);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+               info->enabled = false;
+       }
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+       return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+       struct vibra_info *info = container_of(work, struct vibra_info,
+                                              play_work);
+
+       if (info->strength && !info->enabled)
+               sc27xx_vibra_set(info, true);
+       else if (info->strength == 0 && info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+                            struct ff_effect *effect)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       info->strength = effect->u.rumble.weak_magnitude;
+       schedule_work(&info->play_work);
+
+       return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       cancel_work_sync(&info->play_work);
+       if (info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+       struct vibra_info *info;
+       int error;
+
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!info->regmap) {
+               dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+               return -ENODEV;
+       }
+
+       error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+       if (error) {
+               dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+               return error;
+       }
+
+       info->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!info->input_dev) {
+               dev_err(&pdev->dev, "failed to allocate input device.\n");
+               return -ENOMEM;
+       }
+
+       info->input_dev->name = "sc27xx:vibrator";
+       info->input_dev->id.version = 0;
+       info->input_dev->close = sc27xx_vibra_close;
+
+       input_set_drvdata(info->input_dev, info);
+       input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+       INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+       info->enabled = false;
+
+       error = sc27xx_vibra_hw_init(info);
+       if (error) {
+               dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+               return error;
+       }
+
+       error = input_ff_create_memless(info->input_dev, NULL,
+                                       sc27xx_vibra_play);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+               return error;
+       }
+
+       error = input_register_device(info->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register input device.\n");
+               return error;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+       { .compatible = "sprd,sc2731-vibrator", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+       .driver = {
+               .name = "sc27xx-vibrator",
+               .of_match_table = sc27xx_vibra_of_match,
+       },
+       .probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
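The driver exposes a single FF_RUMBLE capability through the memless force-feedback core, and only weak_magnitude is honoured. A userspace sketch of driving it through the standard evdev FF interface (the device path is illustrative; a real program would scan for the node advertising FF_RUMBLE):

/* Userspace sketch: upload and play one rumble effect. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
        int fd = open("/dev/input/event0", O_RDWR);     /* path illustrative */
        struct ff_effect e;
        struct input_event play;

        if (fd < 0)
                return 1;

        memset(&e, 0, sizeof(e));
        e.type = FF_RUMBLE;
        e.id = -1;                              /* kernel assigns an id */
        e.u.rumble.weak_magnitude = 0x8000;     /* the only field this driver uses */
        e.replay.length = 1000;                 /* play for 1000 ms */
        if (ioctl(fd, EVIOCSFF, &e) < 0)
                return 1;

        memset(&play, 0, sizeof(play));
        play.type = EV_FF;
        play.code = e.id;
        play.value = 1;                         /* start playback */
        write(fd, &play, sizeof(play));
        sleep(1);
        close(fd);
        return 0;
}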
index 599544c1a91cd365261b6ca2ec4e4f3149b0a63d..243e0fa6e3e3cb44ce22adc6e76421fda79f4ff2 100644 (file)
@@ -27,6 +27,8 @@
 #define ETP_DISABLE_POWER      0x0001
 #define ETP_PRESSURE_OFFSET    25
 
+#define ETP_CALIBRATE_MAX_LEN  3
+
 /* IAP Firmware handling */
 #define ETP_PRODUCT_ID_FORMAT_STRING   "%d.0"
 #define ETP_FW_NAME            "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
index 8ff75114e7626dc3d2fa23a1d3457f1802b2a628..f5ae24865355a3292ae8a8efd713746b628cacb0 100644 (file)
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
        int tries = 20;
        int retval;
        int error;
-       u8 val[3];
+       u8 val[ETP_CALIBRATE_MAX_LEN];
 
        retval = mutex_lock_interruptible(&data->sysfs_mutex);
        if (retval)
@@ -1345,6 +1345,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0618", 0 },
+       { "ELAN061D", 0 },
+       { "ELAN0622", 0 },
        { "ELAN1000", 0 },
        { }
 };
index cfcb32559925baf1acf070f908f3b91b1fc1b905..c060d270bc4d862ad7366bd87529dbdc032672b6 100644 (file)
@@ -56,7 +56,7 @@
 static int elan_smbus_initialize(struct i2c_client *client)
 {
        u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
-       u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+       u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
        int len, error;
 
        /* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
 {
        int error;
+       u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+       BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
 
        error = i2c_smbus_read_block_data(client,
-                                         ETP_SMBUS_CALIBRATE_QUERY, val);
+                                         ETP_SMBUS_CALIBRATE_QUERY, buf);
        if (error < 0)
                return error;
 
+       memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
        return 0;
 }
 
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
 {
        int len;
 
+       BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
        len = i2c_smbus_read_block_data(client,
                                        ETP_SMBUS_PACKET_QUERY,
                                        &report[ETP_SMBUS_REPORT_OFFSET]);
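These hunks fix a stack overflow: i2c_smbus_read_block_data() may write up to I2C_SMBUS_BLOCK_MAX (32) bytes into its buffer, because the device, not the caller, chooses the length. Reading into a smaller array (the 5-byte hello packet, the 3-byte calibration result) let a misbehaving device scribble past the end. The safe pattern, stated generically (NEEDED_LEN and CMD are placeholders):

/* Sketch: always give SMBus block reads a full-size buffer. */
static int read_small_result(struct i2c_client *client, u8 *out)
{
        u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };    /* device controls the length */
        int error;

        BUILD_BUG_ON(NEEDED_LEN > sizeof(buf)); /* compile-time size check */
        error = i2c_smbus_read_block_data(client, CMD, buf);
        if (error < 0)
                return error;
        memcpy(out, buf, NEEDED_LEN);           /* expose only the bytes wanted */
        return 0;
}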
index fb4d902c440345d3cbc02329ed742d48b931dc85..dd85b16dc6f889bb366a10cbd4278234d3f9763c 100644 (file)
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
                sanity_check = ((packet[3] & 0x1c) == 0x10);
        else
-               sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+               sanity_check = ((packet[0] & 0x08) == 0x00 &&
                                (packet[3] & 0x1c) == 0x10);
 
        if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
        { }
 };
 
+static const char * const middle_button_pnp_ids[] = {
+       "LEN2131", /* ThinkPad P52 w/ NFC */
+       "LEN2132", /* ThinkPad P52 */
+       NULL
+};
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        __clear_bit(EV_REL, dev->evbit);
 
        __set_bit(BTN_LEFT, dev->keybit);
-       if (dmi_check_system(elantech_dmi_has_middle_button))
+       if (dmi_check_system(elantech_dmi_has_middle_button) ||
+                       psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
                __set_bit(BTN_MIDDLE, dev->keybit);
        __set_bit(BTN_RIGHT, dev->keybit);
 
index 5ff5b1952be0c7afe810cef7f6f086f71928e150..d3ff1fc09af712700507d05ac3548703e49173a1 100644 (file)
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                        else
                                input_report_rel(dev, REL_WHEEL, -wheel);
 
-                       input_report_key(dev, BTN_SIDE,  BIT(4));
-                       input_report_key(dev, BTN_EXTRA, BIT(5));
+                       input_report_key(dev, BTN_SIDE,  packet[3] & BIT(4));
+                       input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
                        break;
                }
                break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
 
                /* Extra buttons on Genius NewNet 3D */
-               input_report_key(dev, BTN_SIDE,  BIT(6));
-               input_report_key(dev, BTN_EXTRA, BIT(7));
+               input_report_key(dev, BTN_SIDE,  packet[0] & BIT(6));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
                break;
 
        case PSMOUSE_THINKPS:
                /* Extra button on ThinkingMouse */
-               input_report_key(dev, BTN_EXTRA, BIT(3));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
 
                /*
                 * Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                 * Cortron PS2 Trackball reports SIDE button in the
                 * 4th bit of the first byte.
                 */
-               input_report_key(dev, BTN_SIDE, BIT(3));
+               input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
                packet[0] |= BIT(3);
                break;
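The bug being fixed in all of these hunks is the same: BIT(n) is a nonzero constant, so passing it as the value argument made input_report_key() see the button as permanently pressed. Masking the received packet byte first reports the real state:

/* Before: BIT(4) is the constant 0x10, so the button always read as held. */
input_report_key(dev, BTN_SIDE, BIT(4));
/* After: test the bit actually received in the packet. */
input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));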
 
index 7172b88cd0649c8de16ac7373ec2a16c42c338d2..fad2eae4a118e793e617a86a52b28351ef4fafed 100644 (file)
@@ -3,6 +3,7 @@
 #
 config RMI4_CORE
        tristate "Synaptics RMI4 bus support"
+       select IRQ_DOMAIN
        help
          Say Y here if you want to support the Synaptics RMI4 bus.  This is
          required for all RMI4 device support.
index 8bb866c7b9855c5025d31b7be3f722d469f73da9..8eeffa066022dadb9f718f77aab1609700f05543 100644 (file)
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
        if (obj->type == RMI_2D_OBJECT_NONE)
                return;
 
-       if (axis_align->swap_axes)
-               swap(obj->x, obj->y);
-
        if (axis_align->flip_x)
                obj->x = sensor->max_x - obj->x;
 
        if (axis_align->flip_y)
                obj->y = sensor->max_y - obj->y;
 
+       if (axis_align->swap_axes)
+               swap(obj->x, obj->y);
+
        /*
         * Here checking if X offset or y offset are specified is
         * redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
        x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
        y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
 
-       if (axis_align->swap_axes)
-               swap(x, y);
-
        if (axis_align->flip_x)
                x = min(RMI_2D_REL_POS_MAX, -x);
 
        if (axis_align->flip_y)
                y = min(RMI_2D_REL_POS_MAX, -y);
 
+       if (axis_align->swap_axes)
+               swap(x, y);
+
        if (x || y) {
                input_report_rel(sensor->input, REL_X, x);
                input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
        struct input_dev *input = sensor->input;
        int res_x;
        int res_y;
+       int max_x, max_y;
        int input_flags = 0;
 
        if (sensor->report_abs) {
-               if (sensor->axis_align.swap_axes) {
-                       swap(sensor->max_x, sensor->max_y);
-                       swap(sensor->axis_align.clip_x_low,
-                            sensor->axis_align.clip_y_low);
-                       swap(sensor->axis_align.clip_x_high,
-                            sensor->axis_align.clip_y_high);
-               }
-
                sensor->min_x = sensor->axis_align.clip_x_low;
                if (sensor->axis_align.clip_x_high)
                        sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
                                sensor->axis_align.clip_y_high);
 
                set_bit(EV_ABS, input->evbit);
-               input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
-                                       0, 0);
-               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
-                                       0, 0);
+
+               max_x = sensor->max_x;
+               max_y = sensor->max_y;
+               if (sensor->axis_align.swap_axes)
+                       swap(max_x, max_y);
+               input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
 
                if (sensor->x_mm && sensor->y_mm) {
                        res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
                        res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+                       if (sensor->axis_align.swap_axes)
+                               swap(res_x, res_y);
 
                        input_abs_set_res(input, ABS_X, res_x);
                        input_abs_set_res(input, ABS_Y, res_y);
index c5fa53adba8d01318cfeacea440360c51c044a7d..bd0d5ff01b08f9c88920b03f56dbb4a3eed21af3 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
 {}
 #endif
 
+static struct irq_chip rmi_irq_chip = {
+       .name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+                                  struct rmi_function_handler *handler)
+{
+       struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+       int i, error;
+
+       for (i = 0; i < fn->num_of_irqs; i++) {
+               set_bit(fn->irq_pos + i, fn->irq_mask);
+
+               fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+                                               fn->irq_pos + i);
+
+               irq_set_chip_data(fn->irq[i], fn);
+               irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+                                        handle_simple_irq);
+               irq_set_nested_thread(fn->irq[i], 1);
+
+               error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+                                       handler->attention, IRQF_ONESHOT,
+                                       dev_name(&fn->dev), fn);
+               if (error) {
+                       dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+                       return error;
+               }
+       }
+
+       return 0;
+}
+
 static int rmi_function_probe(struct device *dev)
 {
        struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
 
        if (handler->probe) {
                error = handler->probe(fn);
-               return error;
+               if (error)
+                       return error;
+       }
+
+       if (fn->num_of_irqs && handler->attention) {
+               error = rmi_create_function_irq(fn, handler);
+               if (error)
+                       return error;
        }
 
        return 0;
@@ -230,12 +272,18 @@ err_put_device:
 
 void rmi_unregister_function(struct rmi_function *fn)
 {
+       int i;
+
        rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
                        fn->fd.function_number);
 
        device_del(&fn->dev);
        of_node_put(fn->dev.of_node);
        put_device(&fn->dev);
+
+       for (i = 0; i < fn->num_of_irqs; i++)
+               irq_dispose_mapping(fn->irq[i]);
+
 }
 
 /**
index b7625a9ac66ab5384727cc83496223be3aedbe92..96383eab41ba1d850468a64e7ced8a3f1bf72ff6 100644 (file)
 
 struct rmi_device;
 
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS        6
+
 /**
  * struct rmi_function - represents the implementation of an RMI4
  * function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
  * @irq_pos: The position in the irq bitfield this function holds
  * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
  * interrupt handling.
+ * @irq: assigned virq numbers (up to num_of_irqs)
  *
  * @node: entry in device's list of functions
  */
@@ -36,6 +43,7 @@ struct rmi_function {
        struct list_head node;
 
        unsigned int num_of_irqs;
+       int irq[RMI_FN_MAX_IRQS];
        unsigned int irq_pos;
        unsigned long irq_mask[];
 };
@@ -76,7 +84,7 @@ struct rmi_function_handler {
        void (*remove)(struct rmi_function *fn);
        int (*config)(struct rmi_function *fn);
        int (*reset)(struct rmi_function *fn);
-       int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+       irqreturn_t (*attention)(int irq, void *ctx);
        int (*suspend)(struct rmi_function *fn);
        int (*resume)(struct rmi_function *fn);
 };
index 7d29053dfb0f06878ff7897b59f52039a299a089..fc3ab93b7aea454475ee324eecee91470c4a9dc3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <uapi/linux/input.h>
 #include <linux/rmi.h>
 #include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
        return 0;
 }
 
-static void process_one_interrupt(struct rmi_driver_data *data,
-                                 struct rmi_function *fn)
-{
-       struct rmi_function_handler *fh;
-
-       if (!fn || !fn->dev.driver)
-               return;
-
-       fh = to_rmi_function_handler(fn->dev.driver);
-       if (fh->attention) {
-               bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
-                               data->irq_count);
-               if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
-                       fh->attention(fn, data->fn_irq_bits);
-       }
-}
-
 static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 {
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;
-       struct rmi_function *entry;
+       int i;
        int error;
 
        if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
         */
        mutex_unlock(&data->irq_mutex);
 
-       /*
-        * It would be nice to be able to use irq_chip to handle these
-        * nested IRQs.  Unfortunately, most of the current customers for
-        * this driver are using older kernels (3.0.x) that don't support
-        * the features required for that.  Once they've shifted to more
-        * recent kernels (say, 3.3 and higher), this should be switched to
-        * use irq_chip.
-        */
-       list_for_each_entry(entry, &data->function_list, node)
-               process_one_interrupt(data, entry);
+       for_each_set_bit(i, data->irq_status, data->irq_count)
+               handle_nested_irq(irq_find_mapping(data->irqdomain, i));
 
        if (data->input)
                input_sync(data->input);
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
 static int rmi_driver_remove(struct device *dev)
 {
        struct rmi_device *rmi_dev = to_rmi_device(dev);
+       struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 
        rmi_disable_irq(rmi_dev, false);
 
+       irq_domain_remove(data->irqdomain);
+       data->irqdomain = NULL;
+
        rmi_f34_remove_sysfs(rmi_dev);
        rmi_free_function_list(rmi_dev);
 
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+       int irq_count = 0;
        size_t size;
        int retval;
 
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
         * being accessed.
         */
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
-       irq_count = 0;
        data->bootloader_mode = false;
 
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
        if (data->bootloader_mode)
                dev_warn(dev, "Device in bootloader mode.\n");
 
+       /* Allocate and register a linear revmap irq_domain */
+       data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+                                                  &irq_domain_simple_ops,
+                                                  data);
+       if (!data->irqdomain) {
+               dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+               return -ENOMEM;
+       }
+
        data->irq_count = irq_count;
        data->num_of_irq_regs = (data->irq_count + 7) / 8;
 
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       int irq_count = 0;
        int retval;
 
-       irq_count = 0;
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
        if (retval < 0) {
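The removed comment in this file had promised exactly this conversion: instead of hand-dispatching attention callbacks by ANDing per-function bitmaps, the driver now owns a nested irq_domain. Probe allocates a linear domain sized to the counted interrupt sources, each function maps its source bits to virqs with threaded handlers, and the transport interrupt just walks the status bitmap and fires the mapped virqs. The flow, condensed from the hunks above:

/* Probe: one linear domain covering every RMI interrupt source. */
data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
                                           &irq_domain_simple_ops, data);

/* Per function: map source bit -> virq, request a threaded handler. */
fn->irq[i] = irq_create_mapping(data->irqdomain, fn->irq_pos + i);
devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
                          handler->attention, IRQF_ONESHOT,
                          dev_name(&fn->dev), fn);

/* ATTN path: dispatch each pending source through the domain. */
for_each_set_bit(i, data->irq_status, data->irq_count)
        handle_nested_irq(irq_find_mapping(data->irqdomain, i));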
index 8a07ae147df690ee7796c3f9f897904fce6ac6dd..4edaa14fe878650c81e6267550869f8acc714b40 100644 (file)
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f01_attention(struct rmi_function *fn,
-                            unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int error;
        u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
        if (error) {
                dev_err(&fn->dev,
                        "Failed to read device status: %d.\n", error);
-               return error;
+               return IRQ_RETVAL(error);
        }
 
        if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
                error = rmi_dev->driver->reset_handler(rmi_dev);
                if (error) {
                        dev_err(&fn->dev, "Device reset failed: %d\n", error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 struct rmi_function_handler rmi_f01_handler = {
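Each function's attention callback becomes an ordinary threaded-IRQ handler: it receives the virq plus the rmi_function as dev_id, and returns an irqreturn_t instead of an errno. Note that IRQ_RETVAL(x) maps any nonzero x to IRQ_HANDLED, so the IRQ_RETVAL(error) pattern above still counts error exits as handled. The converted shape, reduced to a skeleton (read_and_process is a hypothetical helper):

static irqreturn_t my_fn_attention(int irq, void *ctx)
{
        struct rmi_function *fn = ctx;          /* dev_id passed at request time */
        int error = read_and_process(fn);       /* hypothetical helper */

        if (error)
                return IRQ_RETVAL(error);       /* pattern used by the hunks above */
        return IRQ_HANDLED;
}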
index 88822196d6b723fcf69efd9c3b685fb92dedcf7b..aaa1edc9552254609c1e2ba00008b48bf80f3a85 100644 (file)
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                /* First grab the data passed by the transport device */
                if (drvdata->attn_data.size < ob_len) {
                        dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
 
                memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                "%s: Failed to read F03 output buffers: %d\n",
                                __func__, error);
                        serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                serio_interrupt(f03->serio, ob_data, serio_flags);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static void rmi_f03_remove(struct rmi_function *fn)
index 12a233251793c24c754224ae1b379de52db34e7d..df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56 100644 (file)
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
 }
 
 static void rmi_f11_finger_handler(struct f11_data *f11,
-                                  struct rmi_2d_sensor *sensor,
-                                  unsigned long *irq_bits, int num_irq_regs,
-                                  int size)
+                                  struct rmi_2d_sensor *sensor, int size)
 {
        const u8 *f_state = f11->data.f_state;
        u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
        int rel_fingers;
        int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
 
-       int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
-                                 num_irq_regs * 8);
-       int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
-                                 num_irq_regs * 8);
-
-       if (abs_bits) {
+       if (sensor->report_abs) {
                if (abs_size > size)
                        abs_fingers = size / RMI_F11_ABS_BYTES;
                else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                        rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
                                                        finger_state, i);
                }
-       }
 
-       if (rel_bits) {
-               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
-                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
-               else
-                       rel_fingers = sensor->nbr_fingers;
-
-               for (i = 0; i < rel_fingers; i++)
-                       rmi_f11_rel_pos_report(f11, i);
-       }
-
-       if (abs_bits) {
                /*
                 * the absolute part is made in 2 parts to allow the kernel
                 * tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                }
 
                input_mt_sync_frame(sensor->input);
+       } else if (sensor->report_rel) {
+               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+               else
+                       rel_fingers = sensor->nbr_fingers;
+
+               for (i = 0; i < rel_fingers; i++)
+                       rmi_f11_rel_pos_report(f11, i);
        }
+
 }
 
 static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                data_base_addr, f11->sensor.data_pkt,
                                f11->sensor.pkt_size);
                if (error < 0)
-                       return error;
+                       return IRQ_RETVAL(error);
        }
 
-       rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
-                               drvdata->num_of_irq_regs, valid_bytes);
+       rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f11_resume(struct rmi_function *fn)
index a3d1aa88f2a9ce27fcd1f89d2f87d58b21686fce..5c7f489157792bf32da34e982b715824ec17eaff 100644 (file)
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
                rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
 }
 
-static int rmi_f12_attention(struct rmi_function *fn,
-                            unsigned long *irq_nr_regs)
+static irqreturn_t rmi_f12_attention(int irq, void *ctx)
 {
        int retval;
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
                if (retval < 0) {
                        dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
                                retval);
-                       return retval;
+                       return IRQ_RETVAL(retval);
                }
        }
 
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
 
        input_mt_sync_frame(sensor->input);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f12_write_control_regs(struct rmi_function *fn)
index 82e0f0d43d55271c92c774ba325b1bc40099f83e..5e3ed5ac0c3e40b3919b59493293720877907f1a 100644 (file)
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
        }
 }
 
-static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f30_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f30_data *f30 = dev_get_drvdata(&fn->dev);
        struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
        int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                if (drvdata->attn_data.size < f30->register_count) {
                        dev_warn(&fn->dev,
                                 "F30 interrupted, but data is missing\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
                memcpy(f30->data_regs, drvdata->attn_data.data,
                        f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        dev_err(&fn->dev,
                                "%s: Failed to read F30 data registers: %d\n",
                                __func__, error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        rmi_f03_commit_buttons(f30->f03);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f30_config(struct rmi_function *fn)
index f1f5ac539d5d56b2d554e2aa7bdb50fd0af0d5e3..87a7d4ba382d7210b294f8168adb5083c15b80ee 100644 (file)
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
        return 0;
 }
 
-static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f34_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f34_data *f34 = dev_get_drvdata(&fn->dev);
        int ret;
        u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        complete(&f34->v7.cmd_done);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
index e8a59d1640192b75e6f83db0e2ad355064c19ff9..a6f515bcab2228a8783f10dbf10fae30462fd852 100644 (file)
@@ -610,11 +610,6 @@ error:
        mutex_unlock(&f54->data_mutex);
 }
 
-static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
-{
-       return 0;
-}
-
 static int rmi_f54_config(struct rmi_function *fn)
 {
        struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
        .func = 0x54,
        .probe = rmi_f54_probe,
        .config = rmi_f54_config,
-       .attention = rmi_f54_attention,
        .remove = rmi_f54_remove,
 };
index b353d494ad404888bd2884527fe771937cb1416f..136f6e7bf797767256e66c1c083cb80c55cd7a1b 100644 (file)
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
                },
        },
+       {
+               /* Lenovo LaVie Z */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+               },
+       },
        { }
 };
 
index ff7043f74a3d32286a6b8cdbed91f1bc3f0be12f..d196ac3d8b8cda8e1cf405101ed5603473db821d 100644 (file)
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
        { "GSL3692", 0 },
        { "MSSL1680", 0 },
        { "MSSL0001", 0 },
+       { "MSSL0002", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
index e055d228bfb94057893a8a080dd7bbc709aeb6bf..689ffe5383706dd062cd9ce7aac9bc631c7656ba 100644 (file)
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
        bool "Support for Intel IOMMU using DMA Remapping Devices"
        depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-       select DMA_DIRECT_OPS
        select IOMMU_API
        select IOMMU_IOVA
        select NEED_DMA_MAP_STATE
index 14e4b37224284976a1cb8890e5d13ae5337350cc..115ff26e9cede3494a75d59ee7c87655c72f4090 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -485,14 +484,37 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL           1
 #define IDENTMAP_GFX           2
 #define IDENTMAP_AZALIA                4
 
-#define ecs_enabled(iommu)     (intel_iommu_ecs && ecap_ecs(iommu->ecap))
-#define pasid_enabled(iommu)   (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to alter the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capability bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+                           (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* of the old
+ * or new capability bits is set. */
+#define pasid_enabled(iommu) (ecs_enabled(iommu) &&                    \
+                             (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
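For reference, the two PASID bits described in the comment above sit at different positions in the extended capability register. A hedged sketch of how such tests look, with the bit positions taken from the comment (the *_sketch names are illustrative; the real macros live in intel-iommu.h):

/* Sketch only: bit 28 is the pre-production (BDW/SKL) PASID bit,
 * bit 40 the redefined one, per the comment above.
 */
#define ecap_broken_pasid_sketch(e)  (((e) >> 28) & 0x1)
#define ecap_pasid_sketch(e)         (((e) >> 40) & 0x1)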
@@ -555,6 +577,11 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: disable extended context table support\n");
                        intel_iommu_ecs = 0;
+               } else if (!strncmp(str, "pasid28", 7)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: enable pre-production PASID support\n");
+                       intel_iommu_pasid28 = 1;
+                       iommu_identity_mapping |= IDENTMAP_GFX;
                } else if (!strncmp(str, "tboot_noforce", 13)) {
                        printk(KERN_INFO
                                "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -3713,30 +3740,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
 {
-       void *vaddr;
+       struct page *page = NULL;
+       int order;
 
-       vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-       if (iommu_no_mapping(dev) || !vaddr)
-               return vaddr;
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
 
-       *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-                       PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-                       dev->coherent_dma_mask);
-       if (!*dma_handle)
-               goto out_free_pages;
-       return vaddr;
+       if (!iommu_no_mapping(dev))
+               flags &= ~(GFP_DMA | GFP_DMA32);
+       else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+               if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+                       flags |= GFP_DMA;
+               else
+                       flags |= GFP_DMA32;
+       }
+
+       if (gfpflags_allow_blocking(flags)) {
+               unsigned int count = size >> PAGE_SHIFT;
+
+               page = dma_alloc_from_contiguous(dev, count, order, flags);
+               if (page && iommu_no_mapping(dev) &&
+                   page_to_phys(page) + size > dev->coherent_dma_mask) {
+                       dma_release_from_contiguous(dev, page, count);
+                       page = NULL;
+               }
+       }
+
+       if (!page)
+               page = alloc_pages(flags, order);
+       if (!page)
+               return NULL;
+       memset(page_address(page), 0, size);
+
+       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+                                        DMA_BIDIRECTIONAL,
+                                        dev->coherent_dma_mask);
+       if (*dma_handle)
+               return page_address(page);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 
-out_free_pages:
-       dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
        return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
                                dma_addr_t dma_handle, unsigned long attrs)
 {
-       if (!iommu_no_mapping(dev))
-               intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-       dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+       int order;
+       struct page *page = virt_to_page(vaddr);
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       intel_unmap(dev, dma_handle, size);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
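The rewritten intel_alloc_coherent() only applies GFP zone restrictions when the IOMMU is bypassed; with a translation in place any page is reachable, so GFP_DMA/GFP_DMA32 are cleared. A compact sketch of that zone-selection step (dma_zone_flags is a hypothetical helper name):

/* Sketch, not part of the diff: pick GFP zone flags from the
 * device's coherent DMA mask, mirroring the logic above.
 */
static gfp_t dma_zone_flags(struct device *dev, gfp_t flags, bool bypass)
{
        if (!bypass)            /* IOMMU translates: any page is reachable */
                return flags & ~(GFP_DMA | GFP_DMA32);

        if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
                if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                        flags |= GFP_DMA;       /* x86: below 16MB */
                else
                        flags |= GFP_DMA32;     /* below 4GB */
        }
        return flags;
}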
index 0f52d44b3f6997c8c9e4e6f6f1a7da7b43d3e7c5..f5fe0100f9ffd043d251d96ce473775bfdafd3b4 100644 (file)
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
-       gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+       gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
        return err;
 }
 
index 5377d7e2afba62b518671267b5d29c4963c2e5e6..d7842d312d3eacd7d07853caa6a529a1c8080c99 100644 (file)
@@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
        return its->collections + its_dev->event_map.col_map[event];
 }
 
+static struct its_collection *valid_col(struct its_collection *col)
+{
+       if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+               return NULL;
+
+       return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+       if (valid_col(its->collections + vpe->col_idx))
+               return vpe;
+
+       return NULL;
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
@@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vinvall_cmd.vpe;
+       return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapti_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovi_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 }
 
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its)
 
 static int its_alloc_collections(struct its_node *its)
 {
+       int i;
+
        its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;
 
+       for (i = 0; i < nr_cpu_ids; i++)
+               its->collections[i].target_address = ~0ULL;
+
        return 0;
 }
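Pre-setting every collection's target_address to ~0ULL pairs with the valid_col() check added earlier: RDbase targets are 64KB aligned, so a genuine mapping always has the low 16 bits clear and ~0ULL can never masquerade as one. A sketch of the invariant (collection_is_mapped is a hypothetical name):

/* Sketch: an unmapped collection (target_address == ~0ULL) always
 * fails this test; a mapped one, being 64KB aligned, always passes.
 */
static bool collection_is_mapped(const struct its_collection *col)
{
        return !(col->target_address & GENMASK_ULL(15, 0));
}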
 
@@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       cpu = cpumask_first(cpu_mask);
+       cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids) {
+               if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+                       return -EINVAL;
+
+               cpu = cpumask_first(cpu_online_mask);
+       }
+
        its_dev->event_map.col_map[event] = cpu;
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void)
        u64 timeout = USEC_PER_SEC;
        u64 val;
 
+       /*
+        * If coming via a CPU hotplug event, we don't need to disable
+        * LPIs before trying to re-enable them. They are already
+        * configured and all is well in the world. Detect this case
+        * by checking the allocation of the pending table for the
+        * current CPU.
+        */
+       if (gic_data_rdist()->pend_page)
+               return 0;
+
        if (!gic_rdists_supports_plpis()) {
                pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
                return -ENXIO;
index 1ec3bfe56693ab39831e464048b1959a04250e6d..c671b3212010e6de583e5a5211fc2a20064200f2 100644 (file)
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
        msg->address_lo = lower_32_bits(msi_data->msiir_addr);
        msg->data = data->hwirq;
 
-       if (msi_affinity_flag)
-               msg->data |= cpumask_first(data->common->affinity);
+       if (msi_affinity_flag) {
+               const struct cpumask *mask;
+
+               mask = irq_data_get_effective_affinity_mask(data);
+               msg->data |= cpumask_first(mask);
+       }
 
        iommu_dma_map_msi_msg(data->irq, msg);
 }
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
        }
 
-       cpumask_copy(irq_data->common->affinity, mask);
+       irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK;
 }
index 98f90aadd141b03c42bedd070b66030be7983d86..18c0a1281914fa3218761bd20b2a2e0c85e8aae6 100644 (file)
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
        .getname        = data_sock_getname,
        .sendmsg        = mISDN_sock_sendmsg,
        .recvmsg        = mISDN_sock_recvmsg,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = data_sock_setsockopt,
index 10c08982185a572ff05683461d514e86c8920f96..9c03f35d9df113c6eb6608f4b48b85447635aca9 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig NVM
        bool "Open-Channel SSD target support"
-       depends on BLOCK && HAS_DMA && PCI
+       depends on BLOCK && PCI
        select BLK_DEV_NVME
        help
          Say Y here to enable support for Open-Channel SSDs.
index ab13fcec3fca046c3da6fd621f0e0db9c47b1bf9..75df4c9d8b541de480dfea8d823e0eff389d9ccd 100644 (file)
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
 }
 
 /* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
 {
        if (!strcasecmp(name, "near"))
                return ALGORITHM_RAID10_NEAR;
index 938766794c2ef3b6caf538a0fa787447eadb160c..3d0e2c198f0614dbaf22db657a2bfc9336f89ebd 100644 (file)
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return q && blk_queue_dax(q);
+       return bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_dax(t))
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       else
+               blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
        if (dm_table_supports_dax_write_cache(t))
                dax_write_cache(t->md->dax_dev, true);
 
index 36ef284ad086b881324771d4f882dc6fa96d6dde..72142021b5c9a0410cfb6ccb04a93d613376fb53 100644 (file)
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
 static int __commit_transaction(struct dm_pool_metadata *pmd)
 {
        int r;
-       size_t metadata_len, data_len;
        struct thin_disk_superblock *disk_super;
        struct dm_block *sblock;
 
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        if (r < 0)
                return r;
 
-       r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
-       if (r < 0)
-               return r;
-
-       r = dm_sm_root_size(pmd->data_sm, &data_len);
-       if (r < 0)
-               return r;
-
        r = save_sm_roots(pmd);
        if (r < 0)
                return r;
index 7945238df1c0a67a8e525697f0e419c7594ed1ad..b900723bbd0fae4845a17ef67dadcf33dc5cc67b 100644 (file)
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
+static void requeue_bios(struct pool *pool);
+
 static void check_for_space(struct pool *pool)
 {
        int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
        if (r)
                return;
 
-       if (nr_free)
+       if (nr_free) {
                set_pool_mode(pool, PM_WRITE);
+               requeue_bios(pool);
+       }
 }
 
 /*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 
        r = dm_pool_alloc_data_block(pool->pmd, result);
        if (r) {
-               metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+               if (r == -ENOSPC)
+                       set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+               else
+                       metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
                return r;
        }
 
index 5961c7794ef37008f7a10f521517aded086f20f3..87107c995cb5be3a25b33b10b737ec1ac0dc3fea 100644 (file)
@@ -136,6 +136,7 @@ struct dm_writecache {
        struct dm_target *ti;
        struct dm_dev *dev;
        struct dm_dev *ssd_dev;
+       sector_t start_sector;
        void *memory_map;
        uint64_t memory_map_size;
        size_t metadata_sectors;
@@ -259,7 +260,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        if (da != p) {
                long i;
                wc->memory_map = NULL;
-               pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+               pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
@@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        }
 
        dax_read_unlock(id);
+
+       wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+       wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
        return 0;
 err3:
        kvfree(pages);
@@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 static void persistent_memory_release(struct dm_writecache *wc)
 {
        if (wc->memory_vmapped)
-               vunmap(wc->memory_map);
+               vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
 }
 
 static struct page *persistent_memory_page(void *addr)
@@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
 
 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
 {
-       return wc->metadata_sectors +
+       return wc->start_sector + wc->metadata_sectors +
                ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
 }
 
@@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
                if (unlikely(region.sector + region.count > wc->metadata_sectors))
                        region.count = wc->metadata_sectors - region.sector;
 
+               region.sector += wc->start_sector;
                atomic_inc(&endio.count);
                req.bi_op = REQ_OP_WRITE;
                req.bi_op_flags = REQ_SYNC;
@@ -859,7 +865,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
        if (wc->entries)
                return 0;
-       wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+       wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1487,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
                wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
                wb->page_offset = PAGE_SIZE;
                if (max_pages <= WB_LIST_INLINE ||
-                   unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
-                                                    GFP_NOIO | __GFP_NORETRY |
-                                                    __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+                   unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+                                                          GFP_NOIO | __GFP_NORETRY |
+                                                          __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
                        wb->wc_list = wb->wc_list_inline;
                        max_pages = WB_LIST_INLINE;
                }
@@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
        wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
 
-       if (WC_MODE_PMEM(wc)) {
-               r = persistent_memory_claim(wc);
-               if (r) {
-                       ti->error = "Unable to map persistent memory for cache";
-                       goto bad;
-               }
-       }
-
        /*
         * Parse the cache block size
         */
@@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        while (opt_params) {
                string = dm_shift_arg(&as), opt_params--;
-               if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+               if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+                       unsigned long long start_sector;
+                       string = dm_shift_arg(&as), opt_params--;
+                       if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+                               goto invalid_optional;
+                       wc->start_sector = start_sector;
+                       if (wc->start_sector != start_sector ||
+                           wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+                               goto invalid_optional;
+               } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
                        string = dm_shift_arg(&as), opt_params--;
                        if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
                                goto invalid_optional;
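The start_sector parsing above uses an assign-and-compare truncation check: the value is parsed into an unsigned long long, stored in the (possibly narrower) sector_t field, then compared back, so a value that does not fit is rejected rather than silently wrapped. A standalone userspace sketch of the idiom (names hypothetical):

#include <stdio.h>

typedef unsigned int narrow_t;  /* stand-in for a 32-bit sector_t */

static int parse_narrow(const char *s, narrow_t *out)
{
        unsigned long long v;
        char dummy;

        if (sscanf(s, "%llu%c", &v, &dummy) != 1)
                return -1;      /* not a plain number */
        *out = v;
        if (*out != v)
                return -1;      /* truncated by the narrow type */
        return 0;
}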
@@ -2039,12 +2046,20 @@ invalid_optional:
                goto bad;
        }
 
-       if (!WC_MODE_PMEM(wc)) {
+       if (WC_MODE_PMEM(wc)) {
+               r = persistent_memory_claim(wc);
+               if (r) {
+                       ti->error = "Unable to map persistent memory for cache";
+                       goto bad;
+               }
+       } else {
                struct dm_io_region region;
                struct dm_io_request req;
                size_t n_blocks, n_metadata_blocks;
                uint64_t n_bitmap_bits;
 
+               wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
+
                bio_list_init(&wc->flush_list);
                wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
                if (IS_ERR(wc->flush_thread)) {
@@ -2097,7 +2112,7 @@ invalid_optional:
                }
 
                region.bdev = wc->ssd_dev->bdev;
-               region.sector = 0;
+               region.sector = wc->start_sector;
                region.count = wc->metadata_sectors;
                req.bi_op = REQ_OP_READ;
                req.bi_op_flags = REQ_SYNC;
@@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
 
 static struct target_type writecache_target = {
        .name                   = "writecache",
-       .version                = {1, 0, 0},
+       .version                = {1, 1, 0},
        .module                 = THIS_MODULE,
        .ctr                    = writecache_ctr,
        .dtr                    = writecache_dtr,
index 3c0e45f4dcf5cdf06d79b0c9d107d7455a0b6ad7..a44183ff4be0a3bd4219a7bf5854622aeca79db2 100644 (file)
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        /* Chunk BIO work */
        mutex_init(&dmz->chunk_lock);
-       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
index e65429a29c06e2554e8a0e23ba5a0b2a3b18a8c8..b0dd7027848b7de9f701469c6eb29b5d9c96e1df 100644 (file)
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
-       if (ti->type->direct_access)
-               ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+       ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 
  out:
        dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                                 * the usage of io->orig_bio in dm_remap_zone_report()
                                 * won't be affected by this reassignment.
                                 */
-                               struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
-                                                                &md->queue->bio_split);
+                               struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+                                                         GFP_NOIO, &md->queue->bio_split);
                                ci.io->orig_bio = b;
-                               bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
                                bio_chain(b, bio);
                                ret = generic_make_request(bio);
                                break;
index 29b0cd9ec951ee4603279656e7148ba2a5b8d763..994aed2f9dfff4135170102265523045e893ac0a 100644 (file)
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
index 478cf446827f469c1d02d6f2918fcb8dd870f893..35bd3a62451b30fec0cca41fcdc687bb7920aa56 100644 (file)
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
                            disk->rdev->saved_raid_disk < 0)
                                conf->fullsync = 1;
                }
+
+               if (disk->replacement &&
+                   !test_bit(In_sync, &disk->replacement->flags) &&
+                   disk->replacement->saved_raid_disk < 0) {
+                       conf->fullsync = 1;
+               }
+
                disk->recovery_disabled = mddev->recovery_disabled - 1;
        }
 
index f1178f6f434d0f76d419845d8789d87095ed355a..aff0ab7bf83d565e81507e48f9ae793586d2f3fd 100644 (file)
@@ -222,7 +222,7 @@ struct vb2_dc_attachment {
        enum dma_data_direction dma_dir;
 };
 
-static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct vb2_dc_attachment *attach;
@@ -358,7 +358,6 @@ static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .map = vb2_dc_dmabuf_ops_kmap,
-       .map_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
index 753ed3138dcc8bd77bacda888fa09ed849e09f69..015e737095cdd6644b4e0332120aa3ee993eb1d0 100644 (file)
@@ -371,7 +371,7 @@ struct vb2_dma_sg_attachment {
        enum dma_data_direction dma_dir;
 };
 
-static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct vb2_dma_sg_attachment *attach;
@@ -507,7 +507,6 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
        .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
        .map = vb2_dma_sg_dmabuf_ops_kmap,
-       .map_atomic = vb2_dma_sg_dmabuf_ops_kmap,
        .vmap = vb2_dma_sg_dmabuf_ops_vmap,
        .mmap = vb2_dma_sg_dmabuf_ops_mmap,
        .release = vb2_dma_sg_dmabuf_ops_release,
index 359fb9804d160426ab037133e2102b979833ecff..6dfbd5b0590759e0c712094dffb96518900efa1a 100644 (file)
@@ -209,7 +209,7 @@ struct vb2_vmalloc_attachment {
        enum dma_data_direction dma_dir;
 };
 
-static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
 {
        struct vb2_vmalloc_attachment *attach;
@@ -346,7 +346,6 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .map = vb2_vmalloc_dmabuf_ops_kmap,
-       .map_atomic = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
index 40826bba06b6d52c06bef7eb64bea6a719496dd0..fcfab6635f9c6649a64e0b144f55df672d621389 100644 (file)
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
        bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-       struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;
 
        if (attr->attach_flags)
                return -EINVAL;
 
-       prog = bpf_prog_get_type(attr->attach_bpf_fd,
-                                BPF_PROG_TYPE_LIRC_MODE2);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
        rcdev = rc_dev_get_from_fd(attr->target_fd);
-       if (IS_ERR(rcdev)) {
-               bpf_prog_put(prog);
+       if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);
-       }
 
        ret = lirc_bpf_attach(rcdev, prog);
-       if (ret)
-               bpf_prog_put(prog);
 
        put_device(&rcdev->dev);
 
index 753b1a698fc4e3ef74a7ebb718a1997ae8a13a4d..6b16946f9b05d45ea3372f955d12151bb2782cfd 100644 (file)
@@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name,
        d_instantiate(path.dentry, inode);
 
        file = alloc_file(&path, OPEN_FMODE(flags), fops);
-       if (IS_ERR(file))
-               goto err_dput;
+       if (IS_ERR(file)) {
+               path_put(&path);
+               goto err_fs;
+       }
        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = priv;
 
        return file;
 
-err_dput:
-       path_put(&path);
 err_inode:
        iput(inode);
 err_fs:
index e05c3245930a1e3f94aad4c0a7f85e015759ec76..fa840666bdd1aeb20cca67fc5df9556fc73135aa 100644 (file)
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
 static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
        void __iomem *address = (void __iomem *)file->private_data;
-       unsigned char *page;
-       int retval;
        int len = 0;
        unsigned int value;
-
-       if (*offset < 0)
-               return -EINVAL;
-       if (count == 0 || count > 1024)
-               return 0;
-       if (*offset != 0)
-               return 0;
-
-       page = (unsigned char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
+       char lbuf[20];
 
        value = readl(address);
-       len = sprintf(page, "%d\n", value);
-
-       if (copy_to_user(buf, page, len)) {
-               retval = -EFAULT;
-               goto exit;
-       }
-       *offset += len;
-       retval = len;
+       len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
 
-exit:
-       free_page((unsigned long)page);
-       return retval;
+       return simple_read_from_buffer(buf, count, offset, lbuf, len);
 }
 
 static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
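simple_read_from_buffer() absorbs the open-coded offset validation, copy_to_user() and *offset bookkeeping deleted above: it copies at most count bytes from the kernel buffer starting at *offset, advances the offset, and returns the byte count or -EFAULT. A minimal sketch of a read handler built on it (example_file_read is a hypothetical name):

static ssize_t example_file_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *offset)
{
        char lbuf[20];
        int len;

        /* Format a small value, then let the helper handle the rest. */
        len = snprintf(lbuf, sizeof(lbuf), "%d\n", 42);
        return simple_read_from_buffer(buf, count, offset, lbuf, len);
}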
index b0b8f18a85e3e132d7741e9daab53fe9270b1b8a..6649f0d56d2f0cdee1dde6d57ad68141ccce12bf 100644 (file)
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev,
        if (&cl->link == &dev->file_list) {
                /* A message for an unconnected fixed-address client
                 * should be silently discarded.
+                * On power down, a client may have been force-cleaned;
+                * silently discard such messages as well.
                 */
-               if (hdr_is_fixed(mei_hdr)) {
+               if (hdr_is_fixed(mei_hdr) ||
+                   dev->dev_state == MEI_DEV_POWER_DOWN) {
                        mei_irq_discard_msg(dev, mei_hdr);
                        ret = 0;
                        goto reset_slots;
index efd733472a3531804225c5515ade4f4cf69fd707..56c6f79a5c5af83a862cf76352f172f5e02ee0b8 100644 (file)
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
        unsigned long status;
-       unsigned long pfn = page_to_pfn(b->page);
+       unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
        STATS_INC(b->stats.lock[is_2m_pages]);
 
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
        unsigned long status;
-       unsigned long pfn = page_to_pfn(b->page);
+       unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
        STATS_INC(b->stats.unlock[is_2m_pages]);
 
index ef05e00393782d16d77f2f1ecf16803f69461217..2a833686784b6b459d9744b366ef22cb5ca1279c 100644 (file)
@@ -27,8 +27,8 @@ struct mmc_gpio {
        bool override_cd_active_level;
        irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
        char *ro_label;
-       char cd_label[0];
        u32 cd_debounce_delay_ms;
+       char cd_label[];
 };
 
 static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
index 623f4d27fa0161b1a938521c266176f544f2ff22..80dc2fd6576cf3f88afd695ad1f36ec1b4f52b41 100644 (file)
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
         * It's used when HS400 mode is enabled.
         */
        if (data->flags & MMC_DATA_WRITE &&
-               !(host->timing != MMC_TIMING_MMC_HS400))
-               return;
+               host->timing != MMC_TIMING_MMC_HS400)
+               goto disable;
 
        if (data->flags & MMC_DATA_WRITE)
                enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
                enable = SDMMC_CARD_RD_THR_EN;
 
        if (host->timing != MMC_TIMING_MMC_HS200 &&
-           host->timing != MMC_TIMING_UHS_SDR104)
+           host->timing != MMC_TIMING_UHS_SDR104 &&
+           host->timing != MMC_TIMING_MMC_HS400)
                goto disable;
 
        blksz_depth = blksz / (1 << host->data_shift);
index f7f9773d161f1e5e2f58ae1f9a32c800ac5f3474..d032bd63444d10295826750b80a560c0335ddda5 100644 (file)
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
        renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
                                            RST_RESERVED_BITS | val);
 
-       if (host->data && host->data->flags & MMC_DATA_READ)
-               clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+       clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
 
        renesas_sdhi_internal_dmac_enable_dma(host, true);
 }
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
                goto force_pio;
 
        /* This DMAC cannot handle buffers that are not 8-byte aligned */
-       if (!IS_ALIGNED(sg_dma_address(sg), 8)) {
-               dma_unmap_sg(&host->pdev->dev, sg, host->sg_len,
-                            mmc_get_dma_dir(data));
-               goto force_pio;
-       }
+       if (!IS_ALIGNED(sg_dma_address(sg), 8))
+               goto force_pio_with_unmap;
 
        if (data->flags & MMC_DATA_READ) {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
                if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
                    test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
-                       goto force_pio;
+                       goto force_pio_with_unmap;
        } else {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
        }
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 
        return;
 
+force_pio_with_unmap:
+       dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
+
 force_pio:
        host->force_pio = true;
        renesas_sdhi_internal_dmac_enable_dma(host, false);
index d6aef70d34fac0554d223bed4121e173dea1281d..4eb3d29ecde1078512f85d291904a63d5fa358c6 100644 (file)
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 
                        if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
                                val |= SDHCI_SUPPORT_HS400;
+
+                       /*
+                        * Do not advertise faster UHS modes if there are no
+                        * pinctrl states for 100MHz/200MHz.
+                        */
+                       if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
+                           IS_ERR_OR_NULL(imx_data->pins_200mhz))
+                               val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
+                                        | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
                }
        }
 
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                                                ESDHC_PINCTRL_STATE_100MHZ);
                imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
                                                ESDHC_PINCTRL_STATE_200MHZ);
-               if (IS_ERR(imx_data->pins_100mhz) ||
-                               IS_ERR(imx_data->pins_200mhz)) {
-                       dev_warn(mmc_dev(host->mmc),
-                               "could not get ultra high speed state, work on normal mode\n");
-                       /*
-                        * fall back to not supporting uhs by specifying no
-                        * 1.8v quirk
-                        */
-                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-               }
-       } else {
-               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
        }
 
        /* call to generic mmc_of_parse to support additional capabilities */
index e7472590f2ed6416b800ef85c2efeae574cc6718..8e7f3e35ee3dc48eef93c126f2d161ad63b3dc12 100644 (file)
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
        sunxi_mmc_init_host(host);
        sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
        sunxi_mmc_set_clk(host, &mmc->ios);
+       enable_irq(host->irq);
 
        return 0;
 }
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct sunxi_mmc_host *host = mmc_priv(mmc);
 
+       /*
+        * When clocks are off, it's possible receiving
+        * fake interrupts, which will stall the system.
+        * Disabling the irq  will prevent this.
+        */
+       disable_irq(host->irq);
        sunxi_mmc_reset_host(host);
        sunxi_mmc_disable(host);
 
index a0c655628d6d5283fd9b5cd8234b8b09857d9c75..1b64ac8c5bc86309061a5540d385eb6e2ca866cc 100644 (file)
@@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 
 struct ppb_lock {
        struct flchip *chip;
-       loff_t offset;
+       unsigned long adr;
        int locked;
 };
 
@@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        unsigned long timeo;
        int ret;
 
+       adr += chip->start;
        mutex_lock(&chip->mutex);
-       ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+       ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
@@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
 
        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                chip->state = FL_LOCKING;
-               map_write(map, CMD(0xA0), chip->start + adr);
-               map_write(map, CMD(0x00), chip->start + adr);
+               map_write(map, CMD(0xA0), adr);
+               map_write(map, CMD(0x00), adr);
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                /*
                 * Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        map_write(map, CMD(0x00), chip->start);
 
        chip->state = FL_READY;
-       put_chip(map, chip, adr + chip->start);
+       put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
 
        return ret;
@@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                 * sectors shall be unlocked, so lets keep their locking
                 * status at "unlocked" (locked=0) for the final re-locking.
                 */
-               if ((adr < ofs) || (adr >= (ofs + len))) {
+               if ((offset < ofs) || (offset >= (ofs + len))) {
                        sect[sectors].chip = &cfi->chips[chipnum];
-                       sect[sectors].offset = offset;
+                       sect[sectors].adr = adr;
                        sect[sectors].locked = do_ppb_xxlock(
                                map, &cfi->chips[chipnum], adr, 0,
                                DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                        i++;
 
                if (adr >> cfi->chipshift) {
+                       if (offset >= (ofs + len))
+                               break;
                        adr = 0;
                        chipnum++;
 
@@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
         */
        for (i = 0; i < sectors; i++) {
                if (sect[i].locked)
-                       do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+                       do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
                                      DO_XXLOCK_ONEBLOCK_LOCK);
        }
 
index 3a6f450d1093c4c59d8b79bfafbc0d0c8c744722..53febe8a68c3cdfadfad784bca3335879a86d1f8 100644 (file)
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
        { "AT45DB642x",  0x1f2800, 8192, 1056, 11, SUP_POW2PS},
        { "at45db642d",  0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
 
-       { "AT45DB641E",  0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
-       { "at45db641e",  0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+       { "AT45DB641E",  0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+       { "at45db641e",  0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
 };
 
 static struct flash_info *jedec_lookup(struct spi_device *spi,
index cfd33e6ca77f903a6afc636e73f31ffb40d0d0bd..5869e90cc14b3c1f367b17a4f58bdb31c1e188ea 100644 (file)
@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       denali->clk_x_rate = clk_get_rate(dt->clk);
+       /*
+        * Hardcode the clock rate for backward compatibility.
+        * This works for both SOCFPGA and UniPhier.
+        */
+       denali->clk_x_rate = 200000000;
 
        ret = denali_init(denali);
        if (ret)
index 45786e707b7bd1ae5a4cba72825bcfb00fdda370..26cef218bb43ee1bd1fb2cf1dbadeb8ad38e2f8e 100644 (file)
@@ -48,7 +48,7 @@
 #define NFC_V1_V2_CONFIG               (host->regs + 0x0a)
 #define NFC_V1_V2_ECC_STATUS_RESULT    (host->regs + 0x0c)
 #define NFC_V1_V2_RSLTMAIN_AREA                (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA       (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA         (host->regs + 0x10)
 #define NFC_V1_V2_WRPROT               (host->regs + 0x12)
 #define NFC_V1_UNLOCKSTART_BLKADDR     (host->regs + 0x14)
 #define NFC_V1_UNLOCKEND_BLKADDR       (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
        writew(config1, NFC_V1_V2_CONFIG1);
        /* preset operation */
 
+       /* spare area size in 16-bit half-words */
+       writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
        /* Unlock the internal RAM Buffer */
        writew(0x2, NFC_V1_V2_CONFIG);
 
index 10c4f9919850c3e7b56ed6bdb083a0fc35a0b7f5..b01d15ec4c56bfbdded578526d76e2ed12b65093 100644 (file)
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
 
        for (; page < page_end; page++) {
                res = chip->ecc.read_oob(mtd, chip, page);
-               if (res)
+               if (res < 0)
                        return res;
 
                bad = chip->oob_poi[chip->badblockpos];
index 7ed1f87e742a7accbbeb441b02e199878b3ef035..49c546c97c6f9a370ff64636deaf778bce2c3ce0 100644 (file)
 
 #include <linux/mtd/rawnand.h>
 
+/*
+ * Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings unlike what is declared in the parameter page. Unflag
+ * this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+       unsigned int i;
+       static const char * const broken_get_timings[] = {
+               "MX30LF1G18AC",
+               "MX30LF1G28AC",
+               "MX30LF2G18AC",
+               "MX30LF2G28AC",
+               "MX30LF4G18AC",
+               "MX30LF4G28AC",
+               "MX60LF8G18AC",
+       };
+
+       if (!chip->parameters.supports_set_get_features)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
+               if (!strcmp(broken_get_timings[i], chip->parameters.model))
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(broken_get_timings))
+               return;
+
+       bitmap_clear(chip->parameters.get_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+       bitmap_clear(chip->parameters.set_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
 static int macronix_nand_init(struct nand_chip *chip)
 {
        if (nand_is_slc(chip))
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 
-       /*
-        * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
-        * the timings unlike what is declared in the parameter page. Unflag
-        * this feature to avoid unnecessary downturns.
-        */
-       if (chip->parameters.supports_set_get_features &&
-           !strcmp("MX30LF2G18AC", chip->parameters.model)) {
-               bitmap_clear(chip->parameters.get_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-               bitmap_clear(chip->parameters.set_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-       }
+       macronix_nand_fix_broken_get_timings(chip);
 
        return 0;
 }
index 0af45b134c0cf859902f3d138b305bf5836d526d..5ec4c90a637d549a644441461ffc7717c623a4bc 100644 (file)
@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
 
        if (p->supports_set_get_features) {
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
        }
 
        return 0;
index c3f7aaa5d18f7de068f797b84b05b31be4248897..d7e10b36a0b94b476fb5068d634c58ee756fbc2c 100644 (file)
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
        if (ret)
                return ret;
 
-       if (f_pdata->use_direct_mode)
+       if (f_pdata->use_direct_mode) {
                memcpy_toio(cqspi->ahb_base + to, buf, len);
-       else
+               ret = cqspi_wait_idle(cqspi);
+       } else {
                ret = cqspi_indirect_write_execute(nor, to, buf, len);
+       }
        if (ret)
                return ret;
 
index 98663c50ded0b4b3f784be23baa8fa0e19b419ee..4d5d01cb8141b89b18cf9d7affd8049a42c19e3b 100644 (file)
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
 static int bond_option_mode_set(struct bonding *bond,
                                const struct bond_opt_value *newval)
 {
-       if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-               netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
-                          newval->string);
-               /* disable arp monitoring */
-               bond->params.arp_interval = 0;
-               /* set miimon to default value */
-               bond->params.miimon = BOND_DEFAULT_MIIMON;
-               netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
-                          bond->params.miimon);
+       if (!bond_mode_uses_arp(newval->value)) {
+               if (bond->params.arp_interval) {
+                       netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+                                  newval->string);
+                       /* disable arp monitoring */
+                       bond->params.arp_interval = 0;
+               }
+
+               if (!bond->params.miimon) {
+                       /* set miimon to default value */
+                       bond->params.miimon = BOND_DEFAULT_MIIMON;
+                       netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+                                  bond->params.miimon);
+               }
        }
 
        if (newval->value == BOND_MODE_ALB)
index b397a33f3d32b5e3c28398a660c736d45a74179d..9b449400376bc536cd0d53ea4abfe13d14515ba6 100644 (file)
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
        int err;
 
        err = pm_runtime_get_sync(priv->device);
-       if (err)
+       if (err < 0) {
                pm_runtime_put_noidle(priv->device);
+               return err;
+       }
 
-       return err;
+       return 0;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
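The fix above relies on pm_runtime_get_sync() returning a negative errno on failure but 0 or 1 on success (1 when the device was already active), so a bare "if (err)" misfires; and because the call raises the usage count even on failure, the error path must drop it with pm_runtime_put_noidle(). A standalone sketch (example_runtime_get is hypothetical):

static int example_runtime_get(struct device *dev)
{
        int err = pm_runtime_get_sync(dev);

        if (err < 0) {
                pm_runtime_put_noidle(dev);  /* balance the usage count */
                return err;
        }
        return 0;  /* treat both 0 and 1 as success */
}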
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
 
        } else {
        /* Version 3.1.x or 3.2.x */
-               cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+               cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+                         CCCR_NISO);
 
                /* Only 3.2.x has NISO Bit implemented */
                if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
        priv->can.clock.freq = clk_get_rate(cclk);
        priv->mram_base = mram_addr;
 
-       m_can_of_parse_mram(priv, mram_config_vals);
-
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       m_can_of_parse_mram(priv, mram_config_vals);
+
        devm_can_led_init(dev);
 
        of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
        return ret;
 }
 
-/* TODO: runtime PM with power down or sleep mode  */
-
 static __maybe_unused int m_can_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       m_can_init_ram(priv);
-
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
        if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
                if (ret)
                        return ret;
 
+               m_can_init_ram(priv);
                m_can_start(ndev);
                netif_device_attach(ndev);
                netif_start_queue(ndev);
index c7427bdd3a4bff957aaef3fdb1d2f8ed0ead41cb..2949a381a94dceb2674f150ad5feaf580a201d25 100644 (file)
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
                return 0;
        }
        cdm = of_iomap(np_cdm, 0);
+       if (!cdm) {
+               of_node_put(np_cdm);
+               dev_err(&ofdev->dev, "can't map clock node!\n");
+               return 0;
+       }
 
        if (in_8(&cdm->ipb_clk_sel) & 0x1)
                freq *= 2;
index b9e28578bc7bd7463485c316020af6e9cdaf9d02..455a3797a20065d264a837dcc89e91453a2a93ba 100644 (file)
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
 #define PCIEFD_REG_SYS_VER1            0x0040  /* version reg #1 */
 #define PCIEFD_REG_SYS_VER2            0x0044  /* version reg #2 */
 
+#define PCIEFD_FW_VERSION(x, y, z)     (((u32)(x) << 24) | \
+                                        ((u32)(y) << 16) | \
+                                        ((u32)(z) << 8))
+
 /* System Control Registers Bits */
 #define PCIEFD_SYS_CTL_TS_RST          0x00000001      /* timestamp clock */
 #define PCIEFD_SYS_CTL_CLK_EN          0x00000002      /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
                 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
                 hw_ver_major, hw_ver_minor, hw_ver_sub);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /* FW < v3.3.0: the DMA logic doesn't correctly handle the mix of
+        * 32-bit and 64-bit logical addresses, so this workaround forces
+        * use of 32-bit DMA addresses only when such firmware is detected.
+        */
+       if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+           PCIEFD_FW_VERSION(3, 3, 0)) {
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       dev_warn(&pdev->dev,
+                                "warning: can't set DMA mask %llxh (err %d)\n",
+                                DMA_BIT_MASK(32), err);
+       }
+#endif
+
        /* stop system clock */
        pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
                            PCIEFD_REG_SYS_CTL_CLR);
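
PCIEFD_FW_VERSION() packs major/minor/sub into descending byte lanes of a u32, so two packed versions compare in natural order with a plain integer comparison, which is what the `< PCIEFD_FW_VERSION(3, 3, 0)` test relies on. The macro reproduced standalone as a quick check:

#include <stdint.h>
#include <stdio.h>

#define PCIEFD_FW_VERSION(x, y, z)     (((uint32_t)(x) << 24) | \
                                        ((uint32_t)(y) << 16) | \
                                        ((uint32_t)(z) << 8))

int main(void)
{
        /* 2.9.5 predates 3.3.0; 3.3.0 does not predate itself */
        printf("%d\n", PCIEFD_FW_VERSION(2, 9, 5) < PCIEFD_FW_VERSION(3, 3, 0)); /* 1 */
        printf("%d\n", PCIEFD_FW_VERSION(3, 3, 0) < PCIEFD_FW_VERSION(3, 3, 0)); /* 0 */
        return 0;
}
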
index 89aec07c225f58d26a80ce4795afbaa6c19d9d84..5a24039733efd23255142c4abc0d2b758d188554 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2012 - 2014 Xilinx, Inc.
  * Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
  *
  * Description:
  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL          (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
                                 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
                                 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-                                XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+                                XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT             7  /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
 /**
 * struct xcan_priv - This definition defines the CAN driver instance
  * @can:                       CAN private data structure.
+ * @tx_lock:                   Lock for synchronizing TX interrupt handling
  * @tx_head:                   Tx CAN packets ready to send on the queue
 * @tx_tail:                   Tx CAN packets successfully sent on the queue
 * @tx_max:                    Maximum number of packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
  */
 struct xcan_priv {
        struct can_priv can;
+       spinlock_t tx_lock;
        unsigned int tx_head;
        unsigned int tx_tail;
        unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
        .brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK     0x0001
+struct xcan_devtype_data {
+       unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv:      Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
                usleep_range(500, 10000);
        }
 
+       /* reset clears FIFOs */
+       priv->tx_head = 0;
+       priv->tx_tail = 0;
+
        return 0;
 }
 
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 id, dlc, data[2] = {0, 0};
+       unsigned long flags;
 
        if (can_dropped_invalid_skb(ndev, skb))
                return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
        can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
        priv->tx_head++;
 
        /* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                stats->tx_bytes += cf->can_dlc;
        }
 
+       /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+       if (priv->tx_max > 1)
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
        /* Check if the TX buffer is full */
        if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
                netif_stop_queue(ndev);
 
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
        return NETDEV_TX_OK;
 }
 
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
        return 1;
 }
 
+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev:      Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+       if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+               return CAN_STATE_ERROR_PASSIVE;
+       else if (status & XCAN_SR_ERRWRN_MASK)
+               return CAN_STATE_ERROR_WARNING;
+       else
+               return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev:      Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf:                Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+                                enum can_state new_state,
+                                struct can_frame *cf)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+       u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+       u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+       priv->can.state = new_state;
+
+       if (cf) {
+               cf->can_id |= CAN_ERR_CRTL;
+               cf->data[6] = txerr;
+               cf->data[7] = rxerr;
+       }
+
+       switch (new_state) {
+       case CAN_STATE_ERROR_PASSIVE:
+               priv->can.can_stats.error_passive++;
+               if (cf)
+                       cf->data[1] = (rxerr > 127) ?
+                                       CAN_ERR_CRTL_RX_PASSIVE :
+                                       CAN_ERR_CRTL_TX_PASSIVE;
+               break;
+       case CAN_STATE_ERROR_WARNING:
+               priv->can.can_stats.error_warning++;
+               if (cf)
+                       cf->data[1] |= (txerr > rxerr) ?
+                                       CAN_ERR_CRTL_TX_WARNING :
+                                       CAN_ERR_CRTL_RX_WARNING;
+               break;
+       case CAN_STATE_ERROR_ACTIVE:
+               if (cf)
+                       cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+               break;
+       default:
+               /* non-ERROR states are handled elsewhere */
+               WARN_ON(1);
+               break;
+       }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev:      Pointer to net_device structure
+ *
+ * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+       struct xcan_priv *priv = netdev_priv(ndev);
+       enum can_state old_state = priv->can.state;
+       enum can_state new_state;
+
+       /* changing error state due to successful frame RX/TX can only
+        * occur from these states
+        */
+       if (old_state != CAN_STATE_ERROR_WARNING &&
+           old_state != CAN_STATE_ERROR_PASSIVE)
+               return;
+
+       new_state = xcan_current_error_state(ndev);
+
+       if (new_state != old_state) {
+               struct sk_buff *skb;
+               struct can_frame *cf;
+
+               skb = alloc_can_err_skb(ndev, &cf);
+
+               xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+               if (skb) {
+                       struct net_device_stats *stats = &ndev->stats;
+
+                       stats->rx_packets++;
+                       stats->rx_bytes += cf->can_dlc;
+                       netif_rx(skb);
+               }
+       }
+}
+
 /**
  * xcan_err_interrupt - error frame ISR
  * @ndev:      net_device pointer
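
xcan_set_error_state() above derives both error counters from a single ECR read. Assuming the register layout implied by the mask names (TEC in the low byte, REC in the byte above it), the extraction is plain masking and shifting; a standalone illustration with assumed mask values:

#include <stdint.h>
#include <stdio.h>

/* assumed layout for illustration: TEC in bits 7:0, REC in bits 15:8 */
#define ECR_TEC_MASK   0x000000ffu
#define ECR_REC_MASK   0x0000ff00u
#define ECR_REC_SHIFT  8

int main(void)
{
        uint32_t ecr = 0x00002a15; /* sample register value */
        uint32_t txerr = ecr & ECR_TEC_MASK;
        uint32_t rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;

        printf("txerr=%u rxerr=%u\n", txerr, rxerr); /* txerr=21 rxerr=42 */
        return 0;
}
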
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        struct net_device_stats *stats = &ndev->stats;
        struct can_frame *cf;
        struct sk_buff *skb;
-       u32 err_status, status, txerr = 0, rxerr = 0;
+       u32 err_status;
 
        skb = alloc_can_err_skb(ndev, &cf);
 
        err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
        priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-       txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-       rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-                       XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-       status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
        if (isr & XCAN_IXR_BSOFF_MASK) {
                priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
                can_bus_off(ndev);
                if (skb)
                        cf->can_id |= CAN_ERR_BUSOFF;
-       } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-               priv->can.state = CAN_STATE_ERROR_PASSIVE;
-               priv->can.can_stats.error_passive++;
-               if (skb) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] = (rxerr > 127) ?
-                                       CAN_ERR_CRTL_RX_PASSIVE :
-                                       CAN_ERR_CRTL_TX_PASSIVE;
-                       cf->data[6] = txerr;
-                       cf->data[7] = rxerr;
-               }
-       } else if (status & XCAN_SR_ERRWRN_MASK) {
-               priv->can.state = CAN_STATE_ERROR_WARNING;
-               priv->can.can_stats.error_warning++;
-               if (skb) {
-                       cf->can_id |= CAN_ERR_CRTL;
-                       cf->data[1] |= (txerr > rxerr) ?
-                                       CAN_ERR_CRTL_TX_WARNING :
-                                       CAN_ERR_CRTL_RX_WARNING;
-                       cf->data[6] = txerr;
-                       cf->data[7] = rxerr;
-               }
+       } else {
+               enum can_state new_state = xcan_current_error_state(ndev);
+
+               xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
        }
 
        /* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
        if (isr & XCAN_IXR_RXOFLW_MASK) {
                stats->rx_over_errors++;
                stats->rx_errors++;
-               priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
                if (skb) {
                        cf->can_id |= CAN_ERR_CRTL;
                        cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-               if (isr & XCAN_IXR_RXOK_MASK) {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXOK_MASK);
-                       work_done += xcan_rx(ndev);
-               } else {
-                       priv->write_reg(priv, XCAN_ICR_OFFSET,
-                               XCAN_IXR_RXNEMP_MASK);
-                       break;
-               }
+               work_done += xcan_rx(ndev);
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
                isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
 
-       if (work_done)
+       if (work_done) {
                can_led_event(ndev, CAN_LED_EVENT_RX);
+               xcan_update_error_state_after_rxtx(ndev);
+       }
 
        if (work_done < quota) {
                napi_complete_done(napi, work_done);
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+               ier |= XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
        }
        return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
        struct net_device_stats *stats = &ndev->stats;
+       unsigned int frames_in_fifo;
+       int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+       unsigned long flags;
+       int retries = 0;
+
+       /* Synchronize with xmit as we need to know the exact number
+        * of frames in the FIFO to stay in sync due to the TXFEMP
+        * handling.
+        * This also prevents a race between netif_wake_queue() and
+        * netif_stop_queue().
+        */
+       spin_lock_irqsave(&priv->tx_lock, flags);
+
+       frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+       if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+               /* clear TXOK anyway to avoid getting back here */
+               priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+               spin_unlock_irqrestore(&priv->tx_lock, flags);
+               return;
+       }
+
+       /* Check if 2 frames were sent (TXOK only means that at least 1
+        * frame was sent).
+        */
+       if (frames_in_fifo > 1) {
+               WARN_ON(frames_in_fifo > priv->tx_max);
+
+               /* Synchronize TXOK and isr so that after the loop:
+                * (1) isr variable is up-to-date at least up to TXOK clear
+                *     time. This avoids us clearing a TXOK of a second frame
+                *     but not noticing that the FIFO is now empty and thus
+                *     marking only a single frame as sent.
+                * (2) No TXOK is left. Having one could mean leaving a
+                *     stray TXOK as we might process the associated frame
+                *     via TXFEMP handling as we read TXFEMP *after* TXOK
+                *     clear to satisfy (1).
+                */
+               while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+                       priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+                       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+               }
 
-       while ((priv->tx_head - priv->tx_tail > 0) &&
-                       (isr & XCAN_IXR_TXOK_MASK)) {
+               if (isr & XCAN_IXR_TXFEMP_MASK) {
+                       /* nothing in FIFO anymore */
+                       frames_sent = frames_in_fifo;
+               }
+       } else {
+               /* single frame in fifo, just clear TXOK */
                priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+       }
+
+       while (frames_sent--) {
                can_get_echo_skb(ndev, priv->tx_tail %
                                        priv->tx_max);
                priv->tx_tail++;
                stats->tx_packets++;
-               isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
        }
-       can_led_event(ndev, CAN_LED_EVENT_TX);
+
        netif_wake_queue(ndev);
+
+       spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+       can_led_event(ndev, CAN_LED_EVENT_TX);
+       xcan_update_error_state_after_rxtx(ndev);
 }
 
 /**
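
The frames_in_fifo bookkeeping above works because tx_head and tx_tail are free-running unsigned counters: their difference is the in-flight count even across integer wraparound, and `counter % tx_max` recovers the FIFO slot. A small demonstration of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int head = 0xffffffff, tail = 0xffffffff, depth = 2;

        head++; /* enqueue: head wraps to 0 */
        head++; /* enqueue: head is now 1 */

        printf("in flight: %u (full: %d)\n", head - tail, head - tail == depth);
        printf("next slot to complete: %u\n", tail % depth);
        return 0;
}
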
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
        struct net_device *ndev = (struct net_device *)dev_id;
        struct xcan_priv *priv = netdev_priv(ndev);
        u32 isr, ier;
+       u32 isr_errors;
 
        /* Get the interrupt status from Xilinx CAN */
        isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
                xcan_tx_interrupt(ndev, isr);
 
        /* Check the type of error interrupt and process it */
-       if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-                       XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-               priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-                               XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-                               XCAN_IXR_ARBLST_MASK));
+       isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+                           XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+       if (isr_errors) {
+               priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
                xcan_err_interrupt(ndev, isr);
        }
 
        /* Check the type of receive interrupt and process it */
-       if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+       if (isr & XCAN_IXR_RXNEMP_MASK) {
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-               ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+               ier &= ~XCAN_IXR_RXNEMP_MASK;
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
                napi_schedule(&priv->napi);
        }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 static void xcan_chip_stop(struct net_device *ndev)
 {
        struct xcan_priv *priv = netdev_priv(ndev);
-       u32 ier;
 
        /* Disable interrupts and leave the can in configuration mode */
-       ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-       ier &= ~XCAN_INTR_ALL;
-       priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+       set_reset_mode(ndev);
        priv->can.state = CAN_STATE_STOPPED;
 }
 
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
  */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-       if (!device_may_wakeup(dev))
-               return pm_runtime_force_suspend(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
 
-       return 0;
+       if (netif_running(ndev)) {
+               netif_stop_queue(ndev);
+               netif_device_detach(ndev);
+               xcan_chip_stop(ndev);
+       }
+
+       return pm_runtime_force_suspend(dev);
 }
 
 /**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
  */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-       if (!device_may_wakeup(dev))
-               return pm_runtime_force_resume(dev);
+       struct net_device *ndev = dev_get_drvdata(dev);
+       int ret;
 
-       return 0;
+       ret = pm_runtime_force_resume(dev);
+       if (ret) {
+               dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+               return ret;
+       }
+
+       if (netif_running(ndev)) {
+               ret = xcan_chip_start(ndev);
+               if (ret) {
+                       dev_err(dev, "xcan_chip_start failed on resume\n");
+                       return ret;
+               }
+
+               netif_device_attach(ndev);
+               netif_start_queue(ndev);
+       }
 
+       return 0;
 }
 
 /**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct xcan_priv *priv = netdev_priv(ndev);
 
-       if (netif_running(ndev)) {
-               netif_stop_queue(ndev);
-               netif_device_detach(ndev);
-       }
-
-       priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-       priv->can.state = CAN_STATE_SLEEPING;
-
        clk_disable_unprepare(priv->bus_clk);
        clk_disable_unprepare(priv->can_clk);
 
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
        struct net_device *ndev = dev_get_drvdata(dev);
        struct xcan_priv *priv = netdev_priv(ndev);
        int ret;
-       u32 isr, status;
 
        ret = clk_prepare_enable(priv->bus_clk);
        if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
                return ret;
        }
 
-       priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-       isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-       status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-       if (netif_running(ndev)) {
-               if (isr & XCAN_IXR_BSOFF_MASK) {
-                       priv->can.state = CAN_STATE_BUS_OFF;
-                       priv->write_reg(priv, XCAN_SRR_OFFSET,
-                                       XCAN_SRR_RESET_MASK);
-               } else if ((status & XCAN_SR_ESTAT_MASK) ==
-                                       XCAN_SR_ESTAT_MASK) {
-                       priv->can.state = CAN_STATE_ERROR_PASSIVE;
-               } else if (status & XCAN_SR_ERRWRN_MASK) {
-                       priv->can.state = CAN_STATE_ERROR_WARNING;
-               } else {
-                       priv->can.state = CAN_STATE_ERROR_ACTIVE;
-               }
-               netif_device_attach(ndev);
-               netif_start_queue(ndev);
-       }
-
        return 0;
 }
 
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+       .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+       { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+       { .compatible = "xlnx,axi-can-1.00.a", },
+       { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev:      Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
        struct resource *res; /* IO mem resources */
        struct net_device *ndev;
        struct xcan_priv *priv;
+       const struct of_device_id *of_id;
+       int caps = 0;
        void __iomem *addr;
-       int ret, rx_max, tx_max;
+       int ret, rx_max, tx_max, tx_fifo_depth;
 
        /* Get the virtual base address for the device */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
                goto err;
        }
 
-       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+       ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+                                  &tx_fifo_depth);
        if (ret < 0)
                goto err;
 
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err;
 
+       of_id = of_match_device(xcan_of_match, &pdev->dev);
+       if (of_id) {
+               const struct xcan_devtype_data *devtype_data = of_id->data;
+
+               if (devtype_data)
+                       caps = devtype_data->caps;
+       }
+
+       /* There is no way to directly figure out how many frames have been
+        * sent when the TXOK interrupt is processed. If watermark programming
+        * is supported, we can have 2 frames in the FIFO and use TXFEMP
+        * to determine if 1 or 2 frames have been sent.
+        * Theoretically we should be able to use TXFWMEMP to determine up
+        * to 3 frames, but it seems that after putting a second frame in the
+        * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+        * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+        * sent), which is not a sensible state - possibly TXFWMEMP is not
+        * completely synchronized with the rest of the bits?
+        */
+       if (caps & XCAN_CAP_WATERMARK)
+               tx_max = min(tx_fifo_depth, 2);
+       else
+               tx_max = 1;
+
        /* Create a CAN device instance */
        ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
        if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
                                        CAN_CTRLMODE_BERR_REPORTING;
        priv->reg_base = addr;
        priv->tx_max = tx_max;
+       spin_lock_init(&priv->tx_lock);
 
        /* Get IRQ for the device */
        ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
 
-       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+       netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
                        priv->reg_base, ndev->irq, priv->can.clock.freq,
-                       priv->tx_max);
+                       tx_fifo_depth, priv->tx_max);
 
        return 0;
 
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
        return 0;
 }
 
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-       { .compatible = "xlnx,zynq-can-1.0", },
-       { .compatible = "xlnx,axi-can-1.00.a", },
-       { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
        .probe = xcan_probe,
        .remove = xcan_remove,
index 437cd6eb4faa39338108b73b43abaa89a559607c..9ef07a06aceb6b467631d95cd78c8acfe21dd7da 100644 (file)
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
        .xlate  = irq_domain_xlate_twocell,
 };
 
+/* To be called with reg_lock held */
 static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 {
        int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
 
 static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
 {
-       mv88e6xxx_g1_irq_free_common(chip);
-
+       /*
+        * free_irq must be called without reg_lock taken because the irq
+        * handler takes this lock, too.
+        */
        free_irq(chip->irq, chip);
+
+       mutex_lock(&chip->reg_lock);
+       mv88e6xxx_g1_irq_free_common(chip);
+       mutex_unlock(&chip->reg_lock);
 }
 
 static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
 
 static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
 {
-       mv88e6xxx_g1_irq_free_common(chip);
-
        kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
        kthread_destroy_worker(chip->kworker);
+
+       mutex_lock(&chip->reg_lock);
+       mv88e6xxx_g1_irq_free_common(chip);
+       mutex_unlock(&chip->reg_lock);
 }
 
 int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -4506,12 +4515,10 @@ out_g2_irq:
        if (chip->info->g2_irqs > 0)
                mv88e6xxx_g2_irq_free(chip);
 out_g1_irq:
-       mutex_lock(&chip->reg_lock);
        if (chip->irq > 0)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
-       mutex_unlock(&chip->reg_lock);
 out:
        if (pdata)
                dev_put(pdata->netdev);
@@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
        if (chip->info->g2_irqs > 0)
                mv88e6xxx_g2_irq_free(chip);
 
-       mutex_lock(&chip->reg_lock);
        if (chip->irq > 0)
                mv88e6xxx_g1_irq_free(chip);
        else
                mv88e6xxx_irq_poll_free(chip);
-       mutex_unlock(&chip->reg_lock);
 }
 
 static const struct of_device_id mv88e6xxx_of_match[] = {
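
The reordering in this file exists because free_irq() waits for any running handler to finish, and the handler takes reg_lock: calling free_irq() while holding reg_lock can therefore deadlock. A userspace analogue of the safe ordering, with a pthread standing in for the interrupt handler (an illustration only, not driver code; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void *irq_handler(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&reg_lock); /* the handler needs reg_lock */
        puts("handler ran");
        pthread_mutex_unlock(&reg_lock);
        return NULL;
}

int main(void)
{
        pthread_t handler;

        pthread_create(&handler, NULL, irq_handler, NULL);

        /* Safe order, as in the fix: wait for the handler first (the
         * free_irq() analogue), then take reg_lock for the common
         * teardown. Joining while holding reg_lock could deadlock,
         * since the handler blocks on that same lock.
         */
        pthread_join(handler, NULL);
        pthread_mutex_lock(&reg_lock);
        puts("common teardown");
        pthread_mutex_unlock(&reg_lock);
        return 0;
}
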
index 5b7658bcf0209546a4577d708123231c55f97960..5c3ef9fc8207e3de01b86e4c538633ecc8a9f273 100644 (file)
@@ -32,7 +32,7 @@ config EL3
 
 config 3C515
        tristate "3c515 ISA \"Fast EtherLink\""
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !PPC32
        ---help---
          If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
          network card, say Y here.
index d5c15e8bb3de706b12d343ee1a50477b23ab3d3f..9e5cf5583c87cc137a2a1fb2ccd13aa26074cf34 100644 (file)
@@ -44,7 +44,7 @@ config AMD8111_ETH
 
 config LANCE
        tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-       depends on ISA && ISA_DMA_API && !ARM
+       depends on ISA && ISA_DMA_API && !ARM && !PPC32
        ---help---
          If you have a network (Ethernet) card of this type, say Y here.
          Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
 
 config NI65
        tristate "NI6510 support"
-       depends on ISA && ISA_DMA_API && !ARM
+       depends on ISA && ISA_DMA_API && !ARM && !PPC32
        ---help---
          If you have a network (Ethernet) card of this type, say Y here.
 
@@ -173,7 +173,7 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
        depends on X86 || ARM64 || COMPILE_TEST
        select BITREVERSE
        select CRC32
index 1205861b631896a0fc6b19ac608483d8e4b27d4e..eedd3f3dd22e220186578235c9f5f0b0072e80f6 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE_V2
        tristate "APM X-Gene SoC Ethernet-v2 Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        help
          This is the Ethernet driver for the on-chip ethernet interface
index afccb033177b39233a333994835713d577339c2f..e4e33c900b577161e77974bd62c45030cb2762e8 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        select PHYLIB
        select MDIO_XGENE
index fc7383106946ca6461f62ea305be0f03bb59c227..91eb8910b1c992b1b7876f05a26753a5cf79c100 100644 (file)
@@ -63,8 +63,6 @@
 
 #define AQ_CFG_NAPI_WEIGHT     64U
 
-#define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
-
 /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
 
 #define AQ_NIC_FC_OFF    0U
index a2d416b24ffc251c71d002a9befe825d5c585fbc..2c6ebd91a9f2782e87472e497447b60974a7a571 100644 (file)
@@ -98,6 +98,8 @@ struct aq_stats_s {
 #define AQ_HW_MEDIA_TYPE_TP    1U
 #define AQ_HW_MEDIA_TYPE_FIBRE 2U
 
+#define AQ_HW_MULTICAST_ADDRESS_MAX     32U
+
 struct aq_hw_s {
        atomic_t flags;
        u8 rbl_enabled:1;
@@ -177,7 +179,7 @@ struct aq_hw_ops {
                                    unsigned int packet_filter);
 
        int (*hw_multicast_list_set)(struct aq_hw_s *self,
-                                    u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                    u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
                                     [ETH_ALEN],
                                     u32 count);
 
index ba5fe8c4125d85d0050c2c5b269cba2c320d3127..e3ae29e523f0e26738b0ab80a2f3ac431083d13b 100644 (file)
@@ -135,17 +135,10 @@ err_exit:
 static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
-       int err = 0;
 
-       err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
-       if (err < 0)
-               return;
+       aq_nic_set_packet_filter(aq_nic, ndev->flags);
 
-       if (netdev_mc_count(ndev)) {
-               err = aq_nic_set_multicast_list(aq_nic, ndev);
-               if (err < 0)
-                       return;
-       }
+       aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
 static const struct net_device_ops aq_ndev_ops = {
index 1a1a6380c128c4522b330907cc16258f0e012189..7a22d0257e04ccf07ef87cae18d5d4f87630660a 100644 (file)
@@ -563,34 +563,41 @@ err_exit:
 
 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 {
+       unsigned int packet_filter = self->packet_filter;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;
 
-       self->mc_list.count = 0U;
-
-       netdev_for_each_mc_addr(ha, ndev) {
-               ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-               ++self->mc_list.count;
+       self->mc_list.count = 0;
+       if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_PROMISC;
+       } else {
+               netdev_for_each_uc_addr(ha, ndev) {
+                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
 
-               if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
-                       break;
+                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+                               break;
+               }
        }
 
-       if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
-               /* Number of filters is too big: atlantic does not support this.
-                * Force all multi filter to support this.
-                * With this we disable all UC filters and setup "all pass"
-                * multicast mask
-                */
-               self->packet_filter |= IFF_ALLMULTI;
-               self->aq_nic_cfg.mc_list_count = 0;
-               return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
-                                                            self->packet_filter);
+       if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_ALLMULTI;
        } else {
-               return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
-                                                   self->mc_list.ar,
-                                                   self->mc_list.count);
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+
+                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+                               break;
+               }
+       }
+
+       if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_MULTICAST;
+               self->mc_list.count = i;
+               self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+                                                      self->mc_list.ar,
+                                                      self->mc_list.count);
        }
+       return aq_nic_set_packet_filter(self, packet_filter);
 }
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
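
The rewritten aq_nic_set_multicast_list() escalates the filter mode instead of silently truncating the address list: too many unicast entries force promiscuous mode, too many combined entries force all-multicast, and otherwise the exact-match filters are programmed. A condensed standalone sketch of that decision (the flag values are illustrative stand-ins for the IFF_* bits):

#include <stdio.h>

#define MAX_FILTERS    32
#define F_PROMISC      0x1
#define F_ALLMULTI     0x2
#define F_MULTICAST    0x4

static int pick_filter(int uc_count, int mc_count)
{
        int programmed = uc_count <= MAX_FILTERS ? uc_count : 0;
        int filter = 0;

        if (uc_count > MAX_FILTERS)
                filter |= F_PROMISC;    /* unicast list doesn't fit */
        if (programmed + mc_count > MAX_FILTERS)
                filter |= F_ALLMULTI;   /* pass all multicast instead */
        else
                programmed += mc_count;
        if (programmed > 0 && programmed < MAX_FILTERS)
                filter |= F_MULTICAST;  /* exact-match filters in use */

        return filter;
}

int main(void)
{
        printf("%d %d %d\n", pick_filter(40, 0), pick_filter(4, 40),
               pick_filter(4, 4)); /* 1 6 4 */
        return 0;
}
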
index faa533a0ec474116b7d84369c947a7f0f1bfa853..fecfc401f95df041f56f348c1082147377464059 100644 (file)
@@ -75,7 +75,7 @@ struct aq_nic_s {
        struct aq_hw_link_status_s link_status;
        struct {
                u32 count;
-               u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+               u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
        } mc_list;
 
        struct pci_dev *pdev;
index 67e2f9fb9402f3ed419ee46c47a7f6bd4d8e1ffc..8cc6abadc03b90e88fb58b09a53e7da3702710e5 100644 (file)
@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
 
 static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
-                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
 {
index 819f6bcf9b4ee76e620691ae3861a1fad213eca9..956860a697970ab427be0357d8541e929a85c489 100644 (file)
@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 
 static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
-                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
 {
@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
 
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
-                                   HW_ATL_B0_MAC_MIN + i);
+                                          HW_ATL_B0_MAC_MIN + i);
        }
 
        err = aq_hw_err_from_flags(self);
index e743ddf46343302fe69c4c562c7cba239fe06dd9..5d0ab8e74b680cc6e75de6e91b79115b4637daa7 100644 (file)
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
 config ARC_EMAC
        tristate "ARC EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET
+       depends on ARC || COMPILE_TEST
        ---help---
          On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
          non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
        tristate "Rockchip EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET && REGULATOR
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
        ---help---
          Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
          This selects Rockchip SoC glue layer support for the
index 567ee54504bcd6eba897009259f691b74b77609e..5e5022fa1d047be078be911bc4f6cd0631f04de7 100644 (file)
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct alx_priv *alx = pci_get_drvdata(pdev);
        struct alx_hw *hw = &alx->hw;
+       int err;
 
        alx_reset_phy(hw);
 
        if (!netif_running(alx->dev))
                return 0;
        netif_device_attach(alx->dev);
-       return __alx_open(alx, true);
+
+       rtnl_lock();
+       err = __alx_open(alx, true);
+       rtnl_unlock();
+
+       return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
index 94270f654b3b534b88ed3296f7556de0186de123..7087b88550db5fbbbfb909aee8c5a999db5bad57 100644 (file)
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
        skb = build_skb(page_address(page) + adapter->rx_page_offset,
                        adapter->rx_frag_size);
        if (likely(skb)) {
+               skb_reserve(skb, NET_SKB_PAD);
                adapter->rx_page_offset += adapter->rx_frag_size;
                if (adapter->rx_page_offset >= PAGE_SIZE)
                        adapter->rx_page = NULL;
index af75156919edfead9bbe1e223b92d45d4fdd444e..4c3bfde6e8de00f2010b1329e05c8b36a16e158f 100644 (file)
@@ -157,7 +157,6 @@ config BGMAC
 config BGMAC_BCMA
        tristate "Broadcom iProc GBit BCMA support"
        depends on BCMA && BCMA_HOST_SOC
-       depends on HAS_DMA
        depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
        select BGMAC
        select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
 
 config BGMAC_PLATFORM
        tristate "Broadcom iProc GBit platform support"
-       depends on HAS_DMA
        depends on ARCH_BCM_IPROC || COMPILE_TEST
        depends on OF
        select BGMAC
index d5fca2e5a9bc34ad6edfa295e378dfe12078c0e5..a1f60f89e05944458e98e7faa2292960368c5ef8 100644 (file)
@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
        if (!priv->is_lite)
                priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
        else
-               priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
-                                  GIB_FCS_STRIP);
+               priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+                                 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
 
        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
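
The crc_fwd fix above is a polarity inversion: the old `!!` test set crc_fwd exactly when GIB_FCS_STRIP was set, but the MAC only forwards the CRC when stripping is disabled. The two expressions side by side:

#include <stdio.h>

#define GIB_FCS_STRIP_SHIFT    6
#define GIB_FCS_STRIP          (1 << GIB_FCS_STRIP_SHIFT)

int main(void)
{
        unsigned int gib_control = GIB_FCS_STRIP; /* FCS stripping enabled */

        int old_val = !!(gib_control & GIB_FCS_STRIP);
        int new_val = !((gib_control & GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

        /* stripping enabled means the CRC is not forwarded */
        printf("old crc_fwd=%d (wrong), new crc_fwd=%d (right)\n",
               old_val, new_val);
        return 0;
}
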
index d6e5d0cbf3a3b3c526d347add087c9cef428776a..cf440b91fd04a331a7dce529d740a22686b96dfd 100644 (file)
@@ -278,7 +278,8 @@ struct bcm_rsb {
 #define  GIB_GTX_CLK_EXT_CLK           (0 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_125MHZ            (1 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_250MHZ            (2 << GIB_GTX_CLK_SEL_SHIFT)
-#define  GIB_FCS_STRIP                 (1 << 6)
+#define  GIB_FCS_STRIP_SHIFT           6
+#define  GIB_FCS_STRIP                 (1 << GIB_FCS_STRIP_SHIFT)
 #define  GIB_LCL_LOOP_EN               (1 << 7)
 #define  GIB_LCL_LOOP_TXEN             (1 << 8)
 #define  GIB_RMT_LOOP_EN               (1 << 9)
index d847e1b9c37b5afff33e799e919e3ff39b5cd1e8..be1506169076f0a89f6a621d01dce81afe720ba7 100644 (file)
@@ -1533,6 +1533,7 @@ struct bnx2x {
        struct link_vars        link_vars;
        u32                     link_cnt;
        struct bnx2x_link_report_data last_reported_link;
+       bool                    force_link_down;
 
        struct mdio_if_info     mdio;
 
index 8cd73ff5debc276aec53d1f056fe3040875b2c0a..af7b5a4d8ba044800b0eb229d8c989c564515e94 100644 (file)
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
        struct bnx2x_link_report_data cur_data;
 
+       if (bp->force_link_down) {
+               bp->link_vars.link_up = 0;
+               return;
+       }
+
        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                bp->pending_max = 0;
        }
 
+       bp->force_link_down = false;
        if (bp->port.pmf) {
                rc = bnx2x_initial_phy_init(bp, load_mode);
                if (rc)
index da18aa239acb19ab87c107063ef240ac4a3b9261..a4a90b6cdb467038457fca98e8ab9f25dd72cde8 100644 (file)
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tuple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
-                       return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+                                                true);
                } else if ((info->flow_type == UDP_V6_FLOW) &&
                           (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tuple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
-                       return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+                       if (bp->state == BNX2X_STATE_OPEN)
+                               return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+                                                true);
                }
                return 0;
 
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
                bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
        }
 
-       return bnx2x_config_rss_eth(bp, false);
+       if (bp->state == BNX2X_STATE_OPEN)
+               return bnx2x_config_rss_eth(bp, false);
+
+       return 0;
 }
 
 /**
index 5b1ed240bf18be0963cc580ab4256b6adc924046..57348f2b49a31fd5b1ef5a67d2ba1e7945768ab0 100644 (file)
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
+               /* Immediately indicate link as down */
+               bp->link_vars.link_up = 0;
+               bp->force_link_down = true;
+               netif_carrier_off(bp->dev);
+               BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                /* When the return value indicates an allocation failure,
                 * the nic is rebooted again. If open still fails, an error
index 176fc9f4d7defe6a9d5b513902c97f56d732b323..4394c1162be4fde931aa822d69a008b89f54efb8 100644 (file)
@@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
        }
        vnic->uc_filter_count = 1;
 
-       vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+       vnic->rx_mask = 0;
+       if (bp->dev->flags & IFF_BROADCAST)
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
        if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
        return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
 }
 
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 {
        bp->hw_resc.max_irqs = max_irqs;
 }
@@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                rc = bnxt_request_irq(bp);
                if (rc) {
                        netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
-                       goto open_err;
+                       goto open_err_irq;
                }
        }
 
@@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 open_err:
        bnxt_debug_dev_exit(bp);
        bnxt_disable_napi(bp);
+
+open_err_irq:
        bnxt_del_napi(bp);
 
 open_err_free_mem:
@@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 
        mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
                  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
-                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
 
        if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
        uc_update = bnxt_uc_list_updated(bp);
 
+       if (dev->flags & IFF_BROADCAST)
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
        if (dev->flags & IFF_ALLMULTI) {
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
                vnic->mc_list_count = 0;
@@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
        int rx, tx, cp;
 
        _bnxt_get_max_rings(bp, &rx, &tx, &cp);
+       *max_rx = rx;
+       *max_tx = tx;
        if (!rx || !tx || !cp)
                return -ENOMEM;
 
-       *max_rx = rx;
-       *max_tx = tx;
        return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
@@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
                /* Not enough rings, try disabling agg rings. */
                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
                rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
-               if (rc)
+               if (rc) {
+                       /* set BNXT_FLAG_AGG_RINGS back for consistency */
+                       bp->flags |= BNXT_FLAG_AGG_RINGS;
                        return rc;
+               }
                bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
                bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
                bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
index 9b14eb610b9f653b61092d74b3ab9257a84383d9..91575ef97c8cb119d9407530f4b6f5472d72724c 100644 (file)
@@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index 795f45024c209e65591a3e9fe60814315ebb3cb0..491bd40a254d8dad8810d983505b69efe2d011b1 100644 (file)
 #define BNXT_FID_INVALID                       0xffff
 #define VLAN_TCI(vid, prio)    ((vid) | ((prio) << VLAN_PRIO_SHIFT))
 
+#define is_vlan_pcp_wildcarded(vlan_tci_mask)  \
+       ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vlan_pcp_exactmatch(vlan_tci_mask)  \
+       ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
+#define is_vlan_pcp_zero(vlan_tci)     \
+       ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vid_exactmatch(vlan_tci_mask)       \
+       ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+
 /* Return the dst fid of the func for flow forwarding
  * For PFs: src_fid is the fid of the PF
  * For VF-reps: src_fid the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
        return true;
 }
 
+static bool is_vlan_tci_allowed(__be16  vlan_tci_mask,
+                               __be16  vlan_tci)
+{
+       /* VLAN priority must be either exactly zero or fully wildcarded and
+        * VLAN id must be exact match.
+        */
+       if (is_vid_exactmatch(vlan_tci_mask) &&
+           ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
+             is_vlan_pcp_zero(vlan_tci)) ||
+            is_vlan_pcp_wildcarded(vlan_tci_mask)))
+               return true;
+
+       return false;
+}
+
 static bool bits_set(void *key, int len)
 {
        const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
-           !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
-                          sizeof(flow->l2_mask.inner_vlan_tci))) {
-               netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+           !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
+                                flow->l2_key.inner_vlan_tci)) {
+               netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
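
is_vlan_tci_allowed() encodes the offload rule above: the VLAN id must be exact-matched, and the priority bits must be either fully wildcarded or exact-matched against a zero priority. The same predicate standalone, in host byte order for brevity (the driver applies ntohs() first):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK 0xe000
#define VLAN_VID_MASK  0x0fff

static bool vlan_tci_allowed(uint16_t mask, uint16_t tci)
{
        bool vid_exact = (mask & VLAN_VID_MASK) == VLAN_VID_MASK;
        bool pcp_wild = (mask & VLAN_PRIO_MASK) == 0;
        bool pcp_exact = (mask & VLAN_PRIO_MASK) == VLAN_PRIO_MASK;
        bool pcp_zero = (tci & VLAN_PRIO_MASK) == 0;

        return vid_exact && (pcp_wild || (pcp_exact && pcp_zero));
}

int main(void)
{
        printf("%d\n", vlan_tci_allowed(0x0fff, 0x0064)); /* 1: PCP wildcarded */
        printf("%d\n", vlan_tci_allowed(0xefff, 0x2064)); /* 0: nonzero PCP */
        return 0;
}
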
index 347e4f946eb222ce5c8e1e14777c9d6555eb48dc..840f6e505f733208955bedee497ecf51397d487d 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
        bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
        bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
-       bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
                bnxt_close_nic(bp, true, false);
index 30273a7717e2df797890da57e229ce31e9d957e2..4fd829b5e65d14b56337e63fc480dd72c8420eeb 100644 (file)
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
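
The kcalloc() change above fixes the bitmap sizing: DIV_ROUND_UP(size, 32) * 4 bytes covers the bits, but the kernel's bitmap helpers read and write whole unsigned longs, so on 64-bit the final long could extend past the allocation. BITS_TO_LONGS(size) * sizeof(long) sizes the buffer in the units the helpers actually use:

#include <stdio.h>

#define BITS_PER_LONG          (8 * sizeof(long))
#define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nbits)   DIV_ROUND_UP(nbits, BITS_PER_LONG)

int main(void)
{
        unsigned int size = 32; /* bitmap size in bits */

        /* on LP64: old form yields 4 bytes, but bitmap ops touch 8 */
        printf("old: %zu bytes\n", (size_t)DIV_ROUND_UP(size, 32) * 4);
        printf("new: %zu bytes\n", BITS_TO_LONGS(size) * sizeof(long));
        return 0;
}
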
 
index 3be87efdc93d6347da8417ddcd101ed90cc12d8c..aa1374d0af9313dfdbf6a7f8dfeea92e2fee7013 100644 (file)
@@ -6,11 +6,15 @@
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2005-2016 Broadcom Corporation.
  * Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
  *     Copyright (C) 2000-2016 Broadcom Corporation.
  *     Copyright (C) 2016-2017 Broadcom Ltd.
+ *     Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ *     refers to Broadcom Inc. and/or its subsidiaries.
  *
  *     Permission is hereby granted for the distribution of this firmware
  *     data in hexadecimal or equivalent format, provided this copyright
@@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_restore_clk(tp);
 
+       /* Increase the core clock speed to fix a tx timeout issue seen on
+        * the 5762 at 100Mbps link speed.
+        */
+       if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+               val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+               tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+                    TG3_CPMU_MAC_ORIDE_ENABLE);
+       }
+
        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
index 1d61aa3efda177c64c69465f0b72c0df5221ba37..a772a33b685c5eb8c28137107eb33cb4b6ffeb1d 100644 (file)
@@ -7,6 +7,8 @@
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2007-2016 Broadcom Corporation.
  * Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
  */
 
 #ifndef _T3_H
index 86659823b2592e20d16e1fbc0640d45c99508f47..3d45f4c92cf6e5d3f091ae654e5312165956d19f 100644 (file)
 #define GEM_DCFG6              0x0294 /* Design Config 6 */
 #define GEM_DCFG7              0x0298 /* Design Config 7 */
 #define GEM_DCFG8              0x029C /* Design Config 8 */
+#define GEM_DCFG10             0x02A4 /* Design Config 10 */
 
 #define GEM_TXBDCTRL   0x04cc /* TX Buffer Descriptor control register */
 #define GEM_RXBDCTRL   0x04d0 /* RX Buffer Descriptor control register */
 #define GEM_SCR2CMP_OFFSET                     0
 #define GEM_SCR2CMP_SIZE                       8
 
+/* Bitfields in DCFG10 */
+#define GEM_TXBD_RDBUFF_OFFSET                 12
+#define GEM_TXBD_RDBUFF_SIZE                   4
+#define GEM_RXBD_RDBUFF_OFFSET                 8
+#define GEM_RXBD_RDBUFF_SIZE                   4
+
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET                   0
 #define GEM_SUBNSINCR_SIZE                     16
 #define MACB_CAPS_USRIO_DISABLED               0x00000010
 #define MACB_CAPS_JUMBO                                0x00000020
 #define MACB_CAPS_GEM_HAS_PTP                  0x00000040
+#define MACB_CAPS_BD_RD_PREFETCH               0x00000080
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
        unsigned int max_tuples;
 
        struct tasklet_struct   hresp_err_tasklet;
+
+       int     rx_bd_rd_prefetch;
+       int     tx_bd_rd_prefetch;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
index 3e93df5d4e3b2573f88cc427e7eefc6d1930e3ff..a6c911bb5ce22588276a9f92561947ff5bff2726 100644 (file)
@@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp)
 {
        struct macb_queue *queue;
        unsigned int q;
+       int size;
 
-       queue = &bp->queues[0];
        bp->macbgem_ops.mog_free_rx_buffers(bp);
-       if (queue->rx_ring) {
-               dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
-                               queue->rx_ring, queue->rx_ring_dma);
-               queue->rx_ring = NULL;
-       }
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                kfree(queue->tx_skb);
                queue->tx_skb = NULL;
                if (queue->tx_ring) {
-                       dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
+                       size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+                       dma_free_coherent(&bp->pdev->dev, size,
                                          queue->tx_ring, queue->tx_ring_dma);
                        queue->tx_ring = NULL;
                }
+               if (queue->rx_ring) {
+                       size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+                       dma_free_coherent(&bp->pdev->dev, size,
+                                         queue->rx_ring, queue->rx_ring_dma);
+                       queue->rx_ring = NULL;
+               }
        }
 }
 
@@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
        int size;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               size = TX_RING_BYTES(bp);
+               size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
                queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                    &queue->tx_ring_dma,
                                                    GFP_KERNEL);
@@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
                if (!queue->tx_skb)
                        goto out_err;
 
-               size = RX_RING_BYTES(bp);
+               size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
                queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                 &queue->rx_ring_dma, GFP_KERNEL);
                if (!queue->rx_ring)
@@ -3726,6 +3728,8 @@ static int at91ether_init(struct platform_device *pdev)
        int err;
        u32 reg;
 
+       bp->queues[0].bp = bp;
+
        dev->netdev_ops = &at91ether_netdev_ops;
        dev->ethtool_ops = &macb_ethtool_ops;
 
@@ -3795,7 +3799,7 @@ static const struct macb_config np4_config = {
 static const struct macb_config zynqmp_config = {
        .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
                        MACB_CAPS_JUMBO |
-                       MACB_CAPS_GEM_HAS_PTP,
+                       MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -3856,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
        void __iomem *mem;
        const char *mac;
        struct macb *bp;
-       int err;
+       int err, val;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3945,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
        else
                dev->max_mtu = ETH_DATA_LEN;
 
+       if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
+               val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
+               if (val)
+                       bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
+                                               macb_dma_desc_get_size(bp);
+
+               val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
+               if (val)
+                       bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
+                                               macb_dma_desc_get_size(bp);
+       }
+
        mac = of_get_mac_address(np);
        if (mac) {
                ether_addr_copy(bp->dev->dev_addr, mac);
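In macb_probe() above, the DCFG10 fields appear to encode the descriptor-prefetch depth as a power of two, so (2 << (val - 1)) yields 2^val descriptors, and the rings are padded by that many descriptor sizes. A worked sketch of the computation; the field value and descriptor size here are illustrative:

#include <stdio.h>

/* Prefetch padding in bytes for a DCFG10 field value: (2 << (val - 1))
 * is 2^val descriptors, each of the hardware descriptor size. */
static int bd_prefetch_bytes(int val, int desc_size)
{
	return val ? (2 << (val - 1)) * desc_size : 0;
}

int main(void)
{
	/* val = 2 -> 4 descriptors; with 16-byte descriptors the ring is
	 * padded by 64 bytes (both figures illustrative). */
	printf("%d\n", bd_prefetch_bytes(2, 16));
	return 0;
}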
index 2220c771092b46e8fb583d46ea99d5829e1793d0..678835136bf8069326067feaa46f8465db4e38d4 100644 (file)
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
        if (delta > TSU_NSEC_MAX_VAL) {
                gem_tsu_get_time(&bp->ptp_clock_info, &now);
-               if (sign)
-                       now = timespec64_sub(now, then);
-               else
-                       now = timespec64_add(now, then);
+               now = timespec64_add(now, then);
 
                gem_tsu_set_time(&bp->ptp_clock_info,
                                 (const struct timespec64 *)&now);
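Dropping the sign branch in gem_ptp_adjtime() is safe on the assumption that `then` is derived from the signed delta: the kernel's ns_to_timespec64() normalizes a negative input to a negative tv_sec with 0 <= tv_nsec < NSEC_PER_SEC, so a single timespec64_add() moves the clock in either direction. A userspace sketch of that normalization and add, using plain struct timespec in place of timespec64:

#include <stdio.h>
#include <time.h>

/* Normalized add: carries tv_nsec into tv_sec so the result stays in
 * [0, 1s) regardless of the sign of the operands. */
static struct timespec ts_add(struct timespec a, struct timespec b)
{
	struct timespec r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

	if (r.tv_nsec >= 1000000000L) { r.tv_sec++; r.tv_nsec -= 1000000000L; }
	if (r.tv_nsec < 0)            { r.tv_sec--; r.tv_nsec += 1000000000L; }
	return r;
}

int main(void)
{
	struct timespec now  = { .tv_sec = 100, .tv_nsec = 0 };
	/* A delta of -1000 ns normalizes to { -1, 999999000 }. */
	struct timespec then = { .tv_sec = -1, .tv_nsec = 999999000L };
	struct timespec r = ts_add(now, then);

	printf("%ld.%09ld\n", (long)r.tv_sec, r.tv_nsec);	/* 99.999999000 */
	return 0;
}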
index 07d2201530d26c85e26cf0987553451acad936a6..9fdd496b90ff47cb0f1147777ae7b9ca0071076d 100644 (file)
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on ARCH_HIGHBANK || COMPILE_TEST
        select CRC32
        help
index 043e3c11c42bd407d47561bec2a2e0acd525f12b..92d88c5f76fb8b68e9f8b35ada37d4a77d68f739 100644 (file)
@@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM
 
 config THUNDER_NIC_PF
        tristate "Thunder Physical function driver"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select THUNDER_NIC_BGX
        ---help---
          This driver supports Thunder's NIC physical function.
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
        tristate "Thunder Virtual function driver"
        imply CAVIUM_PTP
-       depends on 64BIT
+       depends on 64BIT && PCI
        ---help---
          This driver supports Thunder's NIC virtual function
 
 config THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select PHYLIB
        select MDIO_THUNDER
        select THUNDER_NIC_RGX
@@ -44,7 +44,7 @@ config        THUNDER_NIC_BGX
 
 config THUNDER_NIC_RGX
        tristate "Thunder MAC interface driver (RGX)"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select PHYLIB
        select MDIO_THUNDER
        ---help---
@@ -53,7 +53,7 @@ config        THUNDER_NIC_RGX
 
 config CAVIUM_PTP
        tristate "Cavium PTP coprocessor as PTP clock"
-       depends on 64BIT
+       depends on 64BIT && PCI
        imply PTP_1588_CLOCK
        default y
        ---help---
@@ -65,7 +65,7 @@ config CAVIUM_PTP
 
 config LIQUIDIO
        tristate "Cavium LiquidIO support"
-       depends on 64BIT
+       depends on 64BIT && PCI
        depends on MAY_USE_DEVLINK
        imply PTP_1588_CLOCK
        select FW_LOADER
index 8a815bb5717732331293e9fba5b00d3ca23aaf88..7e8454d3b1ad3f382f778c27058695c34b9f13cb 100644 (file)
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
  */
 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
 
+/* time to wait for possible in-flight requests in milliseconds */
+#define WAIT_INFLIGHT_REQUEST  msecs_to_jiffies(1000)
+
 struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
        force_io_queues_off(oct);
 
        /* To allow for in-flight requests */
-       schedule_timeout_uninterruptible(100);
+       schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
 
        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");
index 3f6afb54a5eb188061dcad1ce4679465d408db86..bb43ddb7539e719d0cbff780e5ddf17c756dbe05 100644 (file)
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct octeon_mgmt *p = netdev_priv(netdev);
-       int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+       int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        netdev->mtu = new_mtu;
 
-       cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
+       /* HW lifts the limit if the frame is VLAN tagged
+        * (+4 bytes per tag, up to two tags)
+        */
+       cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
+       /* Set the hardware to truncate packets larger than the MTU. The jabber
+        * register must be set to a multiple of 8 bytes, so round up. JABBER is
+        * an unconditional limit, so we need to account for two possible VLAN
+        * tags.
+        */
        cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
-                      (size_without_fcs + 7) & 0xfff8);
+                      (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
 
        return 0;
 }
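The jabber expression above rounds up to the next multiple of 8 with the classic (x + 7) & ~7 trick, after reserving room for two VLAN tags. A worked sketch of the arithmetic; the AGL register semantics are not modeled, and VLAN_HLEN is taken as the usual 4 bytes:

#include <stdio.h>

#define VLAN_HLEN 4	/* bytes added per VLAN tag */

/* Round the frame limit up to the next multiple of 8 (a JABBER register
 * requirement) after reserving room for two VLAN tags. */
static unsigned int jabber_limit(unsigned int max_packet)
{
	return (max_packet + 7 + VLAN_HLEN * 2) & ~7u;
}

int main(void)
{
	/* MTU 1500 + 14 (Ethernet header) + 4 (FCS) = 1518;
	 * 1518 + 8 = 1526, rounded up to 1528. */
	printf("%u\n", jabber_limit(1518));
	return 0;
}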
index 7b795edd9d3a9543271d29acf0cc35d760a6b065..a19172dbe6be272d9a168302bab18f551a687a17 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "common.h"
 #include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
                if (t.qset_idx >= nqsets)
                        return -EINVAL;
+               t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
 
                q = &adapter->params.sge.qset[q1 + t.qset_idx];
                t.rspq_size = q->rspq_size;
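The added array_index_nospec() call is the standard Spectre-v1 mitigation from linux/nospec.h: after the architectural bounds check, the index is clamped with branch-free arithmetic so a mispredicted path cannot carry an out-of-range value into the array access. A userspace sketch of the idea, mirroring the intent rather than the kernel's exact implementation:

#include <stddef.h>
#include <stdio.h>

/* Branch-free clamp: returns idx when idx < size, 0 otherwise, without a
 * conditional jump the CPU could mispredict. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size);	/* all-ones or 0 */

	return idx & mask;
}

int main(void)
{
	int qset[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t idx = 5;

	if (idx >= 8)
		return 1;		/* architectural bounds check */
	idx = index_nospec(idx, 8);	/* speculation-safe clamp */
	printf("%d\n", qset[idx]);
	return 0;
}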
index dd04a2f89ce62db6ea9bca433023d9aac4b10e23..bc03c175a3cdf1440aca2269b4483f59cdb3a9dc 100644 (file)
@@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
-                       txq->dcb_prio = value;
+                       txq->dcb_prio = enable ? value : 0;
        }
 }
 
index 974a868a4824b78dc8cb7225f37b5d2cf8b24b32..3720c3e11ebb883466d04b4a2169878f0b135399 100644 (file)
@@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap)
        };
 
        unsigned int part, manufacturer;
-       unsigned int density, size;
+       unsigned int density, size = 0;
        u32 flashid = 0;
        int ret;
 
@@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x22: /* 256MB */
                        size = 1 << 28;
                        break;
-
-               default:
-                       dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x17: /* 64MB */
                        size = 1 << 26;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x18: /* 16MB */
                        size = 1 << 24;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x18: /* 16MB */
                        size = 1 << 24;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
-       default:
-               dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
-                       flashid);
-               return -EINVAL;
+       }
+
+       /* If we didn't recognize the FLASH part, that's no real issue: the
+        * Hardware/Software contract says that Hardware will _*ALWAYS*_
+        * use a FLASH part which is at least 4MB in size and has 64KB
+        * sectors.  The unrecognized FLASH part is likely to be much larger
+        * than 4MB, but that's all we really need.
+        */
+       if (size == 0) {
+               dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+                        flashid);
+               size = 1 << 22;
        }
 
        /* Store decoded Flash size and fall through into vetting code. */
index 5ab912937aff2e8eb34887deb2aa36c8f45bebde..ec0b545197e2dfd7c0443917c0ec0f33861c77bd 100644 (file)
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
 config CS89x0
        tristate "CS89x0 support"
        depends on ISA || EISA || ARM
+       depends on !PPC32
        ---help---
          Support for CS89x0 chipset based Ethernet cards. If you have a
          network (Ethernet) card of this type, say Y and read the file
index 973c1fb70d09929f92fc47db0e3d60e3146eaff0..99038dfc7fbe52bea5932691133e2bdeced48844 100644 (file)
@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
        enic->rfs_h.max = enic->config.num_arfs;
        enic->rfs_h.free = enic->rfs_h.max;
        enic->rfs_h.toclean = 0;
-       enic_rfs_timer_start(enic);
 }
 
 void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 
        enic_rfs_timer_stop(enic);
        spin_lock_bh(&enic->rfs_h.lock);
-       enic->rfs_h.free = 0;
        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
                struct hlist_head *hhead;
                struct hlist_node *tmp;
@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
                        enic_delfltr(enic, n->fltr_id);
                        hlist_del(&n->node);
                        kfree(n);
+                       enic->rfs_h.free++;
                }
        }
        spin_unlock_bh(&enic->rfs_h.lock);
index 30d2eaa18c0479adcd75315db194d3785b8007bc..90c645b8538e0f7ae8c77d625ded6cd6b0e0ca0f 100644 (file)
@@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
 {
        struct enic *enic = netdev_priv(netdev);
        unsigned int i;
-       int err;
+       int err, ret;
 
        err = enic_request_intr(enic);
        if (err) {
@@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev)
                vnic_intr_unmask(&enic->intr[i]);
 
        enic_notify_timer_start(enic);
-       enic_rfs_flw_tbl_init(enic);
+       enic_rfs_timer_start(enic);
 
        return 0;
 
 err_out_free_rq:
        for (i = 0; i < enic->rq_count; i++) {
-               err = vnic_rq_disable(&enic->rq[i]);
-               if (err)
-                       return err;
-               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+               ret = vnic_rq_disable(&enic->rq[i]);
+               if (!ret)
+                       vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        }
        enic_dev_notify_unset(enic);
 err_out_free_intr:
@@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
+       enic_rfs_flw_tbl_init(enic);
        enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
index 78db8e62a83f17c05d615cb674703efa4e926bd0..ed6c76d20b45b2a38ccf87e63487e77a756812a3 100644 (file)
@@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
        if (unlikely(nd->state != ncsi_dev_state_functional))
                return;
 
-       netdev_info(nd->dev, "NCSI interface %s\n",
-                   nd->link_up ? "up" : "down");
+       netdev_dbg(nd->dev, "NCSI interface %s\n",
+                  nd->link_up ? "up" : "down");
 }
 
 static void ftgmac100_setup_clk(struct ftgmac100 *priv)
index 5f4e1ffa7b95fe4f8d2bb6447764951c51fffc67..ab02057ac7304f088242a2a07481820302d3556b 100644 (file)
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                sizeof(struct qm_sg_entry) * (1 + nr_frags),
+               dma_unmap_single(dev, addr,
+                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                 dma_dir);
 
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        void *sgt_buf;
 
        /* get a page frag to store the SGTable */
-       sz = SKB_DATA_ALIGN(priv->tx_headroom +
-               sizeof(struct qm_sg_entry) * (1 + nr_frags));
+       sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
        sgt_buf = netdev_alloc_frag(sz);
        if (unlikely(!sgt_buf)) {
                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        skbh = (struct sk_buff **)buffer_start;
        *skbh = skb;
 
-       addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-                             sizeof(struct qm_sg_entry) * (1 + nr_frags),
-                             dma_dir);
+       addr = dma_map_single(dev, buffer_start,
+                             priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
        if (unlikely(dma_mapping_error(dev, addr))) {
                dev_err(dev, "DMA mapping failed");
                err = -EINVAL;
index ce6e24c74978a22a1d22383f0a5b4f38ffec7c00..ecbf6187e13a1fe3d6dba06015ff9cc49aed6224 100644 (file)
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
        struct {
                u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
                iowrite32be(0xffffffff, &regs->pmda[i].lcv);
        }
 
+       /* Short packet padding removal from checksum calculation */
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
        start_port_hwp(port);
 }
 
index 8bcf470ff5f38a4e62842a5f31d5c0b45141ab85..fb1a7251f45d336978199d208af5e1a40eee1556 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
        bool "Hisilicon devices"
        default y
-       depends on (OF || ACPI) && HAS_DMA
+       depends on OF || ACPI
        depends on ARM || ARM64 || COMPILE_TEST
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
index e2e5cdc7119c3ed0e890f99c7b30996d72d280e9..4c0f7eda1166c5df202c3b9a71cc2e43516531fb 100644 (file)
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
        struct hinic_rq *rq = rxq->rq;
 
+       irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
 }
index 9128858479c4a031baa4b6b93b47d7097f01a995..2353ec829c04407d88b365ba58e14ec960c4e978 100644 (file)
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
+               wqe_size = 0;
                goto flush_skbs;
        }
 
index d0e196bff0818ce214b4909cb97976aa3502bdc4..ffe7acbeaa22d372b7ce32f9950edb9a98cdf71a 100644 (file)
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
        return;
 
 failure:
-       dev_info(dev, "replenish pools failure\n");
+       if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
+               dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;
 
@@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
-               dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+               if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+                       dev_err_ratelimited(dev, "tx: send failed\n");
                dev_kfree_skb_any(skb);
                tx_buff->skb = NULL;
 
@@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
                rc = ibmvnic_login(netdev);
                if (rc) {
-                       adapter->state = VNIC_PROBED;
-                       return 0;
+                       adapter->state = reset_state;
+                       return rc;
                }
 
                if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
        return crq;
 }
 
+static void print_subcrq_error(struct device *dev, int rc, const char *func)
+{
+       switch (rc) {
+       case H_PARAMETER:
+               dev_warn_ratelimited(dev,
+                                    "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
+                                    func, rc);
+               break;
+       case H_CLOSED:
+               dev_warn_ratelimited(dev,
+                                    "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
+                                    func, rc);
+               break;
+       default:
+               dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
+               break;
+       }
+}
+
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq)
 {
@@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                                cpu_to_be64(u64_crq[2]),
                                cpu_to_be64(u64_crq[3]));
 
-       if (rc) {
-               if (rc == H_CLOSED)
-                       dev_warn(dev, "CRQ Queue closed\n");
-               dev_err(dev, "Send error (rc=%d)\n", rc);
-       }
+       if (rc)
+               print_subcrq_error(dev, rc, __func__);
 
        return rc;
 }
@@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
                                cpu_to_be64(remote_handle),
                                ioba, num_entries);
 
-       if (rc) {
-               if (rc == H_CLOSED)
-                       dev_warn(dev, "CRQ Queue closed\n");
-               dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
-       }
+       if (rc)
+               print_subcrq_error(dev, rc, __func__);
 
        return rc;
 }
index 8ffb7454e67c2a0309708c1b47487c4d1c58b440..b151ae316546c2483aa91abfabc900b608e53e4a 100644 (file)
@@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(I40E_SKB_PAD +
-                                              (xdp->data_end -
-                                               xdp->data_hard_start));
+                               SKB_DATA_ALIGN(xdp->data_end -
+                                              xdp->data_hard_start);
 #endif
        struct sk_buff *skb;
 
@@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                return NULL;
 
        /* update pointers within the skb to store the data */
-       skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);
        if (metasize)
                skb_metadata_set(skb, metasize);
@@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS          0
+#define I40E_XDP_CONSUMED      BIT(0)
+#define I40E_XDP_TX            BIT(1)
+#define I40E_XDP_REDIR         BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                              struct i40e_ring *xdp_ring);
@@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       bool failure = false, xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
+       bool failure = false;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -I40E_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & I40E_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & I40E_XDP_TX) {
                struct i40e_ring *xdp_ring =
                        rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
                i40e_xdp_ring_update_tail(xdp_ring);
-               xdp_do_flush_map();
        }
 
        rx_ring->skb = skb;
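Turning I40E_XDP_TX/CONSUMED/REDIR into distinct bits lets the RX loop OR together every verdict seen during a poll, then flush redirect maps and bump the XDP TX tail each at most once at the end, instead of conflating TX and REDIRECT as before. A small sketch of that accumulate-then-flush pattern; the names and per-packet verdicts are illustrative:

#include <stdio.h>

#define XDP_RES_CONSUMED (1u << 0)	/* illustrative, not the driver's */
#define XDP_RES_TX       (1u << 1)
#define XDP_RES_REDIR    (1u << 2)

int main(void)
{
	unsigned int verdicts[] = { XDP_RES_TX, XDP_RES_REDIR, XDP_RES_TX };
	unsigned int xdp_xmit = 0;
	unsigned int i;

	/* RX loop: OR together every verdict seen this poll. */
	for (i = 0; i < 3; i++)
		xdp_xmit |= verdicts[i];

	/* End of poll: each follow-up action runs at most once. */
	if (xdp_xmit & XDP_RES_REDIR)
		printf("flush redirect maps once\n");
	if (xdp_xmit & XDP_RES_TX)
		printf("bump XDP TX ring tail once\n");
	return 0;
}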
index 3f5c350716bb0e595d79ec928188f5862461694c..0bd1294ba51737240d510f31bbd255faceffeb11 100644 (file)
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
        if (enable_addr != 0)
                rar_high |= IXGBE_RAH_AV;
 
+       /* Record lower 32 bits of MAC address and then make
+        * sure that write is flushed to hardware before writing
+        * the upper 16 bits and setting the valid bit.
+        */
        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_FLUSH(hw);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
        return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
 
-       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+       /* Clear the address valid bit and upper 16 bits of the address
+        * before clearing the lower bits. This way we aren't updating
+        * a live filter.
+        */
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+       IXGBE_WRITE_FLUSH(hw);
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
 
        /* clear VMDq pool/queue selection for this RAR */
        hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
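Both RAR hunks enforce ordering on posted MMIO writes: a read from the device forces the first write to complete before the write that flips the valid bit, so no half-updated filter is ever live. A compilable sketch of the pattern, with mmio_read()/mmio_write() standing in for IXGBE_READ_REG/IXGBE_WRITE_REG and the flush modeled as a plain read-back:

static void mmio_write(volatile unsigned int *reg, unsigned int val)
{
	*reg = val;
}

static unsigned int mmio_read(volatile unsigned int *reg)
{
	return *reg;
}

static void set_rar(volatile unsigned int *ral, volatile unsigned int *rah,
		    unsigned int low, unsigned int high_with_valid)
{
	mmio_write(ral, low);
	(void)mmio_read(ral);			/* flush: low half is committed */
	mmio_write(rah, high_with_valid);	/* only now arm the valid bit */
}

int main(void)
{
	volatile unsigned int ral = 0, rah = 0;

	set_rar(&ral, &rah, 0xddccbbaau, 0x8000ffeeu);
	return !(rah & 0x80000000u);	/* 0: valid bit landed last */
}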
index c116f459945d62455843d4e9262971630dd45099..da4322e4daed5de4fb44f06d8cdb488bc41f6432 100644 (file)
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
        }
 
        itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-       if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+       if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
                netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
                           __func__, itd->sa_idx, xs->xso.offload_handle);
                return 0;
index 3e87dbbc90246dba3a59e3f8ccded5885b441ae2..62e57b05a0aed3d9a02bf8d473aa49505608728f 100644 (file)
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS         0
+#define IXGBE_XDP_CONSUMED     BIT(0)
+#define IXGBE_XDP_TX           BIT(1)
+#define IXGBE_XDP_REDIR                BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                               struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
                if (!err)
-                       result = IXGBE_XDP_TX;
+                       result = IXGBE_XDP_REDIR;
                else
                        result = IXGBE_XDP_CONSUMED;
                break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & IXGBE_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
                /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
-
-               xdp_do_flush_map();
        }
 
        u64_stats_update_begin(&rx_ring->syncp);
index cc2f7701e71e1b033c4bd7ceb78c970351f4d9ee..f33fd22b351c856a3544cdd9628a9da500d13abf 100644 (file)
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
-       depends on HAS_DMA
+       depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+       depends on INET
        select PHYLIB
        select MVMDIO
        ---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
 config MVNETA
        tristate "Marvell Armada 370/38x/XP/37xx network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
 config MVPP2
        tristate "Marvell Armada 375/7K/8K network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -93,7 +91,7 @@ config MVPP2
 
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
        select PHYLIB
        ---help---
index 17a904cc6a5e0fbe538f42ec2b00573e035c2955..0ad2f3f7da85a029b5dea7dd3ce67b69d4ff8605 100644 (file)
@@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                index = rx_desc - rxq->descs;
                data = rxq->buf_virt_addr[index];
-               phys_addr = rx_desc->buf_phys_addr;
+               phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
index 9f54ccbddea74b57973ee724acf360fa23434a3e..3360f7b9ee73bdb32957472299a3438e8189f7bd 100644 (file)
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 {
        const struct mlx4_en_frag_info *frag_info = priv->frag_info;
        unsigned int truesize = 0;
+       bool release = true;
        int nr, frag_size;
        struct page *page;
        dma_addr_t dma;
-       bool release;
 
        /* Collect used fragments while replacing them in the HW descriptors */
        for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                        release = page_count(page) != 1 ||
                                  page_is_pfmemalloc(page) ||
                                  page_to_nid(page) != numa_mem_id();
-               } else {
+               } else if (!priv->rx_headroom) {
+                       /* rx_headroom for a non-XDP setup is always 0.
+                        * When XDP is set, the condition above guarantees
+                        * that the page is always released.
+                        */
                        u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
 
                        frags->page_offset += sz_align;
index 7b1b5ac986d0779db320ad986c13defa5db82949..31bd56727022fe7a3bacc3a09e11691f79f1a974 100644 (file)
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
        struct res_srq *srq;
-       int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+       int local_qpn = vhcr->in_modifier & 0xffffff;
 
        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
        if (err)
index 323ffe8bf7e473c49261b7446530a7354e69f954..456f30007ad659e98a197f969edd5611b1728127 100644 (file)
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
        int i;
 
        buf->size = size;
-       buf->npages = 1 << get_order(size);
+       buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
        buf->page_shift = PAGE_SHIFT;
        buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
                             GFP_KERNEL);
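The old `1 << get_order(size)` rounded the page count up to a power of two, so a 12 KiB request counted as 4 pages; DIV_ROUND_UP(size, PAGE_SIZE) counts the exact 3. A worked comparison, with get_order() re-implemented in userspace purely for the demonstration:

#include <stdio.h>

#define PAGE_SIZE 4096

/* Userspace stand-in for the kernel's get_order(): pages rounded up to
 * the next power of two. */
static int pow2_pages(int size)
{
	int pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int n = 1;

	while (n < pages)
		n <<= 1;
	return n;
}

int main(void)
{
	int size = 3 * PAGE_SIZE;	/* a 12 KiB fragment buffer */

	printf("power-of-two: %d pages, exact: %d pages\n",
	       pow2_pages(size), (size + PAGE_SIZE - 1) / PAGE_SIZE);
	return 0;
}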
index 487388aed98f22cc9ae814fd60d27b48d5105458..384c1fa490811ee651919c139b9cd9e724d4ff81 100644 (file)
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
        unsigned long flags;
        bool poll_cmd = ent->polling;
        int alloc_ret;
+       int cmd_mode;
 
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
+       cmd_mode = cmd->mode;
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        mmiowb();
        /* if not in polling don't use ent after this point */
-       if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+       if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-       char outlen_str[8];
+       char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;
 
-       outlen_str[7] = 0;
-
        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;
index 75e4308ba786aeca51bba013031aeee485e96dd9..d258bb6792713e8bc56bfc9be4c4c8b98f45aebe 100644 (file)
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
        HLIST_HEAD(del_list);
        spin_lock_bh(&priv->fs.arfs.arfs_lock);
        mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-               if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-                       break;
                if (!work_pending(&arfs_rule->arfs_work) &&
                    rps_may_expire_flow(priv->netdev,
                                        arfs_rule->rxq, arfs_rule->flow_id,
                                        arfs_rule->filter_id)) {
                        hlist_del_init(&arfs_rule->hlist);
                        hlist_add_head(&arfs_rule->hlist, &del_list);
+                       if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+                               break;
                }
        }
        spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
            skb->protocol != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
 
+       if (skb->encapsulation)
+               return -EPROTONOSUPPORT;
+
        arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
        if (!arfs_t)
                return -EPROTONOSUPPORT;
index 0a52f31fef377e40ae9f19c091c33aca58bee154..86bc9ac99586e620e1b5858f767d136a8436c81f 100644 (file)
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }
 
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-                                   struct ieee_ets *ets)
+                                   struct ieee_ets *ets,
+                                   bool zero_sum_allowed)
 {
        bool have_ets_tc = false;
        int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
        }
 
        if (have_ets_tc && bw_sum != 100) {
-               netdev_err(netdev,
-                          "Failed to validate ETS: BW sum is illegal\n");
+               if (bw_sum || (!bw_sum && !zero_sum_allowed))
+                       netdev_err(netdev,
+                                  "Failed to validate ETS: BW sum is illegal\n");
                return -EINVAL;
        }
        return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        if (!MLX5_CAP_GEN(priv->mdev, ets))
                return -EOPNOTSUPP;
 
-       err = mlx5e_dbcnl_validate_ets(netdev, ets);
+       err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
        if (err)
                return err;
 
@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
                          ets.prio_tc[i]);
        }
 
-       err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-       if (err) {
-               netdev_err(netdev,
-                          "%s, Failed to validate ETS: %d\n", __func__, err);
+       err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+       if (err)
                goto out;
-       }
 
        err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
        if (err) {
index 56c1b6f5593e053d4629b15635bacf1ece9d6a88..dae4156a710ddc60467999ab56c67b7ff31914db 100644 (file)
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        mlx5e_activate_channels(&priv->channels);
        netif_tx_start_all_queues(priv->netdev);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_add_sqs_fwd_rules(priv);
 
        mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
        mlx5e_redirect_rqts_to_drop(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_remove_sqs_fwd_rules(priv);
 
        /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-       if (MLX5_VPORT_MANAGER(mdev))
+       if (MLX5_ESWITCH_MANAGER(mdev))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        mlx5e_enable_async_events(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_register_vport_reps(priv);
 
        if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_unregister_vport_reps(priv);
 
        mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-       if (MLX5_VPORT_MANAGER(mdev)) {
+       if (MLX5_ESWITCH_MANAGER(mdev)) {
                rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                if (!rpriv) {
                        mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
index 57987f6546e8357bdfaeb3e657e0f07fe47d940a..2b8040a3cdbd7c2f74bb854bd8141ba379ea37de 100644 (file)
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
 
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;
 
        rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
-       struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5_eswitch_rep *rep;
 
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
+
+       rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;
 
index 0edf4751a8ba2549e380e7ddc27f57b34d49521a..3a2c4e548226e2e66e867ffbb9b6370ae767be78 100644 (file)
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
        else
                actions = flow->nic_attr->action;
 
+       if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+           !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+               return false;
+
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                return modify_header_match_supported(&parse_attr->spec, exts);
 
index f63dfbcd29fea1efc2237d6dcecdbdd74259e1a0..dd01ad4c0b547dc989d3cb8ca1749533114dc288 100644 (file)
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
        int err;
        int i, enabled_events;
 
-       if (!ESW_ALLOWED(esw))
-               return 0;
-
-       if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+       if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        u64 node_guid;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
        struct mlx5_vport *evport;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
@@ -2218,6 +2216,6 @@ free_out:
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-       return esw->mode;
+       return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
index cecd201f0b73ab8a42693a79070c21bcc850d6e4..91f1209886ffdbb37af33ac32369f312296f8bfa 100644 (file)
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EPERM;
 
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
index 49a75d31185ecf25ff93c5f3a9beec6b48be28a1..6ddb2565884d5372ebfbe814baca6279da68e60b 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -1886,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
        if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
                if (!fwd_next_prio_supported(ft))
                        return ERR_PTR(-EOPNOTSUPP);
-               if (dest)
+               if (dest_num)
                        return ERR_PTR(-EINVAL);
                mutex_lock(&root->chain_lock);
                next_ft = find_next_chained_ft(prio);
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        goto err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                        err = init_fdb_root_ns(steering);
                        if (err)
index afd9f4fa22f40b70506fafa49f29cf647c22a959..41ad24f0de2cf9d171e586df3b9d167515d3cb03 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
        }
 
        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-           MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+           MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
index 1e062e6b2587eb7217fbeca7260844ee7c2b465a..3f767cde4c1d50cbcd50d2eb670164fc20802983 100644 (file)
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
        struct mlx5_clock *clock = &mdev->clock;
+       u64 overflow_cycles;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 
        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least once every wrap around.
+        * The period is the minimum of the maximum HW cycle count (the
+        * clock source mask) and the maximum number of cycles that can be
+        * multiplied by the clock multiplier without the result exceeding
+        * 64 bits.
         */
-       ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+       overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+       overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+       ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
                                 frac, &frac);
-       do_div(ns, NSEC_PER_SEC / 2 / HZ);
+       do_div(ns, NSEC_PER_SEC / HZ);
        clock->overflow_period = ns;
 
        mdev->clock_info_page = alloc_page(GFP_KERNEL);
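The new bound takes the minimum of two limits: half the hardware counter range, so a wrap is caught with headroom, and the largest cycle count whose product with the multiplier still fits in 63 bits, which is what cyclecounter_cyc2ns() can safely convert. A worked sketch with an invented counter mask and multiplier:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 41) - 1;	/* invented 41-bit counter */
	uint32_t mult = 1u << 21;		/* invented multiplier */
	uint64_t overflow_cycles;

	/* Largest cycle count whose product with mult fits in 63 bits... */
	overflow_cycles = (~0ULL >> 1) / mult;
	/* ...capped at half the counter range for wrap headroom. */
	if (overflow_cycles > (mask >> 1))
		overflow_cycles = mask >> 1;

	printf("check at least every %llu cycles\n",
	       (unsigned long long)overflow_cycles);
	return 0;
}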
index 7cb67122e8b5f04371651e1c1e2757acb281a36e..98359559c77e4286df95df17651a4b9f2ca8e427 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
        int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
        struct mlx5_mpfs *mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return;
 
        WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
        u32 index;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
index fa9d0760dd36ffda5c2c439f12bbdffab6320ccd..31a9cbd85689b01fc0bfe9e6c221d73cc7c5fe13 100644 (file)
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
                                   int inlen)
 {
-       u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
                                     int outlen)
 {
-       u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
index 2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671..a0674962f02c4d2a35d05c98f84436967703101c 100644 (file)
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return -EBUSY;
        }
 
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               goto enable_vfs_hca;
+
        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return err;
        }
 
+enable_vfs_hca:
        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        }
 
 out:
-       mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+       if (MLX5_ESWITCH_MANAGER(dev))
+               mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
index 719cecb182c6c4eb5579eb1b36601acb6c0d0c5c..7eecd5b07bb1931bf3041b1ae12b0f3f5154405a 100644 (file)
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
-       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -EOPNOTSUPP;
 
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
index b97bb72b4db45fde2e4687d6e517f41b8a1ad078..86478a6b99c5068e13688f2556e954ee3b3f9486 100644 (file)
@@ -113,35 +113,45 @@ err_db_free:
        return err;
 }
 
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-                                 struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+                                struct mlx5_wq_qp *qp)
 {
+       struct mlx5_frag_buf_ctrl *sq_fbc;
        struct mlx5_frag_buf *rqb, *sqb;
 
-       rqb = &qp->rq.fbc.frag_buf;
+       rqb  = &qp->rq.fbc.frag_buf;
        *rqb = *buf;
        rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       rqb->npages = 1 << get_order(rqb->size);
+       rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
 
-       sqb = &qp->sq.fbc.frag_buf;
-       *sqb = *buf;
-       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-       sqb->npages = 1 << get_order(sqb->size);
+       sq_fbc = &qp->sq.fbc;
+       sqb    = &sq_fbc->frag_buf;
+       *sqb   = *buf;
+       sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
+       sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
        sqb->frags += rqb->npages; /* first part is for the rq */
+       if (sq_fbc->strides_offset)
+               sqb->frags--;
 }
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
+       u32 sq_strides_offset;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
-       mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
-                     MLX5_GET(qpc, qpc, log_sq_size),
-                     &wq->sq.fbc);
+
+       sq_strides_offset =
+               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+       mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+                            MLX5_GET(qpc, qpc, log_sq_size),
+                            sq_strides_offset,
+                            &wq->sq.fbc);
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                goto err_db_free;
        }
 
-       mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+       mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
        wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
        wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
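The sq_strides_offset computation above appears to handle an RQ that ends mid-page: the SQ's first stride then starts part-way into the shared page, offset by the RQ remainder divided by the 64-byte send-WQE basic block. A worked example, assuming frag_sz here is the RQ fragment size as in the hunk and with all sizes illustrative:

#include <stdio.h>

#define PAGE_SIZE   4096
#define SEND_WQE_BB 64	/* size of one send-WQE basic block */

int main(void)
{
	unsigned int rq_frag_sz = 2048;	/* illustrative RQ fragment size */
	unsigned int off = (rq_frag_sz % PAGE_SIZE) / SEND_WQE_BB;

	printf("sq_strides_offset = %u basic blocks\n", off);	/* 32 */
	return 0;
}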
index f4d9c9975ac3d857f50ef255756ea23a7a11fdb5..82827a8d3d67cac73ac3f6c232e3f750553deddc 100644 (file)
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
 
 config MLXSW_PCI
        tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
-       depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+       depends on PCI && HAS_IOMEM && MLXSW_CORE
        default m
        ---help---
          This is PCI bus implementation for Mellanox Technologies Switch ASICs.
index 6aaaf3d9ba31d9538d9307caa0450a848bf6b091..77b2adb293415a9de16caaabbd203b397cd12a4a 100644 (file)
@@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
        kfree(mlxsw_sp_rt6);
 }
 
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
+{
+       /* RTF_CACHE routes are ignored */
+       return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+}
+
 static struct fib6_info *
 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 {
@@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 
 static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
-                                const struct fib6_info *nrt, bool append)
+                                const struct fib6_info *nrt, bool replace)
 {
        struct mlxsw_sp_fib6_entry *fib6_entry;
 
-       if (!append)
+       if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
                return NULL;
 
        list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                        break;
                if (rt->fib6_metric < nrt->fib6_metric)
                        continue;
-               if (rt->fib6_metric == nrt->fib6_metric)
+               if (rt->fib6_metric == nrt->fib6_metric &&
+                   mlxsw_sp_fib6_rt_can_mp(rt))
                        return fib6_entry;
                if (rt->fib6_metric > nrt->fib6_metric)
                        break;
@@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                              const struct fib6_info *nrt, bool replace)
 {
-       struct mlxsw_sp_fib6_entry *fib6_entry;
+       struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
 
        list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
                struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                        continue;
                if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
                        break;
-               if (replace && rt->fib6_metric == nrt->fib6_metric)
-                       return fib6_entry;
+               if (replace && rt->fib6_metric == nrt->fib6_metric) {
+                       if (mlxsw_sp_fib6_rt_can_mp(rt) ==
+                           mlxsw_sp_fib6_rt_can_mp(nrt))
+                               return fib6_entry;
+                       if (mlxsw_sp_fib6_rt_can_mp(nrt))
+                               fallback = fallback ?: fib6_entry;
+               }
                if (rt->fib6_metric > nrt->fib6_metric)
-                       return fib6_entry;
+                       return fallback ?: fib6_entry;
        }
 
-       return NULL;
+       return fallback;
 }
 
 static int
@@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
-                                   struct fib6_info *rt, bool replace,
-                                   bool append)
+                                   struct fib6_info *rt, bool replace)
 {
        struct mlxsw_sp_fib6_entry *fib6_entry;
        struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
        /* Before creating a new entry, try to append route to an existing
         * multipath entry.
         */
-       fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
+       fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
        if (fib6_entry) {
                err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
                if (err)
@@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
                return 0;
        }
 
-       /* We received an append event, yet did not find any route to
-        * append to.
-        */
-       if (WARN_ON(append)) {
-               err = -EINVAL;
-               goto err_fib6_entry_append;
-       }
-
        fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
        if (IS_ERR(fib6_entry)) {
                err = PTR_ERR(fib6_entry);
@@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 err_fib6_node_entry_link:
        mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
 err_fib6_entry_create:
-err_fib6_entry_append:
 err_fib6_entry_nexthop_add:
        mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
        return err;
@@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
        struct mlxsw_sp_fib_event_work *fib_work =
                container_of(work, struct mlxsw_sp_fib_event_work, work);
        struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-       bool replace, append;
+       bool replace;
        int err;
 
        rtnl_lock();
@@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
        case FIB_EVENT_ENTRY_APPEND: /* fall through */
        case FIB_EVENT_ENTRY_ADD:
                replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
-               append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
                err = mlxsw_sp_router_fib6_add(mlxsw_sp,
-                                              fib_work->fen6_info.rt, replace,
-                                              append);
+                                              fib_work->fen6_info.rt, replace);
                if (err)
                        mlxsw_sp_router_fib_abort(mlxsw_sp);
                mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
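
The new mlxsw_sp_fib6_rt_can_mp() helper above uses the
mask-and-compare idiom: AND with both flags of interest, then compare
against the one that must be set, so "RTF_GATEWAY set and RTF_ADDRCONF
clear" is a single test. A standalone illustration (flag values copied
from the kernel UAPI headers; treat them as an assumption here):

    #include <stdbool.h>
    #include <stdio.h>

    #define RTF_GATEWAY  0x0002     /* from <linux/route.h> */
    #define RTF_ADDRCONF 0x00040000 /* from <linux/ipv6_route.h> */

    /* Mirrors mlxsw_sp_fib6_rt_can_mp(): true only when RTF_GATEWAY
     * is set and RTF_ADDRCONF is clear. */
    static bool can_mp(unsigned int flags)
    {
            return (flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   can_mp(RTF_GATEWAY),                /* 1 */
                   can_mp(RTF_GATEWAY | RTF_ADDRCONF), /* 0 */
                   can_mp(0));                         /* 0 */
            return 0;
    }
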
index fb2c8f8071e64d3b6d52865ecaddf17f841a2b9d..776a8a9be8e3551311f5a99ba0285c4c698cf10a 100644 (file)
@@ -344,10 +344,9 @@ static int ocelot_port_stop(struct net_device *dev)
 static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
 {
        ifh[0] = IFH_INJ_BYPASS;
-       ifh[1] = (0xff00 & info->port) >> 8;
+       ifh[1] = (0xf00 & info->port) >> 8;
        ifh[2] = (0xff & info->port) << 24;
-       ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
-                (info->tag_type << 16) | info->vid;
+       ifh[3] = (info->tag_type << 16) | info->vid;
 
        return 0;
 }
@@ -370,11 +369,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
                         QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
 
        info.port = BIT(port->chip_port);
-       info.cpuq = 0xff;
+       info.tag_type = IFH_TAG_TYPE_C;
+       info.vid = skb_vlan_tag_get(skb);
        ocelot_gen_ifh(ifh, &info);
 
        for (i = 0; i < IFH_LEN; i++)
-               ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+               ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+                                QS_INJ_WR, grp);
 
        count = (skb->len + 3) / 4;
        last = skb->len % 4;
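
The ocelot_port_xmit() hunk above now passes each injection-header
word through cpu_to_be32() before ocelot_write_rix(), so the IFH
reaches the injection FIFO in big-endian byte order on any host (the
__force cast only silences sparse). A userspace sketch of the swap,
using htonl() as the stand-in for cpu_to_be32():

    #include <arpa/inet.h> /* htonl() */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ifh_word = 0x12345678;

            /* On a little-endian host this prints 0x78563412 -- the
             * stored bytes become 12 34 56 78, which is what the
             * device expects; on big-endian hosts it is a no-op. */
            printf("0x%08x\n", htonl(ifh_word));
            return 0;
    }
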
index fcdfb8e7fdeab0b9dcb353f4cd4a7d76370c9817..40216d56dddcb73d997ed4e4c48e63868610da89 100644 (file)
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
        ret = nfp_net_bpf_offload(nn, prog, running, extack);
        /* Stop offload if replace not possible */
-       if (ret && prog)
-               nfp_bpf_xdp_offload(app, nn, NULL, extack);
+       if (ret)
+               return ret;
 
-       nn->dp.bpf_offload_xdp = prog && !ret;
+       nn->dp.bpf_offload_xdp = !!prog;
        return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index 91935405f5861678077c188328d365ed5cb2ba7f..84f7a5dbea9d5bf17abd88416cc5a41f2fa4770b 100644 (file)
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                         NFP_FLOWER_MASK_MPLS_Q;
 
                frame->mpls_lse = cpu_to_be32(t_mpls);
+       } else if (dissector_uses_key(flow->dissector,
+                                     FLOW_DISSECTOR_KEY_BASIC)) {
+               /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
+                * bit, which indicates an mpls ether type but without any
+                * mpls fields.
+                */
+               struct flow_dissector_key_basic *key_basic;
+
+               key_basic = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_BASIC,
+                                                     flow->key);
+               if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+                   key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+                       frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
 }
 
index c42e64f32333f84640ff913b61ff199701e1b404..525057bee0ed8978f360d6eeb8293d8a990a0f22 100644 (file)
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;
 
+               case cpu_to_be16(ETH_P_MPLS_UC):
+               case cpu_to_be16(ETH_P_MPLS_MC):
+                       if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+                               key_layer |= NFP_FLOWER_LAYER_MAC;
+                               key_size += sizeof(struct nfp_flower_mac_mpls);
+                       }
+                       break;
+
                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index 78afe75129ab5b7a852d5bffa40f5daa9b1c76d1..382bb93cb0900f7e83e0cc34277379c3b36468bd 100644 (file)
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
        payload.dst_ipv4 = flow->daddr;
 
        /* If entry has expired send dst IP with all other fields 0. */
-       if (!(neigh->nud_state & NUD_VALID)) {
+       if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
                nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
                /* Trigger ARP to verify invalid neighbour state. */
                neigh_event_send(neigh, NULL);
index 46b76d5a726c6ade2c48c000172a3d9ba9db7253..152283d7e59c8f4a7a69b45f520a7c9625e9ce16 100644 (file)
@@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
                return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
 
        pf->limit_vfs = ~0;
-       pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
        /* Allow any setting for backwards compatibility if symbol not found */
        if (err == -ENOENT)
                return 0;
@@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
        err = nfp_net_pci_probe(pf);
        if (err)
-               goto err_sriov_unlimit;
+               goto err_fw_unload;
 
        err = nfp_hwmon_register(pf);
        if (err) {
@@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
 err_net_remove:
        nfp_net_pci_remove(pf);
-err_sriov_unlimit:
-       pci_sriov_set_totalvfs(pf->pdev, 0);
 err_fw_unload:
        kfree(pf->rtbl);
        nfp_mip_close(pf->mip);
@@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev)
        nfp_hwmon_unregister(pf);
 
        nfp_pcie_sriov_disable(pdev);
-       pci_sriov_set_totalvfs(pf->pdev, 0);
 
        nfp_net_pci_remove(pf);
 
index cd34097b79f1be9d313d8f28b9701bb5bd6a3100..37a6d7822a3860647c416efeff47c7a7837a3a85 100644 (file)
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
        err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
                           nfp_resource_address(state->res),
                           fwinf, sizeof(*fwinf));
-       if (err < sizeof(*fwinf))
+       if (err < (int)sizeof(*fwinf))
                goto err_release;
 
        if (!nffw_res_flg_init_get(fwinf))
index 00db3401b89852a7fe5eaca7342344bcb3b66d4d..1dfaccd151f0d457a2ce38447400925113ae546d 100644 (file)
@@ -502,6 +502,7 @@ enum BAR_ID {
 struct qed_nvm_image_info {
        u32 num_images;
        struct bist_nvm_image_att *image_att;
+       bool valid;
 };
 
 #define DRV_MODULE_VERSION                   \
index 8f31406ec89407713b2ad32c81a30185b2c05727..e0680ce9132815568914dff86606363b9a02cb88 100644 (file)
@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
                *type = DCBX_PROTOCOL_ROCE_V2;
        } else {
                *type = DCBX_MAX_PROTOCOL_TYPE;
-               DP_ERR(p_hwfn,
-                      "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
-                      id, app_prio_bitmap);
+               DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+                      app_prio_bitmap);
                return false;
        }
 
@@ -710,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
        p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-              ARRAY_SIZE(p_local->local_chassis_id));
+              sizeof(p_local->local_chassis_id));
        memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-              ARRAY_SIZE(p_local->local_port_id));
+              sizeof(p_local->local_port_id));
 }
 
 static void
@@ -724,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
        p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-              ARRAY_SIZE(p_remote->peer_chassis_id));
+              sizeof(p_remote->peer_chassis_id));
        memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-              ARRAY_SIZE(p_remote->peer_port_id));
+              sizeof(p_remote->peer_port_id));
 }
 
 static int
@@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
                *cap = 0x80;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
-                       DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+               *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+                       DCB_CAP_DCBX_STATIC);
                break;
        default:
                *cap = false;
@@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
        if (!dcbx_info)
                return 0;
 
-       if (dcbx_info->operational.enabled)
-               mode |= DCB_CAP_DCBX_LLD_MANAGED;
        if (dcbx_info->operational.ieee)
                mode |= DCB_CAP_DCBX_VER_IEEE;
        if (dcbx_info->operational.cee)
index a14e484890299565ee8fdac8851ed9d7f3e90437..4340c4c90bcbe8b03e5373cfc674c8840ff640d9 100644 (file)
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
                format_idx = header & MFW_TRACE_EVENTID_MASK;
 
                /* Skip message if its index doesn't exist in the meta data */
-               if (format_idx > s_mcp_trace_meta.formats_num) {
+               if (format_idx >= s_mcp_trace_meta.formats_num) {
                        u8 format_size =
                                (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
                                     MFW_TRACE_PRM_SIZE_SHIFT);
index 329781cda77fbecc88328ea95f00e39d4be5db9b..e5249b4741d03f7c347c70a861288b787653741a 100644 (file)
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
                        DP_INFO(p_hwfn, "Failed to update driver state\n");
 
                rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-                                              QED_OV_ESWITCH_VEB);
+                                              QED_OV_ESWITCH_NONE);
                if (rc)
                        DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
        }
index 99973e10b17977561be6536fee84cf2901622c3e..5ede6408649d66c25c85a25f0c9337feeb428670 100644 (file)
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
 
        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-               u32 *p_bins = (u32 *)p_params->bins;
+               u32 *p_bins = p_params->bins;
 
                p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
        }
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
 {
-       unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct vport_update_ramrod_data *p_ramrod = NULL;
+       u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
        /* explicitly clear out the entire vector */
        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));
-       memset(bins, 0, sizeof(unsigned long) *
-              ETH_MULTICAST_MAC_BINS_IN_REGS);
+       memset(bins, 0, sizeof(bins));
        /* filter ADD op is explicit set op and it removes
         *  any existing filters for the vport
         */
        if (p_filter_cmd->opcode == QED_FILTER_ADD) {
                for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
-                       u32 bit;
+                       u32 bit, nbits;
 
                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-                       __set_bit(bit, bins);
+                       nbits = sizeof(u32) * BITS_PER_BYTE;
+                       bins[bit / nbits] |= 1 << (bit % nbits);
                }
 
                /* Convert to correct endianity */
                for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                        struct vport_update_ramrod_mcast *p_ramrod_bins;
-                       u32 *p_bins = (u32 *)bins;
 
                        p_ramrod_bins = &p_ramrod->approx_mcast;
-                       p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+                       p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
                }
        }
 
index 806a8da257e9a48cd553c6d173fc69300917391e..8d80f1095d171c85b7d010bb5297a58a1961808d 100644 (file)
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
        u8                              anti_spoofing_en;
        u8                              update_accept_any_vlan_flg;
        u8                              accept_any_vlan;
-       unsigned long                   bins[8];
+       u32                             bins[8];
        struct qed_rss_params           *rss_params;
        struct qed_filter_accept_flags  accept_flags;
        struct qed_sge_tpa_params       *sge_tpa_params;
index c97ebd681c471196cb4135deafbf8e07efc9d615..012973d75ad039436fb0007e9452eb0565f4938c 100644 (file)
@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 
        skb = build_skb(buffer->data, 0);
        if (!skb) {
-               rc = -ENOMEM;
-               goto out_post;
+               DP_INFO(cdev, "Failed to build SKB\n");
+               kfree(buffer->data);
+               goto out_post1;
        }
 
        data->u.placement_offset += NET_SKB_PAD;
@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
                cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
                                      data->opaque_data_0,
                                      data->opaque_data_1);
+       } else {
+               DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+                                   QED_MSG_LL2 | QED_MSG_STORAGE),
+                          "Dropping the packet\n");
+               kfree(buffer->data);
        }
 
+out_post1:
        /* Update Buffer information and update FW producer */
        buffer->data = new_data;
        buffer->phys_addr = new_phys_addr;
index b04d57ca5176ee65f348bb5882965e19f107e2f8..758a9a5127fa8c00566e4f90d5f75db636570e33 100644 (file)
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
                goto err2;
        }
 
-       DP_INFO(cdev, "qed_probe completed successffuly\n");
+       DP_INFO(cdev, "qed_probe completed successfully\n");
 
        return cdev;
 
@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
-                               hwfn->simd_proto_handler[j].func(
-                                       hwfn->simd_proto_handler[j].token);
+                               struct qed_simd_fp_handler *p_handler =
+                                       &hwfn->simd_proto_handler[j];
+
+                               if (p_handler->func)
+                                       p_handler->func(p_handler->token);
+                               else
+                                       DP_NOTICE(hwfn,
+                                                 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+                                                 j, status);
+
                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
@@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+       if (is_kdump_kernel()) {
+               DP_INFO(cdev,
+                       "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+                       cdev->int_params.in.min_msix_cnt);
+               cdev->int_params.in.num_vectors =
+                       cdev->int_params.in.min_msix_cnt;
+       }
+
        rc = qed_set_int_mode(cdev, false);
        if (rc)  {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
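
The qed_single_int() change above stops calling through a possibly
NULL fastpath pointer: the handler is invoked only if one was
registered for that status bit, and the event is logged otherwise. The
same defensive pattern, reduced to standalone C (struct and names are
illustrative):

    #include <stdio.h>

    struct simd_fp_handler {
            void (*func)(void *token);
            void *token;
    };

    static void rx_handler(void *token)
    {
            printf("handling %s\n", (const char *)token);
    }

    int main(void)
    {
            struct simd_fp_handler handlers[2] = {
                    { .func = rx_handler, .token = "proto 0" },
                    { 0 }, /* this slot was never registered */
            };

            for (int j = 0; j < 2; j++) {
                    if (handlers[j].func)
                            handlers[j].func(handlers[j].token);
                    else
                            fprintf(stderr, "no handler for index %d\n", j);
            }
            return 0;
    }
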
index 4e0b443c9519d67bc3b888ddf3b341c93291e328..cdd645024a32aadc40f54c4e02a88988898ce219 100644 (file)
@@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;
 
+       /* nvm_info needs to be updated */
+       p_hwfn->nvm_info.valid = false;
+
        return 0;
 }
 
@@ -1208,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
                break;
        default:
                p_link->speed = 0;
+               p_link->link_up = 0;
        }
 
        if (p_link->link_up && p_link->speed)
@@ -1305,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
        phy_cfg.adv_speed = params->speed.advertised_speeds;
        phy_cfg.loopback_mode = params->loopback_mode;
-       if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
-               if (params->eee.enable)
-                       phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+       /* There are MFWs that share this capability regardless of whether
+        * this is feasible or not. And given that at the very least adv_caps
+        * would be set internally by qed, we want to make sure LFA would
+        * still work.
+        */
+       if ((p_hwfn->mcp_info->capabilities &
+            FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+               phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
                if (params->eee.tx_lpi_enable)
                        phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
                if (params->eee.adv_caps & QED_EEE_1G_ADV)
@@ -2555,11 +2565,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
 {
-       struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+       struct qed_nvm_image_info nvm_info;
        struct qed_ptt *p_ptt;
        int rc;
        u32 i;
 
+       if (p_hwfn->nvm_info.valid)
+               return 0;
+
        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2580,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
        }
 
        /* Acquire from MFW the amount of available images */
-       nvm_info->num_images = 0;
+       nvm_info.num_images = 0;
        rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
-                                            p_ptt, &nvm_info->num_images);
+                                            p_ptt, &nvm_info.num_images);
        if (rc == -EOPNOTSUPP) {
                DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
                goto out;
-       } else if (rc || !nvm_info->num_images) {
+       } else if (rc || !nvm_info.num_images) {
                DP_ERR(p_hwfn, "Failed getting number of images\n");
                goto err0;
        }
 
-       nvm_info->image_att = kmalloc_array(nvm_info->num_images,
-                                           sizeof(struct bist_nvm_image_att),
-                                           GFP_KERNEL);
-       if (!nvm_info->image_att) {
+       nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+                                          sizeof(struct bist_nvm_image_att),
+                                          GFP_KERNEL);
+       if (!nvm_info.image_att) {
                rc = -ENOMEM;
                goto err0;
        }
 
        /* Iterate over images and get their attributes */
-       for (i = 0; i < nvm_info->num_images; i++) {
+       for (i = 0; i < nvm_info.num_images; i++) {
                rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
-                                                   &nvm_info->image_att[i], i);
+                                                   &nvm_info.image_att[i], i);
                if (rc) {
                        DP_ERR(p_hwfn,
                               "Failed getting image index %d attributes\n", i);
@@ -2597,14 +2610,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
                }
 
                DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
-                          nvm_info->image_att[i].len);
+                          nvm_info.image_att[i].len);
        }
 out:
+       /* Update hwfn's nvm_info */
+       if (nvm_info.num_images) {
+               p_hwfn->nvm_info.num_images = nvm_info.num_images;
+               kfree(p_hwfn->nvm_info.image_att);
+               p_hwfn->nvm_info.image_att = nvm_info.image_att;
+               p_hwfn->nvm_info.valid = true;
+       }
+
        qed_ptt_release(p_hwfn, p_ptt);
        return 0;
 
 err1:
-       kfree(nvm_info->image_att);
+       kfree(nvm_info.image_att);
 err0:
        qed_ptt_release(p_hwfn, p_ptt);
        return rc;
@@ -2641,6 +2662,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
+       qed_mcp_nvm_info_populate(p_hwfn);
        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
                if (type == p_hwfn->nvm_info.image_att[i].image_type)
                        break;
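
qed_mcp_nvm_info_populate() above now builds the image table into a
stack-local struct and commits it to p_hwfn->nvm_info only on success,
guarded by the new 'valid' flag that qed_mcp_nvm_wr_cmd() clears after
any NVM write. A toy model of that populate/invalidate flow (the
struct and helpers below are illustrative, not the qed API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct nvm_cache {
            unsigned int num_images;
            int *image_att;
            bool valid;
    };

    static struct nvm_cache cache;

    static int populate(void)
    {
            struct nvm_cache local = { .num_images = 3 };

            if (cache.valid)
                    return 0; /* fresh enough, skip the round-trip */

            local.image_att = calloc(local.num_images,
                                     sizeof(*local.image_att));
            if (!local.image_att)
                    return -1; /* on failure the old cache survives */

            /* ...fill local.image_att from the device here... */

            free(cache.image_att); /* drop any stale copy */
            cache = local;
            cache.valid = true;
            return 0;
    }

    int main(void)
    {
            populate();
            cache.valid = false; /* e.g. after an NVM write command */
            populate();          /* repopulates from scratch */
            printf("%u images cached\n", cache.num_images);
            free(cache.image_att);
            return 0;
    }
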
index f01bf52bc381f6f02c33ee3d9df4a90982cf8245..26e918d7f2f9c0603ab6b0f2f132daba6d7bcc3b 100644 (file)
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
 
        p_data->update_approx_mcast_flg = 1;
        memcpy(p_data->bins, p_mcast_tlv->bins,
-              sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+              sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
 }
 
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
        struct qed_iov_vf_init_params params;
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
        int i, j, rc;
 
        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *hwfn = &cdev->hwfns[j];
-               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+               hwfn = &cdev->hwfns[j];
+               ptt = qed_ptt_acquire(hwfn);
 
                /* Make sure not to use more than 16 queues per VF */
                params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                goto err;
        }
 
+       hwfn = QED_LEADING_HWFN(cdev);
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_ERR(hwfn, "Failed to acquire ptt\n");
+               rc = -EBUSY;
+               goto err;
+       }
+
+       rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+       if (rc)
+               DP_INFO(cdev, "Failed to update eswitch mode\n");
+       qed_ptt_release(hwfn, ptt);
+
        return num;
 
 err:
index 2d7fcd6a0777aa264b8e228d14eae3cc0e2d212d..be6ddde1a104ff34050ee72b7dc5bc40658e6c2b 100644 (file)
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
                resp_size += sizeof(struct pfvf_def_resp_tlv);
 
                memcpy(p_mcast_tlv->bins, p_params->bins,
-                      sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+                      sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
        }
 
        update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
                        u32 bit;
 
                        bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-                       __set_bit(bit, sp_params.bins);
+                       sp_params.bins[bit / 32] |= 1 << (bit % 32);
                }
        }
 
index 4f05d5eb3cf50ae51298a1711f2a850bcac1fe93..033409db86ae7bbbe63b79f80490bd2857abfe7c 100644 (file)
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
        struct channel_tlv tl;
        u8 padding[4];
 
-       u64 bins[8];
+       /* There are only 256 approx bins, and in HSI they're divided into
+        * 32-bit values. As old VFs used to set-bit to the values on its side,
+        * the upper half of the array is never expected to contain any data.
+        */
+       u64 bins[4];
+       u64 obsolete_bins[4];
 };
 
 struct vfpf_vport_update_accept_param_tlv {
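
The vfpf_vport_update_mcast_bin_tlv change above shrinks the
meaningful bins array but pads with obsolete_bins, so the TLV's wire
size -- part of the PF/VF channel ABI -- is unchanged. A compile-time
check of that invariant, with stand-in struct names:

    #include <stdint.h>
    #include <stdio.h>

    /* Before: 8 u64 words of bins. After: 4 meaningful words plus
     * 4 obsolete ones, so old VFs and new PFs still agree on size. */
    struct bins_old { uint64_t bins[8]; };
    struct bins_new { uint64_t bins[4]; uint64_t obsolete_bins[4]; };

    int main(void)
    {
            /* C11 static assertion: the ABI-visible size is stable. */
            _Static_assert(sizeof(struct bins_old) ==
                           sizeof(struct bins_new),
                           "TLV size must not change");
            printf("both %zu bytes\n", sizeof(struct bins_new));
            return 0;
    }
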
index 02adb513f4756cb58c423936213bdcb4158d1dfa..013ff567283c738f342ca5d6f5358e30ca6daa72 100644 (file)
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
 {
        struct qede_ptp *ptp = edev->ptp;
 
-       if (!ptp)
-               return -EIO;
+       if (!ptp) {
+               info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+
+               return 0;
+       }
 
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
index 891f03a7a33dc7286b5bb6d1b4ac2333ab74aacf..8d7b9bb910f2addae4712088884b334c42876934 100644 (file)
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
        ret = kstrtoul(buf, 16, &data);
+       if (ret)
+               return ret;
 
        switch (data) {
        case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
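
The qlcnic hunk above adds the missing check of kstrtoul()'s return
value, so malformed sysfs input is rejected instead of reaching the
switch with whatever was left in 'data'. A userspace analogue built on
strtoul():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a hex number and, like kstrtoul(), fail on empty input,
     * overflow, or trailing garbage. */
    static int parse_hex(const char *buf, unsigned long *out)
    {
            char *end;

            errno = 0;
            *out = strtoul(buf, &end, 16);
            if (errno || end == buf || *end != '\0')
                    return -EINVAL; /* mirrors 'if (ret) return ret;' */
            return 0;
    }

    int main(void)
    {
            unsigned long data;

            if (parse_hex("not-a-number", &data))
                    fprintf(stderr, "rejected malformed input\n");
            return 0;
    }
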
index 5803cd6db406c7f9c5426ceb87bf062d4f0434fb..206f0266463e362a0e34fe8ff5b626519500e2ed 100644 (file)
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
                return ret;
        }
 
-       netif_start_queue(qca->net_dev);
+       /* SPI thread takes care of TX queue */
 
        return 0;
 }
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
        qca->net_dev->stats.tx_errors++;
        /* Trigger tx queue flush and QCA7000 reset */
        qca->sync = QCASPI_SYNC_UNKNOWN;
+
+       if (qca->spi_thread)
+               wake_up_process(qca->spi_thread);
 }
 
 static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
 
        if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
            (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
-               dev_info(&spi->dev, "Invalid clkspeed: %d\n",
-                        qcaspi_clkspeed);
+               dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+                       qcaspi_clkspeed);
                return -EINVAL;
        }
 
        if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
            (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
-               dev_info(&spi->dev, "Invalid burst len: %d\n",
-                        qcaspi_burst_len);
+               dev_err(&spi->dev, "Invalid burst len: %d\n",
+                       qcaspi_burst_len);
                return -EINVAL;
        }
 
        if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
            (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
-               dev_info(&spi->dev, "Invalid pluggable: %d\n",
-                        qcaspi_pluggable);
+               dev_err(&spi->dev, "Invalid pluggable: %d\n",
+                       qcaspi_pluggable);
                return -EINVAL;
        }
 
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
        }
 
        if (register_netdev(qcaspi_devs)) {
-               dev_info(&spi->dev, "Unable to register net device %s\n",
-                        qcaspi_devs->name);
+               dev_err(&spi->dev, "Unable to register net device %s\n",
+                       qcaspi_devs->name);
                free_netdev(qcaspi_devs);
                return -EFAULT;
        }
index 75dfac0248f45cb423fd9883e38349a456b1dc0d..eaedc11ed686796b6246bf517bf7691aef43929c 100644 (file)
@@ -7148,7 +7148,7 @@ static void rtl8169_netpoll(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
 }
 #endif
 
@@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return rc;
        }
 
-       /* override BIOS settings, use userspace tools to enable WOL */
-       __rtl8169_set_wol(tp, 0);
+       tp->saved_wolopts = __rtl8169_get_wol(tp);
 
        if (rtl_tbi_enabled(tp)) {
                tp->set_speed = rtl8169_set_speed_tbi;
@@ -7789,6 +7788,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
        tp->cp_cmd |= RxChkSum | RxVlan;
 
index 27be51f0a421b43e191e594bdb6ebcd753b65eef..f3f7477043ce106155ca30ba7c07fb7d20e968bc 100644 (file)
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
 
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
        select CRC32
        select MII
@@ -31,7 +30,6 @@ config SH_ETH
 
 config RAVB
        tristate "Renesas Ethernet AVB support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || COMPILE_TEST
        select CRC32
        select MII
index 68f122140966d4de381b47fa192246eb7606707a..0d811c02ff340f09a385ec0677f0388034615eef 100644 (file)
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
        bool new_state = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Disable TX and RX right over here, if E-MAC change is ignored */
+       if (priv->no_avb_link)
+               ravb_rcv_snd_disable(ndev);
 
        if (phydev->link) {
                if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
                        ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
-                       if (priv->no_avb_link)
-                               ravb_rcv_snd_enable(ndev);
                }
        } else if (priv->link) {
                new_state = true;
                priv->link = 0;
                priv->speed = 0;
                priv->duplex = -1;
-               if (priv->no_avb_link)
-                       ravb_rcv_snd_disable(ndev);
        }
 
+       /* Enable TX and RX right over here, if E-MAC change is ignored */
+       if (priv->no_avb_link && phydev->link)
+               ravb_rcv_snd_enable(ndev);
+
+       mmiowb();
+       spin_unlock_irqrestore(&priv->lock, flags);
+
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 }
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
        return 0;
 }
 
-static int ravb_get_link_ksettings(struct net_device *ndev,
-                                  struct ethtool_link_ksettings *cmd)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       unsigned long flags;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       phy_ethtool_ksettings_get(ndev->phydev, cmd);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-static int ravb_set_link_ksettings(struct net_device *ndev,
-                                  const struct ethtool_link_ksettings *cmd)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int error;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Disable TX and RX */
-       ravb_rcv_snd_disable(ndev);
-
-       error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-       if (error)
-               goto error_exit;
-
-       if (cmd->base.duplex == DUPLEX_FULL)
-               priv->duplex = 1;
-       else
-               priv->duplex = 0;
-
-       ravb_set_duplex(ndev);
-
-error_exit:
-       mdelay(1);
-
-       /* Enable TX and RX */
-       ravb_rcv_snd_enable(ndev);
-
-       mmiowb();
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return error;
-}
-
-static int ravb_nway_reset(struct net_device *ndev)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       int error = -ENODEV;
-       unsigned long flags;
-
-       if (ndev->phydev) {
-               spin_lock_irqsave(&priv->lock, flags);
-               error = phy_start_aneg(ndev->phydev);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       return error;
-}
-
 static u32 ravb_get_msglevel(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 }
 
 static const struct ethtool_ops ravb_ethtool_ops = {
-       .nway_reset             = ravb_nway_reset,
+       .nway_reset             = phy_ethtool_nway_reset,
        .get_msglevel           = ravb_get_msglevel,
        .set_msglevel           = ravb_set_msglevel,
        .get_link               = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
        .get_ringparam          = ravb_get_ringparam,
        .set_ringparam          = ravb_set_ringparam,
        .get_ts_info            = ravb_get_ts_info,
-       .get_link_ksettings     = ravb_get_link_ksettings,
-       .set_link_ksettings     = ravb_set_link_ksettings,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_wol                = ravb_get_wol,
        .set_wol                = ravb_set_wol,
 };
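
Here and in the sh_eth diff further down, the bespoke ksettings and
nway_reset handlers are deleted in favour of the generic
phy_ethtool_*() helpers wired straight into the ethtool_ops table --
possible because the handlers no longer touch driver-private state. A
small compile-and-run sketch of that sharing (ethtool_like_ops is a
stand-in, not the kernel struct):

    #include <stdio.h>

    struct ethtool_like_ops {
            int (*nway_reset)(const char *name);
    };

    /* One generic helper replaces a per-driver copy. */
    static int generic_nway_reset(const char *name)
    {
            printf("restarting autoneg on %s\n", name);
            return 0;
    }

    /* Both "drivers" now point at the same function. */
    static const struct ethtool_like_ops ravb_ops   = { generic_nway_reset };
    static const struct ethtool_like_ops sh_eth_ops = { generic_nway_reset };

    int main(void)
    {
            ravb_ops.nway_reset("ravb0");
            sh_eth_ops.nway_reset("sh-eth0");
            return 0;
    }
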
index e9007b613f17ca8de16b67e054df42a800522fb5..5614fd231bbe1e4685582e15faf27dad412b241b 100644 (file)
@@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
+       unsigned long flags;
        int new_state = 0;
 
+       spin_lock_irqsave(&mdp->lock, flags);
+
+       /* Disable TX and RX right over here, if E-MAC change is ignored */
+       if (mdp->cd->no_psr || mdp->no_ether_link)
+               sh_eth_rcv_snd_disable(ndev);
+
        if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
@@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                        sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = 1;
                        mdp->link = phydev->link;
-                       if (mdp->cd->no_psr || mdp->no_ether_link)
-                               sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
-               if (mdp->cd->no_psr || mdp->no_ether_link)
-                       sh_eth_rcv_snd_disable(ndev);
        }
 
+       /* Enable TX and RX right over here, if E-MAC change is ignored */
+       if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
+               sh_eth_rcv_snd_enable(ndev);
+
+       mmiowb();
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
        if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
 }
@@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
        return 0;
 }
 
-static int sh_eth_get_link_ksettings(struct net_device *ndev,
-                                    struct ethtool_link_ksettings *cmd)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-       phy_ethtool_ksettings_get(ndev->phydev, cmd);
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return 0;
-}
-
-static int sh_eth_set_link_ksettings(struct net_device *ndev,
-                                    const struct ethtool_link_ksettings *cmd)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-       int ret;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-
-       /* disable tx and rx */
-       sh_eth_rcv_snd_disable(ndev);
-
-       ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-       if (ret)
-               goto error_exit;
-
-       if (cmd->base.duplex == DUPLEX_FULL)
-               mdp->duplex = 1;
-       else
-               mdp->duplex = 0;
-
-       if (mdp->cd->set_duplex)
-               mdp->cd->set_duplex(ndev);
-
-error_exit:
-       mdelay(1);
-
-       /* enable tx and rx */
-       sh_eth_rcv_snd_enable(ndev);
-
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return ret;
-}
-
 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
  * version must be bumped as well.  Just adding registers up to that
  * limit is fine, as long as the existing register indices don't
@@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
        pm_runtime_put_sync(&mdp->pdev->dev);
 }
 
-static int sh_eth_nway_reset(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-       int ret;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-       ret = phy_start_aneg(ndev->phydev);
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return ret;
-}
-
 static u32 sh_eth_get_msglevel(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_regs_len   = sh_eth_get_regs_len,
        .get_regs       = sh_eth_get_regs,
-       .nway_reset     = sh_eth_nway_reset,
+       .nway_reset     = phy_ethtool_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
        .get_link       = ethtool_op_get_link,
@@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_sset_count     = sh_eth_get_sset_count,
        .get_ringparam  = sh_eth_get_ringparam,
        .set_ringparam  = sh_eth_set_ringparam,
-       .get_link_ksettings = sh_eth_get_link_ksettings,
-       .set_link_ksettings = sh_eth_set_link_ksettings,
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings = phy_ethtool_set_link_ksettings,
        .get_wol        = sh_eth_get_wol,
        .set_wol        = sh_eth_set_wol,
 };
index 23f0785c0573ec72fea3db10dfdf353c41341ee8..7eeac3d6cfe898a9a4ef6df9378d8c6d29383ce1 100644 (file)
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
        return -EPROTONOSUPPORT;
 }
 
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
-                                 struct efx_filter_spec *spec,
-                                 bool replace_equal)
+static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
+                                        struct efx_filter_spec *spec,
+                                        bool replace_equal)
 {
        DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
        bool is_mc_recip;
        s32 rc;
 
-       down_read(&efx->filter_sem);
+       WARN_ON(!rwsem_is_locked(&efx->filter_sem));
        table = efx->filter_state;
        down_write(&table->lock);
 
@@ -4498,10 +4498,22 @@ out_unlock:
        if (rss_locked)
                mutex_unlock(&efx->rss_lock);
        up_write(&table->lock);
-       up_read(&efx->filter_sem);
        return rc;
 }
 
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+                                 struct efx_filter_spec *spec,
+                                 bool replace_equal)
+{
+       s32 ret;
+
+       down_read(&efx->filter_sem);
+       ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
+       up_read(&efx->filter_sem);
+
+       return ret;
+}
+
 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
        /* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
+               rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                if (rc < 0) {
                        if (rollback) {
                                netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                eth_broadcast_addr(baddr);
                efx_filter_set_eth_local(&spec, vlan->vid, baddr);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
+               rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                if (rc < 0) {
                        netif_warn(efx, drv, efx->net_dev,
                                   "Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
        if (vlan->vid != EFX_FILTER_VID_UNSPEC)
                efx_filter_set_eth_local(&spec, vlan->vid, NULL);
 
-       rc = efx_ef10_filter_insert(efx, &spec, true);
+       rc = efx_ef10_filter_insert_locked(efx, &spec, true);
        if (rc < 0) {
                const char *um = multicast ? "Multicast" : "Unicast";
                const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
                                           filter_flags, 0);
                        eth_broadcast_addr(baddr);
                        efx_filter_set_eth_local(&spec, vlan->vid, baddr);
-                       rc = efx_ef10_filter_insert(efx, &spec, true);
+                       rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                        if (rc < 0) {
                                netif_warn(efx, drv, efx->net_dev,
                                           "Broadcast filter insert failed rc=%d\n",
index ad4a354ce570e143a741e7ab7155ae84a8a5df34..ce3a177081a854a683493f7f6f2c79ac63f60cc4 100644 (file)
@@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx)
        up_write(&efx->filter_sem);
 }
 
-static void efx_restore_filters(struct efx_nic *efx)
-{
-       down_read(&efx->filter_sem);
-       efx->type->filter_table_restore(efx);
-       up_read(&efx->filter_sem);
-}
 
 /**************************************************************************
  *
@@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        efx_disable_interrupts(efx);
 
        mutex_lock(&efx->mac_lock);
+       down_write(&efx->filter_sem);
        mutex_lock(&efx->rss_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
            method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        if (efx->type->rx_restore_rss_contexts)
                efx->type->rx_restore_rss_contexts(efx);
        mutex_unlock(&efx->rss_lock);
-       down_read(&efx->filter_sem);
-       efx_restore_filters(efx);
-       up_read(&efx->filter_sem);
+       efx->type->filter_table_restore(efx);
+       up_write(&efx->filter_sem);
        if (efx->type->sriov_reset)
                efx->type->sriov_reset(efx);
 
@@ -2764,6 +2758,7 @@ fail:
        efx->port_initialized = false;
 
        mutex_unlock(&efx->rss_lock);
+       up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
 
        return rc;
@@ -3180,6 +3175,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
        return true;
 }
 
+static
 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec)
 {
@@ -3472,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 
        efx_init_napi(efx);
 
+       down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
+       up_write(&efx->filter_sem);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise NIC\n");
@@ -3764,7 +3762,9 @@ static int efx_pm_resume(struct device *dev)
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
+       down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
+       up_write(&efx->filter_sem);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
index 8edf20967c82c583bb59ace5f1f9c30dcfd1530d..e045a5d6b938f43f391a726f301d8911f156b32c 100644 (file)
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
        if (!state)
                return -ENOMEM;
        efx->filter_state = state;
+       init_rwsem(&state->lock);
 
        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
        table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
index cb5b0f58c395c2bdbf32e7283d91cf8c4ac5dbe9..edf20361ea5f15c7ddee617f899e31b92d7e261e 100644 (file)
@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
        default ARCH_SOCFPGA
-       depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+       depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for ethernet controller on Altera SOCFPGA
index 6e359572b9f0ea53ed46b553fb1cb51273415f57..5b3b06a0a3bf53e1eac9572ae8d14add0c3835e7 100644 (file)
@@ -55,6 +55,7 @@ struct socfpga_dwmac {
        struct  device *dev;
        struct regmap *sys_mgr_base_addr;
        struct reset_control *stmmac_rst;
+       struct reset_control *stmmac_ocp_rst;
        void __iomem *splitter_base;
        bool f2h_ptp_ref_clk;
        struct tse_pcs pcs;
@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
                val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
        /* Assert reset to the enet controller before changing the phy mode */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
+       reset_control_assert(dwmac->stmmac_ocp_rst);
+       reset_control_assert(dwmac->stmmac_rst);
 
        regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
        /* Deassert reset for the phy configuration to be sampled by
         * the enet controller, and operation to start in requested mode
         */
-       if (dwmac->stmmac_rst)
-               reset_control_deassert(dwmac->stmmac_rst);
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+       reset_control_deassert(dwmac->stmmac_rst);
        if (phymode == PHY_INTERFACE_MODE_SGMII) {
                if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
                        dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
                goto err_remove_config_dt;
        }
 
+       dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+       if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+               ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+               dev_err(dev, "error getting reset control of ocp %d\n", ret);
+               goto err_remove_config_dt;
+       }
+
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+
        ret = socfpga_dwmac_parse_data(dwmac, dev);
        if (ret) {
                dev_err(dev, "Unable to parse OF data\n");
index 2e6e2a96b4f263023e04eaad77e56f160cbedc5c..f9a61f90cfbc6acb269d4e8320bb9a078ae04239 100644 (file)
@@ -37,7 +37,7 @@
  *             is done in the "stmmac files"
  */
 
-/* struct emac_variant - Descrive dwmac-sun8i hardware variant
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
  * @default_syscon_value:      The default value of the EMAC register in syscon
  *                             This value is used for disabling properly EMAC
  *                             and used as a good starting value in case of the
index d37f17ca62fecf66a6b5af1c9aa105923310a341..65bc3556bd8f8c25b9b37421c80d6a663d8eb0db 100644 (file)
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
        }
 }
 
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+       value &= ~DMA_RBSZ_MASK;
+       value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
 
 const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
index c63c1fe3f26b9e4d5cb714ea3ceed56bf103b17e..22a4a6dbb1a4af42d3d7467e3ebca50efef57986 100644 (file)
 
 /* DMA Rx Channel X Control register defines */
 #define DMA_CONTROL_SR                 BIT(0)
+#define DMA_RBSZ_MASK                  GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT                 1
 
 /* Interrupt status per channel */
 #define DMA_CHAN_STATUS_REB            GENMASK(21, 19)
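The two new defines reserve bits 14:1 of the per-channel RX control register for the receive buffer size, which dwmac4_set_bfsize() above programs with a read-modify-write. A stand-alone user-space illustration of the same masking arithmetic (GENMASK re-derived locally; register value and buffer size are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* local 32-bit re-derivation of the kernel's GENMASK(h, l) */
    #define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    #define DMA_RBSZ_MASK   GENMASK32(14, 1)
    #define DMA_RBSZ_SHIFT  1

    int main(void)
    {
            uint32_t value = 0xdeadbeef;    /* pretend register contents */
            uint32_t bfsize = 1536;         /* hypothetical buffer size */

            value &= ~DMA_RBSZ_MASK;
            value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;

            printf("RBSZ field: %u\n",
                   (value & DMA_RBSZ_MASK) >> DMA_RBSZ_SHIFT);
            return 0;
    }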
index e44e7b26ce829be0eff000c6a68b064139d532b8..fe8b536b13f864bfff723ea2236a3e5982026533 100644 (file)
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+       void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
 };
 
 #define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
        stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
 #define stmmac_enable_tso(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
 
 struct mac_device_info;
 struct net_device;
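The new stmmac_set_dma_bfsize() wrapper dispatches through the DMA ops table exactly like its neighbours. As a sketch of the shape of such a guarded void-callback macro -- not the verbatim definition from hwif.h -- stmmac_do_void_callback() amounts to:

    #define example_do_void_callback(__priv, __module, __cname, __args...) \
    ({                                                                      \
            int __result = -EINVAL;                                         \
                                                                            \
            /* call the op only if this hardware variant provides it */     \
            if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) {\
                    (__priv)->hw->__module->__cname(__args);                \
                    __result = 0;                                           \
            }                                                               \
            __result;                                                       \
    })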
index e79b0d7b388a16d524917b0dfed1b4dd2f079c2f..60f59abab009e6fcb077eeccacb545dddad47fa6 100644 (file)
@@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 static int stmmac_init_phy(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
@@ -968,6 +969,15 @@ static int stmmac_init_phy(struct net_device *dev)
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);
 
+       /*
+        * Half-duplex mode is not supported with multiqueue;
+        * half-duplex can only work with a single queue.
+        */
+       if (tx_cnt > 1)
+               phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+                                      SUPPORTED_100baseT_Half |
+                                      SUPPORTED_10baseT_Half);
+
        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
@@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
                                rxfifosz, qmode);
+               stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+                               chan);
        }
 
        for (chan = 0; chan < tx_channels_count; chan++) {
index 6d141f3931eb650902469cebc18fa3613ad0dcb9..72da77b94ecd987e7e683d0ec890c842090e117e 100644 (file)
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 /**
  * stmmac_axi_setup - parse DT parameters for programming the AXI register
  * @pdev: platform device
- * @priv: driver private struct.
  * Description:
  * if required, from device-tree the AXI internal register can be tuned
  * by using platform parameters.
index 7a16d40a72d13cf1d522e8a3a396c826fe76f9b9..b9221fc1674dfa0ef17a43f8ff86d700a1ae514f 100644 (file)
@@ -60,8 +60,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG    (NETIF_MSG_DRV          | \
                         NETIF_MSG_PROBE        | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
                writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
        struct net_device *dev = gp->dev;
        int entry, drops, work_done = 0;
        u32 done;
-       __sum16 csum;
 
        if (netif_msg_rx_status(gp))
                printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
                        skb = copy_skb;
                }
 
-               csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-               skb->csum = csum_unfold(csum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
+               if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       __sum16 csum;
+
+                       csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+                       skb->csum = csum_unfold(csum);
+                       skb->ip_summed = CHECKSUM_COMPLETE;
+               }
                skb->protocol = eth_type_trans(skb, gp->dev);
 
                napi_gro_receive(&gp->napi, skb);
@@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
        writel(0, gp->regs + TXDMA_KICK);
 
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
 
        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, dev);
 
        /* We can do scatter/gather and HW checksum */
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       dev->features = dev->hw_features;
        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;
 
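Two things happen in the gem changes: RXCSUM becomes user-toggleable (it moves into hw_features), so gem_rx() must consult the live dev->features before trusting the hardware sum; and the descriptor's 16-bit ones'-complement checksum is complemented with an XOR against 0xffff before csum_unfold() stores it for CHECKSUM_COMPLETE. A stand-alone illustration of that complement step (the descriptor word is made up):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
            uint32_t status = 0x00001234;   /* pretend RX descriptor word */
            uint16_t hw_sum = status & 0xffff;

            /* XOR with 0xffff == 16-bit bitwise complement */
            uint16_t csum = htons(hw_sum ^ 0xffff);

            printf("complemented csum: 0x%04x\n", csum);
            return 0;
    }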
index cdbddf16dd2931ba66df103c064705d5f0aef350..4f1267477aa4b56b7f3e1d19420302728da56e7d 100644 (file)
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
  * abstract out these details
  */
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
index 06d7c9e4dcda92deb027522dc04b34326c9fdc8a..f270beebb4289326baff5e86b33f47eae2eaa49b 100644 (file)
@@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
                return -EOPNOTSUPP;
 }
 
+static int match_first_device(struct device *dev, void *data)
+{
+       if (dev->parent && dev->parent->of_node)
+               return of_device_is_compatible(dev->parent->of_node,
+                                              "ti,davinci_mdio");
+
+       return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev)
 
        /* use the first phy on the bus if pdata did not give us a phy id */
        if (!phydev && !priv->phy_id) {
-               phy = bus_find_device_by_name(&mdio_bus_type, NULL,
-                                             "davinci_mdio");
+               /* NOTE: we can't use bus_find_device_by_name() here because
+                * the device name is not guaranteed to be 'davinci_mdio'. On
+                * some systems it can be 'davinci_mdio.0' so we need to use
+                * strncmp() against the first part of the string to correctly
+                * match it.
+                */
+               phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+                                     match_first_device);
                if (phy) {
                        priv->phy_id = dev_name(phy);
                        if (!priv->phy_id || !*priv->phy_id)
index 16c3bfbe19928dfb56b2719490d0b401fae2ee5e..757a3b37ae8a8af8077001d548b65fe862d03c2d 100644 (file)
@@ -218,6 +218,7 @@ issue:
        ret = of_mdiobus_register(bus, np1);
        if (ret) {
                mdiobus_free(bus);
+               lp->mii_bus = NULL;
                return ret;
        }
        return 0;
index 750eaa53bf0ce59429d524ba0658ad6f488a4ba0..ada33c2d9ac20e01af4acec33727623204fda803 100644 (file)
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
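skb_gro_flush_final() exists because asynchronous ESP GRO can make a receive callback return ERR_PTR(-EINPROGRESS), in which case the skb has been handed to the crypto layer and must not be written to anymore; the plain `NAPI_GRO_CB(skb)->flush |= flush` replaced above did exactly that. A sketch of the helper's shape (the real guard is config-dependent, so treat this as an approximation, not the kernel's definition):

    static inline void example_gro_flush_final(struct sk_buff *skb,
                                               struct sk_buff **pp, int flush)
    {
            /* leave the skb alone if GRO handed it off (-EINPROGRESS) */
            if (PTR_ERR(pp) != -EINPROGRESS)
                    NAPI_GRO_CB(skb)->flush |= flush;
    }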
index f347fd9c5b28370f6452f042bb7f59c0ec8a3cd3..777fa59f5e0cd5abdfb8390ac358d09cf77636a1 100644 (file)
 static const char banner[] __initconst = KERN_INFO \
        "AX.25: bpqether driver version 004\n";
 
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
 static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 static int bpq_device_event(struct notifier_block *, unsigned long, void *);
 
@@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
        bpq->ethdev = edev;
        bpq->axdev = ndev;
 
-       memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
-       memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+       eth_broadcast_addr(bpq->dest_addr);
+       eth_broadcast_addr(bpq->acpt_addr);
 
        err = register_netdevice(ndev);
        if (err)
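eth_broadcast_addr() from <linux/etherdevice.h> fills a six-byte address with 0xff; using it retires the two static arrays and the slightly confusing old memcpy, which sized the copy with sizeof(bpq_eth_addr) while reading from bcast_addr. A stand-alone user-space equivalent:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* user-space twin of the kernel's eth_broadcast_addr() */
    static void eth_broadcast_addr(unsigned char *addr)
    {
            memset(addr, 0xff, ETH_ALEN);
    }

    int main(void)
    {
            unsigned char dest[ETH_ALEN];

            eth_broadcast_addr(dest);
            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   dest[0], dest[1], dest[2], dest[3], dest[4], dest[5]);
            return 0;
    }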
index 1a924b867b0742b0aa3e5a15f4da3e6885173e74..4b6e308199d270cd455b7df0de20a8458f6b7941 100644 (file)
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
index 5d5bd513847fff4ff353e7c58d9967a354d06955..31c3d77b4733f0aa9900138b5c49f398d0642db4 100644 (file)
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
                               VM_PKT_DATA_INBAND, 0);
 }
 
+/* Worker to set up sub-channels on initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and can't wait for the channels to come up.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+       struct netvsc_device *nvdev =
+               container_of(w, struct netvsc_device, subchan_work);
+       struct rndis_device *rdev;
+       int i, ret;
+
+       /* Avoid deadlock with device removal already under RTNL */
+       if (!rtnl_trylock()) {
+               schedule_work(w);
+               return;
+       }
+
+       rdev = nvdev->extension;
+       if (rdev) {
+               ret = rndis_set_subchannel(rdev->ndev, nvdev);
+               if (ret == 0) {
+                       netif_device_attach(rdev->ndev);
+               } else {
+                       /* fallback to only primary channel */
+                       for (i = 1; i < nvdev->num_chn; i++)
+                               netif_napi_del(&nvdev->chan_table[i].napi);
+
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       rtnl_unlock();
+}
+
 static struct netvsc_device *alloc_net_device(void)
 {
        struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
-       INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+       INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 
        return net_device;
 }
@@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
        struct hv_device *device = netvsc_channel_to_device(channel);
        struct net_device *ndev = hv_get_drvdata(device);
        int work_done = 0;
+       int ret;
 
        /* If starting a new interval */
        if (!nvchan->desc)
@@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }
 
-       /* If send of pending receive completions suceeded
-        *   and did not exhaust NAPI budget this time
-        *   and not doing busy poll
+       /* Send any pending receive completions */
+       ret = send_recv_completions(ndev, net_device, nvchan);
+
+       /* If we did not exhaust the NAPI budget this time
+        *  and are not doing busy poll
         * then re-enable host interrupts
-        *     and reschedule if ring is not empty.
+        *  and reschedule if the ring is not empty
+        *   or sending receive completions failed.
         */
-       if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
-           work_done < budget &&
+       if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
-           hv_end_read(&channel->inbound) &&
+           (ret || hv_end_read(&channel->inbound)) &&
            napi_schedule_prep(napi)) {
                hv_begin_read(&channel->inbound);
                __napi_schedule(napi);
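netvsc_subchan_work() above uses a trylock-or-requeue idiom: a work item that needs the RTNL, but can race against a removal path that already holds it and is waiting for this very work to finish, must not block on the lock. A sketch of the skeleton (body elided):

    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    static void example_work(struct work_struct *w)
    {
            /* avoid an ABBA deadlock with rtnl-holding device removal */
            if (!rtnl_trylock()) {
                    schedule_work(w);       /* retry on a later pass */
                    return;
            }

            /* ... RTNL-protected sub-channel setup ... */

            rtnl_unlock();
    }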
index fe2256bf1d137fea6b76c5e3a564b191e2b5da7c..dd1d6e115145d4c14fb25d1883d1e42614e211a9 100644 (file)
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
        if (IS_ERR(nvdev))
                return PTR_ERR(nvdev);
 
-       /* Note: enable and attach happen when sub-channels setup */
+       if (nvdev->num_chn > 1) {
+               ret = rndis_set_subchannel(ndev, nvdev);
+
+               /* if unavailable, just proceed with one queue */
+               if (ret) {
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       /* In any case device is now ready */
+       netif_device_attach(ndev);
 
+       /* Note: enable and attach happen when sub-channels setup */
        netif_carrier_off(ndev);
 
        if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       if (nvdev->num_chn > 1)
+               schedule_work(&nvdev->subchan_work);
+
        /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
index 5428bb26110262fdfb66daaac8463c91e7981d42..408ece27131c4611a8600028831f10aa8b47ed60 100644 (file)
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
 {
-       struct netvsc_device *nvdev
-               = container_of(w, struct netvsc_device, subchan_work);
        struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
-       struct net_device_context *ndev_ctx;
-       struct rndis_device *rdev;
-       struct net_device *ndev;
-       struct hv_device *hv_dev;
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hv_dev = ndev_ctx->device_ctx;
+       struct rndis_device *rdev = nvdev->extension;
        int i, ret;
 
-       if (!rtnl_trylock()) {
-               schedule_work(w);
-               return;
-       }
-
-       rdev = nvdev->extension;
-       if (!rdev)
-               goto unlock;    /* device was removed */
-
-       ndev = rdev->ndev;
-       ndev_ctx = netdev_priv(ndev);
-       hv_dev = ndev_ctx->device_ctx;
+       ASSERT_RTNL();
 
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret) {
                netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
-               goto failed;
+               return ret;
        }
 
        wait_for_completion(&nvdev->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "sub channel request failed\n");
-               goto failed;
+               return -EIO;
        }
 
        nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
-       netif_device_attach(ndev);
-       rtnl_unlock();
-       return;
-
-failed:
-       /* fallback to only primary channel */
-       for (i = 1; i < nvdev->num_chn; i++)
-               netif_napi_del(&nvdev->chan_table[i].napi);
-
-       nvdev->max_chn = 1;
-       nvdev->num_chn = 1;
-
-       netif_device_attach(ndev);
-unlock:
-       rtnl_unlock();
+       return 0;
 }
 
 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,13 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                netif_napi_add(net, &net_device->chan_table[i].napi,
                               netvsc_poll, NAPI_POLL_WEIGHT);
 
-       if (net_device->num_chn > 1)
-               schedule_work(&net_device->subchan_work);
+       return net_device;
 
 out:
-       /* if unavailable, just proceed with one queue */
-       if (ret) {
-               net_device->max_chn = 1;
-               net_device->num_chn = 1;
-       }
-
-       /* No sub channels, device is ready */
-       if (net_device->num_chn == 1)
-               netif_device_attach(net);
-
-       return net_device;
+       /* setting up multiple channels failed */
+       net_device->max_chn = 1;
+       net_device->num_chn = 1;
+       return 0;
 
 err_dev_remv:
        rndis_filter_device_remove(dev, net_device);
index 64f1b1e77bc0f361dc59538072f59ca7a7c72690..23a52b9293f35eaec1d71063305a029ba466d819 100644 (file)
@@ -275,6 +275,8 @@ struct adf7242_local {
        struct spi_message stat_msg;
        struct spi_transfer stat_xfer;
        struct dentry *debugfs_root;
+       struct delayed_work work;
+       struct workqueue_struct *wqueue;
        unsigned long flags;
        int tx_stat;
        bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
        /* Wait until the ACK is sent */
        adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
        adf7242_clear_irqstat(lp);
+       mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
 
        return adf7242_cmd(lp, CMD_RC_RX);
 }
 
+static void adf7242_rx_cal_work(struct work_struct *work)
+{
+       struct adf7242_local *lp =
+       container_of(work, struct adf7242_local, work.work);
+
+       /* Reissuing RC_RX every 400ms - to adjust for offset
+        * drift in receiver (datasheet page 61, OCL section)
+        */
+
+       if (!test_bit(FLAG_XMIT, &lp->flags)) {
+               adf7242_cmd(lp, CMD_RC_PHY_RDY);
+               adf7242_cmd_rx(lp);
+       }
+}
+
 static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
 {
        struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
        enable_irq(lp->spi->irq);
        set_bit(FLAG_START, &lp->flags);
 
-       return adf7242_cmd(lp, CMD_RC_RX);
+       return adf7242_cmd_rx(lp);
 }
 
 static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
        struct adf7242_local *lp = hw->priv;
 
        disable_irq(lp->spi->irq);
+       cancel_delayed_work_sync(&lp->work);
        adf7242_cmd(lp, CMD_RC_IDLE);
        clear_bit(FLAG_START, &lp->flags);
        adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
        adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
        adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
 
-       return adf7242_cmd(lp, CMD_RC_RX);
+       if (test_bit(FLAG_START, &lp->flags))
+               return adf7242_cmd_rx(lp);
+       else
+               return adf7242_cmd(lp, CMD_RC_PHY_RDY);
 }
 
 static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
        /* ensure existing instances of the IRQ handler have completed */
        disable_irq(lp->spi->irq);
        set_bit(FLAG_XMIT, &lp->flags);
+       cancel_delayed_work_sync(&lp->work);
        reinit_completion(&lp->tx_complete);
        adf7242_cmd(lp, CMD_RC_PHY_RDY);
        adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
        unsigned int xmit;
        u8 irq1;
 
+       mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
        adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
 
        if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
        spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
 
        spi_set_drvdata(spi, lp);
+       INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
+       lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
+                                            WQ_MEM_RECLAIM);
 
        ret = adf7242_hw_init(lp);
        if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
        if (!IS_ERR_OR_NULL(lp->debugfs_root))
                debugfs_remove_recursive(lp->debugfs_root);
 
+       cancel_delayed_work_sync(&lp->work);
+       destroy_workqueue(lp->wqueue);
+
        ieee802154_unregister_hw(lp->hw);
        mutex_destroy(&lp->bmux);
        ieee802154_free_hw(lp->hw);
index 77abedf0b52447b4f1d0b5bdd99c259cb3555c1a..3d9e91579866826e476ceb2374b0d286e70c07fd 100644 (file)
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 static int
 at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 {
-       BUG_ON(!level);
+       WARN_ON(!level);
        *level = 0xbe;
        return 0;
 }
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
                u16 addr = le16_to_cpu(filt->short_addr);
 
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for saddr\n");
+               dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
                __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
                __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
        }
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        if (changed & IEEE802154_AFILT_PANID_CHANGED) {
                u16 pan = le16_to_cpu(filt->pan_id);
 
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for pan id\n");
+               dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
                __at86rf230_write(lp, RG_PAN_ID_0, pan);
                __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
        }
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
                u8 i, addr[8];
 
                memcpy(addr, &filt->ieee_addr, 8);
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+               dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
                for (i = 0; i < 8; i++)
                        __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
        }
 
        if (changed & IEEE802154_AFILT_PANC_CHANGED) {
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for panc change\n");
+               dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
                if (filt->pan_coord)
                        at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
                else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
        return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-
 static int
 at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
index 0d673f7682ee065223b64462bc2c4df0a03826a0..176395e4b7bb0ca628bdd22b4f13a23e425bfae2 100644 (file)
@@ -49,7 +49,7 @@ struct fakelb_phy {
 
 static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
-       BUG_ON(!level);
+       WARN_ON(!level);
        *level = 0xbe;
 
        return 0;
index de0d7f28a181ca4acb1da2131d82a981627a8e96..e428277781ac4422bec2e8f47fd35476a85a74f7 100644 (file)
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/spi/spi.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 #include <linux/regmap.h>
index 4377c26f714d0522ebf5d1de6ac774b6e42024ea..4a949569ec4c51668fe7b795caef7ece5d61854b 100644 (file)
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 {
        struct ipvl_dev *ipvlan;
        struct net_device *mdev = port->dev;
-       int err = 0;
+       unsigned int flags;
+       int err;
 
        ASSERT_RTNL();
        if (port->mode != nval) {
+               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+                       flags = ipvlan->dev->flags;
+                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags | IFF_NOARP);
+                       } else {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags & ~IFF_NOARP);
+                       }
+                       if (unlikely(err))
+                               goto fail;
+               }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
                                mdev->priv_flags |= IFF_L3MDEV_MASTER;
                        } else
-                               return err;
+                               goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
-               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-                               ipvlan->dev->flags |= IFF_NOARP;
-                       else
-                               ipvlan->dev->flags &= ~IFF_NOARP;
-               }
                port->mode = nval;
        }
+       return 0;
+
+fail:
+       /* Undo the flags changes that have been done so far. */
+       list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+               flags = ipvlan->dev->flags;
+               if (port->mode == IPVLAN_MODE_L3 ||
+                   port->mode == IPVLAN_MODE_L3S)
+                       dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+               else
+                       dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+       }
+
        return err;
 }
 
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->sfeatures = IPVLAN_FEATURES;
-       ipvlan_adjust_mtu(ipvlan, phy_dev);
+       if (!tb[IFLA_MTU])
+               ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
        spin_lock_init(&ipvlan->addrs_lock);
 
@@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
+       dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
index 83f7420ddea569126db0cc25719940892d760075..4f390fa557e4ba0c897b20faefaa85b03f4ec70a 100644 (file)
@@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
 
        netif_addr_lock_bh(failover_dev);
        dev_uc_sync_multiple(slave_dev, failover_dev);
-       dev_uc_sync_multiple(slave_dev, failover_dev);
+       dev_mc_sync_multiple(slave_dev, failover_dev);
        netif_addr_unlock_bh(failover_dev);
 
        err = vlan_vids_add_by_dev(slave_dev, failover_dev);
index 081d99aa39853097e7d486e813f344fb895598aa..49ac678eb2dc7ca6539794b9ace40ba86aaa8d6a 100644 (file)
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
                if (err < 0)
                        return err;
 
-               err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+               err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
        }
 
        return err;
index b8f57e9b937901fd142413c4002f39205546c35a..1cd439bdf6087af2913f589b499cd5c5abe5a3bb 100644 (file)
 #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS             BIT(12)
 #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE    BIT(14)
 
-#define MII_88E1121_PHY_LED_CTRL       16
+#define MII_PHY_LED_CTRL               16
 #define MII_88E1121_PHY_LED_DEF                0x0030
+#define MII_88E1510_PHY_LED_DEF                0x1177
 
 #define MII_M1011_PHY_STATUS           0x11
 #define MII_M1011_PHY_STATUS_1000      0x8000
@@ -632,8 +633,40 @@ error:
        return err;
 }
 
+static void marvell_config_led(struct phy_device *phydev)
+{
+       u16 def_config;
+       int err;
+
+       switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
+       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
+               def_config = MII_88E1121_PHY_LED_DEF;
+               break;
+       /* Default PHY LED config:
+        * LED[0] .. 1000Mbps Link
+        * LED[1] .. 100Mbps Link
+        * LED[2] .. Blink, Activity
+        */
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
+               def_config = MII_88E1510_PHY_LED_DEF;
+               break;
+       default:
+               return;
+       }
+
+       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
+                             def_config);
+       if (err < 0)
+               pr_warn("Fail to config marvell phy LED.\n");
+}
+
 static int marvell_config_init(struct phy_device *phydev)
 {
+       /* Set default LED */
+       marvell_config_led(phydev);
+
        /* Set registers from marvell,reg-init DT property */
        return marvell_of_reg_init(phydev);
 }
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
        return genphy_soft_reset(phydev);
 }
 
-static int m88e1121_config_init(struct phy_device *phydev)
-{
-       int err;
-
-       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
-       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
-                             MII_88E1121_PHY_LED_CTRL,
-                             MII_88E1121_PHY_LED_DEF);
-       if (err < 0)
-               return err;
-
-       /* Set marvell,reg-init configuration from device tree */
-       return marvell_config_init(phydev);
-}
-
 static int m88e1318_config_init(struct phy_device *phydev)
 {
        if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
                        return err;
        }
 
-       return m88e1121_config_init(phydev);
+       return marvell_config_init(phydev);
 }
 
 static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = &m88e1121_probe,
-               .config_init = &m88e1121_config_init,
+               .config_init = &marvell_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
index 537297d2b4b4309adeacd1e66541b2aa1830b8b8..6c9b24fe31488b03499a9866f13a065449b29cce 100644 (file)
@@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
         * negotiation may already be done and aneg interrupt may not be
         * generated.
         */
-       if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+       if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
                err = phy_aneg_done(phydev);
                if (err > 0) {
                        trigger = true;
index bd0f339f69fd064737f8f3c80e6645e73c56a2b9..b9f5f40a7ac1e6640a653e8207cdd8885100e09f 100644 (file)
@@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-       /* The default values for phydev->supported are provided by the PHY
-        * driver "features" member, we want to reset to sane defaults first
-        * before supporting higher speeds.
-        */
-       phydev->supported &= PHY_DEFAULT_FEATURES;
+       phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+                              PHY_10BT_FEATURES);
 
        switch (max_speed) {
        default:
index d437f4f5ed5291d21236a71ef3e36089344f9201..740655261e5b7347116d2a5b53445c8d023cb49c 100644 (file)
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
        }
        if (bus->started)
                bus->socket_ops->start(bus->sfp);
-       bus->netdev->sfp_bus = bus;
        bus->registered = true;
        return 0;
 }
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
                if (bus->phydev && ops && ops->disconnect_phy)
                        ops->disconnect_phy(bus->upstream);
        }
-       bus->netdev->sfp_bus = NULL;
        bus->registered = false;
 }
 
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_stop);
 
+static void sfp_upstream_clear(struct sfp_bus *bus)
+{
+       bus->upstream_ops = NULL;
+       bus->upstream = NULL;
+       bus->netdev->sfp_bus = NULL;
+       bus->netdev = NULL;
+}
+
 /**
  * sfp_register_upstream() - Register the neighbouring device
  * @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
                bus->upstream_ops = ops;
                bus->upstream = upstream;
                bus->netdev = ndev;
+               ndev->sfp_bus = bus;
 
-               if (bus->sfp)
+               if (bus->sfp) {
                        ret = sfp_register_bus(bus);
+                       if (ret)
+                               sfp_upstream_clear(bus);
+               }
                rtnl_unlock();
        }
 
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
        rtnl_lock();
        if (bus->sfp)
                sfp_unregister_bus(bus);
-       bus->upstream = NULL;
-       bus->netdev = NULL;
+       sfp_upstream_clear(bus);
        rtnl_unlock();
 
        sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_module_remove);
 
+static void sfp_socket_clear(struct sfp_bus *bus)
+{
+       bus->sfp_dev = NULL;
+       bus->sfp = NULL;
+       bus->socket_ops = NULL;
+}
+
 struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
                                    const struct sfp_socket_ops *ops)
 {
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
                bus->sfp = sfp;
                bus->socket_ops = ops;
 
-               if (bus->netdev)
+               if (bus->netdev) {
                        ret = sfp_register_bus(bus);
+                       if (ret)
+                               sfp_socket_clear(bus);
+               }
                rtnl_unlock();
        }
 
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
        rtnl_lock();
        if (bus->netdev)
                sfp_unregister_bus(bus);
-       bus->sfp_dev = NULL;
-       bus->sfp = NULL;
-       bus->socket_ops = NULL;
+       sfp_socket_clear(bus);
        rtnl_unlock();
 
        sfp_bus_put(bus);
index de51e8f70f44ea6663b330d2ae41024e99865490..ce61231e96ea5fe27f512fbd0d80d4609997e508 100644 (file)
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppoe_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
index a192a017cc68878360505b93df151de3d0b9b730..f5727baac84a5d10fd70837a75fcfa8194992f9a 100644 (file)
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                case XDP_TX:
                        get_page(alloc_frag->page);
                        alloc_frag->offset += buflen;
-                       if (tun_xdp_tx(tun->dev, &xdp))
+                       if (tun_xdp_tx(tun->dev, &xdp) < 0)
                                goto err_redirect;
                        rcu_read_unlock();
                        local_bh_enable();
index 3d4f7959dabb9c39e17754df4f72013c89743d5a..b1b3d8f7e67dd052eae618e33698c633751df60a 100644 (file)
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
                                     priv->presvd_phy_advertise);
 
                /* Restore BMCR */
+               if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
+                       priv->presvd_phy_bmcr |= BMCR_ANRESTART;
+
                asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
                                     priv->presvd_phy_bmcr);
 
-               mii_nway_restart(&dev->mii);
                priv->presvd_phy_advertise = 0;
                priv->presvd_phy_bmcr = 0;
        }
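Writing back a saved BMCR with BMCR_ANENABLE set does not by itself restart an autonegotiation that has already completed; the self-clearing BMCR_ANRESTART bit has to go into the same MDIO write, which is what lets the hunk above drop the separate mii_nway_restart() call. As a tiny helper sketch over the <linux/mii.h> constants:

    #include <linux/mii.h>

    static u16 bmcr_with_restart(u16 bmcr)
    {
            if (bmcr & BMCR_ANENABLE)
                    bmcr |= BMCR_ANRESTART; /* self-clearing restart bit */
            return bmcr;
    }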
index b0e8b9613054137215e2f502f9deeab3bbad8f80..1eaec648bd1f716db3d06622cdfb7834e64e4e38 100644 (file)
@@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 
        atomic_set(&ctx->stop, 1);
 
-       if (hrtimer_active(&ctx->tx_timer))
-               hrtimer_cancel(&ctx->tx_timer);
+       hrtimer_cancel(&ctx->tx_timer);
 
        tasklet_kill(&ctx->bh);
 
index 8dff87ec6d99c5dca122dcdb5d3697157564cfa2..ed10d49eb5e0b66068fe366950cee3de2de3257d 100644 (file)
@@ -64,6 +64,7 @@
 #define DEFAULT_RX_CSUM_ENABLE         (true)
 #define DEFAULT_TSO_CSUM_ENABLE                (true)
 #define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define DEFAULT_VLAN_RX_OFFLOAD                (true)
 #define TX_OVERHEAD                    (8)
 #define RXW_PADDING                    2
 
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;
 
-       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
 
        netdev->mtu = new_mtu;
 
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        buf |= FCT_TX_CTL_EN_;
        ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
 
-       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev,
+                                             dev->net->mtu + VLAN_ETH_HLEN);
 
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
        if (DEFAULT_TSO_CSUM_ENABLE)
                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
 
+       if (DEFAULT_VLAN_RX_OFFLOAD)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+       if (DEFAULT_VLAN_FILTER_ENABLE)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
        dev->net->hw_features = dev->net->features;
 
        ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
                                    struct sk_buff *skb,
                                    u32 rx_cmd_a, u32 rx_cmd_b)
 {
+       /* HW Checksum offload appears to be flawed if used when not stripping
+        * VLAN headers. Drop back to S/W checksums under these conditions.
+        */
        if (!(dev->net->features & NETIF_F_RXCSUM) ||
-           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+           ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+            !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
                skb->ip_summed = CHECKSUM_NONE;
        } else {
                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
        }
 }
 
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
+       if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (rx_cmd_a & RX_CMD_A_FVTG_))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      (rx_cmd_b & 0xffff));
+}
+
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
        int             status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        if (skb->len == size) {
                                lan78xx_rx_csum_offload(dev, skb,
                                                        rx_cmd_a, rx_cmd_b);
+                               lan78xx_rx_vlan_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
 
                                skb_trim(skb, skb->len - 4); /* remove fcs */
                                skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        skb_set_tail_pointer(skb2, size);
 
                        lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+                       lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
 
                        skb_trim(skb2, skb2->len - 4); /* remove fcs */
                        skb2->truesize = size + sizeof(struct sk_buff);
@@ -3313,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
        pkt_cnt = 0;
        count = 0;
        length = 0;
+       spin_lock_irqsave(&tqp->lock, flags);
        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
                if (skb_is_gso(skb)) {
                        if (pkt_cnt) {
@@ -3321,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                        }
                        count = 1;
                        length = skb->len - TX_OVERHEAD;
-                       skb2 = skb_dequeue(tqp);
+                       __skb_unlink(skb, tqp);
+                       spin_unlock_irqrestore(&tqp->lock, flags);
                        goto gso_skb;
                }
 
@@ -3330,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
                pkt_cnt++;
        }
+       spin_unlock_irqrestore(&tqp->lock, flags);
 
        /* copy to a single skb */
        skb = alloc_skb(skb_totallen, GFP_ATOMIC);
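The lan78xx_tx_bh() change closes a race in the pending-bytes walk: iterating a sk_buff_head by hand is only safe while holding its internal spinlock, and because skb_dequeue() takes that lock itself, the held-lock variant __skb_unlink() must be used inside the critical section. The shape of the fix, assuming a struct sk_buff_head *tqp:

    struct sk_buff *skb;
    unsigned long flags;

    spin_lock_irqsave(&tqp->lock, flags);
    skb_queue_walk(tqp, skb) {
            if (skb_is_gso(skb)) {
                    __skb_unlink(skb, tqp); /* lock is already held */
                    break;
            }
    }
    spin_unlock_irqrestore(&tqp->lock, flags);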
index 8e8b51f171f4fa227340e80009ce5c2c059db053..cb0cc30c3d6a190e8d3132b6bab4c5d67e29979c 100644 (file)
@@ -1246,12 +1246,14 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
 
index 86f7196f9d91fbf55c791fff88687a43518d66d8..2a58607a6aea809b14e0aa03955cfa099118e607 100644 (file)
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&tp->pm_notifier);
 #endif
-       napi_disable(&tp->napi);
+       if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+               napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
index 5f565bd574da3bc7ce741e3b280a9ff5dece4352..48ba80a8ca5ce8e566931979edcff4bcfe47bc2e 100644 (file)
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
                   (netdev->flags & IFF_ALLMULTI)) {
                rx_creg &= 0xfffe;
                rx_creg |= 0x0002;
-               dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+               dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
        } else {
                /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
                rx_creg &= 0x00fc;
index 7a6a1fe793090b8e28f5ef075f5ebc2ad385b5eb..05553d2524469f97e4a02bb48f43f6820ad2b3e5 100644 (file)
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
 static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
                                            u32 *data, int in_pm)
 {
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
                return -EIO;
        }
 
+       /* phy workaround for gig link */
+       smsc75xx_phy_gig_workaround(dev);
+
        smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
                ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
                ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
        return -EIO;
 }
 
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+       struct mii_if_info *mii = &dev->mii;
+       int ret = 0, timeout = 0;
+       u32 buf, link_up = 0;
+
+       /* Set the phy in Gig loopback */
+       smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+       /* Wait for the link up */
+       do {
+               link_up = smsc75xx_link_ok_nopm(dev);
+               usleep_range(10000, 20000);
+               timeout++;
+       } while ((!link_up) && (timeout < 1000));
+
+       if (timeout >= 1000) {
+               netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+               return -EIO;
+       }
+
+       /* phy reset */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+               return ret;
+       }
+
+       buf |= PMT_CTL_PHY_RST;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+               return ret;
+       }
+
+       timeout = 0;
+       do {
+               usleep_range(10000, 20000);
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+               if (ret < 0) {
+                       netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+                                   ret);
+                       return ret;
+               }
+               timeout++;
+       } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+       if (timeout >= 100) {
+               netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static int smsc75xx_reset(struct usbnet *dev)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
index b6c9a2af37328d1037c3b0ba761256092556167e..53085c63277b4ecfa9d8651543bfb5e545fa73ee 100644 (file)
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX          BIT(0)
+#define VIRTIO_XDP_REDIR       BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
-                                    bool *xdp_xmit)
+                                    unsigned int *xdp_xmit)
 {
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
-                                        bool *xdp_xmit)
+                                        unsigned int *xdp_xmit)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                      void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+                      void *buf, unsigned int len, void **ctx,
+                      unsigned int *xdp_xmit)
 {
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
        }
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+                          unsigned int *xdp_xmit)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct send_queue *sq;
        unsigned int received, qp;
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
 
        virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit) {
+       if (xdp_xmit & VIRTIO_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & VIRTIO_XDP_TX) {
                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                     smp_processor_id();
                sq = &vi->sq[qp];
                virtqueue_kick(sq->vq);
-               xdp_do_flush_map();
        }
 
        return received;
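Replacing the single bool with a bit mask lets virtnet_poll() service the two XDP actions independently: flush the redirect maps whenever XDP_REDIRECT happened, and kick the local XDP TX queue only when XDP_TX happened. A stand-alone illustration of accumulating and testing such flags:

    #include <stdio.h>

    #define VIRTIO_XDP_TX           (1u << 0)
    #define VIRTIO_XDP_REDIR        (1u << 1)

    int main(void)
    {
            unsigned int xdp_xmit = 0;

            /* the RX path ORs in one bit per action it performed */
            xdp_xmit |= VIRTIO_XDP_TX;
            xdp_xmit |= VIRTIO_XDP_REDIR;

            /* the poll loop then services each action exactly once */
            if (xdp_xmit & VIRTIO_XDP_REDIR)
                    printf("flush XDP redirect maps\n");
            if (xdp_xmit & VIRTIO_XDP_TX)
                    printf("kick the local XDP TX queue\n");
            return 0;
    }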
index aee0e60471f10d59c39ad39f8170eedea722455d..e857cb3335f6bd4e54b01050d11a4aa4b12b087b 100644 (file)
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
        flush = 0;
 
 out:
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
@@ -638,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
-/* Add new entry to forwarding table -- assumes lock held */
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+                                        const u8 *mac, __u16 state,
+                                        __be32 src_vni, __u8 ndm_flags)
+{
+       struct vxlan_fdb *f;
+
+       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return NULL;
+       f->state = state;
+       f->flags = ndm_flags;
+       f->updated = f->used = jiffies;
+       f->vni = src_vni;
+       INIT_LIST_HEAD(&f->remotes);
+       memcpy(f->eth_addr, mac, ETH_ALEN);
+
+       return f;
+}
+
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+                           const u8 *mac, union vxlan_addr *ip,
+                           __u16 state, __be16 port, __be32 src_vni,
+                           __be32 vni, __u32 ifindex, __u8 ndm_flags,
+                           struct vxlan_fdb **fdb)
+{
+       struct vxlan_rdst *rd = NULL;
+       struct vxlan_fdb *f;
+       int rc;
+
+       if (vxlan->cfg.addrmax &&
+           vxlan->addrcnt >= vxlan->cfg.addrmax)
+               return -ENOSPC;
+
+       netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+       f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+       if (!f)
+               return -ENOMEM;
+
+       rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+       if (rc < 0) {
+               kfree(f);
+               return rc;
+       }
+
+       ++vxlan->addrcnt;
+       hlist_add_head_rcu(&f->hlist,
+                          vxlan_fdb_head(vxlan, mac, src_vni));
+
+       *fdb = f;
+
+       return 0;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
                            __be16 port, __be32 src_vni, __be32 vni,
@@ -689,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               if (vxlan->cfg.addrmax &&
-                   vxlan->addrcnt >= vxlan->cfg.addrmax)
-                       return -ENOSPC;
-
                /* Disallow replace to add a multicast entry */
                if ((flags & NLM_F_REPLACE) &&
                    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
                        return -EOPNOTSUPP;
 
                netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
-               f = kmalloc(sizeof(*f), GFP_ATOMIC);
-               if (!f)
-                       return -ENOMEM;
-
-               notify = 1;
-               f->state = state;
-               f->flags = ndm_flags;
-               f->updated = f->used = jiffies;
-               f->vni = src_vni;
-               INIT_LIST_HEAD(&f->remotes);
-               memcpy(f->eth_addr, mac, ETH_ALEN);
-
-               rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
-               if (rc < 0) {
-                       kfree(f);
+               rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+                                     vni, ifindex, ndm_flags, &f);
+               if (rc < 0)
                        return rc;
-               }
-
-               ++vxlan->addrcnt;
-               hlist_add_head_rcu(&f->hlist,
-                                  vxlan_fdb_head(vxlan, mac, src_vni));
+               notify = 1;
        }
 
        if (notify) {
@@ -743,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
        kfree(f);
 }
 
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                             bool do_notify)
 {
        netdev_dbg(vxlan->dev,
                    "delete %pM\n", f->eth_addr);
 
        --vxlan->addrcnt;
-       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+       if (do_notify)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
        hlist_del_rcu(&f->hlist);
        call_rcu(&f->rcu, vxlan_fdb_free);
@@ -865,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                return -EAFNOSUPPORT;
 
        spin_lock_bh(&vxlan->hash_lock);
-       err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+       err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
                               port, src_vni, vni, ifindex, ndm->ndm_flags);
        spin_unlock_bh(&vxlan->hash_lock);
 
@@ -899,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                goto out;
        }
 
-       vxlan_fdb_destroy(vxlan, f);
+       vxlan_fdb_destroy(vxlan, f, true);
 
 out:
        return 0;
@@ -1008,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
                /* close off race between vxlan_flush and incoming packets */
                if (netif_running(dev))
-                       vxlan_fdb_create(vxlan, src_mac, src_ip,
+                       vxlan_fdb_update(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
                                         vxlan->cfg.dst_port,
@@ -2366,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
                                           "garbage collect %pM\n",
                                           f->eth_addr);
                                f->state = NUD_STALE;
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
                }
@@ -2417,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
        spin_lock_bh(&vxlan->hash_lock);
        f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
        if (f)
-               vxlan_fdb_destroy(vxlan, f);
+               vxlan_fdb_destroy(vxlan, f, true);
        spin_unlock_bh(&vxlan->hash_lock);
 }
 
@@ -2471,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
                                continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
                        if (!is_zero_ether_addr(f->eth_addr))
-                               vxlan_fdb_destroy(vxlan, f);
+                               vxlan_fdb_destroy(vxlan, f, true);
                }
        }
        spin_unlock_bh(&vxlan->hash_lock);
@@ -3162,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3175,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                       &vxlan->default_dst.remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
-                                      NLM_F_EXCL | NLM_F_CREATE,
                                       vxlan->cfg.dst_port,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
+                                      NTF_SELF, &f);
                if (err)
                        return err;
        }
 
        err = register_netdevice(dev);
+       if (err)
+               goto errout;
+
+       err = rtnl_configure_link(dev, NULL);
        if (err) {
-               vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
-               return err;
+               unregister_netdevice(dev);
+               goto errout;
        }
 
+       /* notify default fdb entry */
+       if (f)
+               vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+
        list_add(&vxlan->next, &vn->vxlan_list);
        return 0;
+errout:
+       if (f)
+               vxlan_fdb_destroy(vxlan, f, false);
+       return err;
 }
 
 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3427,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        struct vxlan_rdst *dst = &vxlan->default_dst;
        struct vxlan_rdst old_dst;
        struct vxlan_config conf;
+       struct vxlan_fdb *f = NULL;
        int err;
 
        err = vxlan_nl2conf(tb, data,
@@ -3455,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                        err = vxlan_fdb_create(vxlan, all_zeros_mac,
                                               &dst->remote_ip,
                                               NUD_REACHABLE | NUD_PERMANENT,
-                                              NLM_F_CREATE | NLM_F_APPEND,
                                               vxlan->cfg.dst_port,
                                               dst->remote_vni,
                                               dst->remote_vni,
                                               dst->remote_ifindex,
-                                              NTF_SELF);
+                                              NTF_SELF, &f);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock);
                                return err;
                        }
+                       vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
                }
                spin_unlock_bh(&vxlan->hash_lock);
        }
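
Both call sites above now follow the same pattern: vxlan_fdb_create() inserts the default entry without generating a netlink notification, the steps that can still fail run (register_netdevice(), rtnl_configure_link()), and only on full success is RTM_NEWNEIGH sent; on error the entry is torn down with do_notify false, so userspace never observes a phantom fdb entry. A hedged userspace sketch of the create-silently/notify-on-success idiom (all names here are invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int id; };

static void notify(const struct entry *e, const char *event)
{
        printf("netlink: entry %d %s\n", e->id, event);
}

static void destroy(struct entry *e, bool do_notify)
{
        if (do_notify)
                notify(e, "deleted");
        free(e);
}

static int risky_setup(int fail)
{
        return fail ? -1 : 0;            /* register/configure stand-in */
}

int main(void)
{
        for (int fail = 0; fail < 2; fail++) {
                struct entry *e = malloc(sizeof(*e));

                if (!e)
                        return 1;
                e->id = fail;            /* created, listeners not told yet */
                if (risky_setup(fail)) {
                        destroy(e, false);  /* silent rollback, no event */
                        continue;
                }
                notify(e, "created");    /* advertised only after success */
                destroy(e, true);
        }
        return 0;
}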
index e9c2fb318c03362d84031241a4191db9f4602c1a..836e0a47b94a0a192b210620d6652c41145cbab1 100644 (file)
@@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                           ath10k_mac_max_vht_nss(vht_mcs_mask)));
 
        if (changed & IEEE80211_RC_BW_CHANGED) {
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
-                          sta->addr, bw);
+               enum wmi_phy_mode mode;
+
+               mode = chan_to_phymode(&def);
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+                               sta->addr, bw, mode);
+
+               err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+                               WMI_PEER_PHYMODE, mode);
+               if (err) {
+                       ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+                                       sta->addr, mode, err);
+                       goto exit;
+               }
 
                err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
                                                WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                                    sta->addr);
        }
 
+exit:
        mutex_unlock(&ar->conf_mutex);
 }
 
index b48db54e986516271daab61f30277815a7a9bf6d..d68afb65402a069528b0dc3a01eab142c833898c 100644 (file)
@@ -6144,6 +6144,7 @@ enum wmi_peer_param {
        WMI_PEER_NSS        = 0x5,
        WMI_PEER_USE_4ADDR  = 0x6,
        WMI_PEER_DEBUG      = 0xa,
+       WMI_PEER_PHYMODE    = 0xd,
        WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
 };
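
The ath10k change sends the peer's new phymode to firmware before the new bandwidth, using the WMI_PEER_PHYMODE parameter id (0xd) added to enum wmi_peer_param above, so a peer's channel width is only raised after a phymode that can carry it has been set. A small sketch of such a parameter-id interface and the ordering (the ids and values below are only illustrative):

#include <stdio.h>

enum peer_param {
        PEER_CHAN_WIDTH = 0x4,           /* illustrative id */
        PEER_PHYMODE    = 0xd,           /* the id added in the hunk above */
};

static int peer_set_param(enum peer_param param, unsigned int value)
{
        printf("wmi: set peer param 0x%x = %u\n", param, value);
        return 0;                        /* firmware accepted */
}

int main(void)
{
        /* phymode first, then the bandwidth that phymode can carry */
        if (peer_set_param(PEER_PHYMODE, 11))
                return 1;
        return peer_set_param(PEER_CHAN_WIDTH, 2);
}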
 
index 1279064a3b716c2ef6cf82d82c27ea664f1496b4..51a038022c8b80404b9bd841c6fefd3b866ffe66 100644 (file)
@@ -1,4 +1,4 @@
-/*
+/*
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
index 9d99eb42d9176f0f833048b3f87a906542c9e90c..6acba67bca07abd7d662466b4422295dff359a33 100644 (file)
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
-       depends on HAS_DMA
        select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
index c99a191e8d693a3e6ef006826fcde5affb74a02d..a907d7b065fa8e0b7ab6a35dc2c265a6385d9c75 100644 (file)
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
        brcmf_dbg(TRACE, "Enter\n");
 
        if (bus) {
+               /* Stop watchdog task */
+               if (bus->watchdog_tsk) {
+                       send_sig(SIGTERM, bus->watchdog_tsk, 1);
+                       kthread_stop(bus->watchdog_tsk);
+                       bus->watchdog_tsk = NULL;
+               }
+
                /* De-register interrupt handler */
                brcmf_sdiod_intr_unregister(bus->sdiodev);
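
The added block stops the SDIO watchdog task before interrupts and bus state are torn down: the signal wakes the thread, kthread_stop() waits for it to exit, and the pointer is cleared so nothing touches it afterwards. A userspace analogue using pthreads, where an atomic flag plays the role of kthread_should_stop():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop;          /* kthread_should_stop() analogue */

static void *watchdog(void *arg)
{
        (void)arg;
        while (!atomic_load(&should_stop))
                usleep(1000);            /* poll the bus, then sleep */
        return NULL;
}

int main(void)
{
        pthread_t tsk;

        if (pthread_create(&tsk, NULL, watchdog, NULL))
                return 1;
        atomic_store(&should_stop, true);  /* like kthread_stop(): ask... */
        pthread_join(tsk, NULL);           /* ...and wait until it is gone */
        puts("watchdog stopped; safe to tear down bus state");
        return 0;
}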
 
index 6e3cf9817730b53f751f31401425ffd1c1dc82a4..88f4c89f89ba85f5ff64085f8f29abf20722d5ea 100644 (file)
@@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
                                         MWIFIEX_FUNC_SHUTDOWN);
        }
 
-       if (adapter->workqueue)
-               flush_workqueue(adapter->workqueue);
-
-       mwifiex_usb_free(card);
-
        mwifiex_dbg(adapter, FATAL,
                    "%s: removing card\n", __func__);
        mwifiex_remove_card(adapter);
@@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
 
+       mwifiex_usb_free(card);
+
        mwifiex_usb_cleanup_tx_aggr(adapter);
 
        card->adapter = NULL;
index 9d2f9a776ef18e405c80e73c40c9eb3bc137b50d..b804abd464ae06365adbe108a5706412efe53f4b 100644 (file)
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
         */
        spin_lock_bh(&dev->con_mon_lock);
        avg_rssi = ewma_rssi_read(&dev->avg_rssi);
-       WARN_ON_ONCE(avg_rssi == 0);
+       spin_unlock_bh(&dev->con_mon_lock);
+       if (avg_rssi == 0)
+               return;
+
        avg_rssi = -avg_rssi;
        if (avg_rssi <= -70)
                val -= 0x20;
        else if (avg_rssi <= -60)
                val -= 0x10;
-       spin_unlock_bh(&dev->con_mon_lock);
 
        if (val != mt7601u_bbp_rr(dev, 66))
                mt7601u_bbp_wr(dev, 66, val);
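
The reordering above shrinks the con_mon_lock critical section to just the read of the average RSSI: the value is sampled under the lock, the lock is dropped, and an empty average quietly skips tuning instead of tripping WARN_ON_ONCE. A sketch of the snapshot-then-act pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mon_lock = PTHREAD_MUTEX_INITIALIZER;
static int avg_rssi;                     /* written elsewhere under mon_lock */

static void agc_tune(void)
{
        pthread_mutex_lock(&mon_lock);
        int rssi = avg_rssi;             /* snapshot under the lock */
        pthread_mutex_unlock(&mon_lock);

        if (rssi == 0)                   /* no samples yet: quietly skip */
                return;
        printf("tune gain for rssi %d\n", -rssi);
}

int main(void)
{
        agc_tune();                      /* empty average: no warning, no tune */
        pthread_mutex_lock(&mon_lock);
        avg_rssi = 58;
        pthread_mutex_unlock(&mon_lock);
        agc_tune();
        return 0;
}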
index 025fa6018550895ae529c7222d9595a1fb621748..8d1492a90bd135c09213f05d52ff85682a80de71 100644 (file)
@@ -7,7 +7,7 @@ config QTNFMAC
 config QTNFMAC_PEARL_PCIE
        tristate "Quantenna QSR10g PCIe support"
        default n
-       depends on HAS_DMA && PCI && CFG80211
+       depends on PCI && CFG80211
        select QTNFMAC
        select FW_LOADER
        select CRC32
index 220e2b71020859163cc4affc71f505648561b151..ae0ca800684950e65ecc01916f4782af54a8e0eb 100644 (file)
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
        vif = qtnf_mac_get_base_vif(mac);
        if (!vif) {
                pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
-               ret = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
 
        if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
index 39c817eddd78e9cf736fbbd440c6617867afad20..54c9f6ab0c8cadb483d10413783b45b401c6f6f4 100644 (file)
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
 
 }
 
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        del_timer_sync(&rtlpriv->works.watchdog_timer);
 
-       cancel_delayed_work(&rtlpriv->works.watchdog_wq);
-       cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
-       cancel_delayed_work(&rtlpriv->works.ps_work);
-       cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
-       cancel_delayed_work(&rtlpriv->works.fwevt_wq);
-       cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
+       if (ips_wq)
+               cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+       else
+               cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.ps_work);
+       cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
 }
 EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
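
The ips_wq flag threaded through rtl_deinit_deferred_work() exists because a work item must never wait for its own cancellation: the IPS path can end up calling this function from inside the ips_nic_off_wq item itself, so that one work is cancelled asynchronously while every other work uses the _sync variant. A tiny single-threaded model of the rule (names invented; the busy-wait stands in for cancel_delayed_work_sync()):

#include <stdbool.h>
#include <stdio.h>

static bool ips_work_running;

static void cancel_work(bool sync)
{
        if (sync)
                while (ips_work_running)
                        ;                /* would spin forever from inside the work */
        printf("work cancelled (%s)\n", sync ? "synced" : "async");
}

static void deinit_deferred_work(bool ips_wq)
{
        /* the IPS work is the one we might be running inside right now */
        cancel_work(!ips_wq);
}

static void ips_nic_off_work(void)
{
        ips_work_running = true;
        deinit_deferred_work(true);      /* sync here would be a self-deadlock */
        ips_work_running = false;
}

int main(void)
{
        ips_nic_off_work();
        deinit_deferred_work(false);     /* normal teardown: wait for completion */
        return 0;
}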
 
index 912f205779c39e68387269825fdae5c4fcaa206f..a7ae40eaa3cd538f96622e4e9a53da3c2b13ccec 100644 (file)
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
 void rtl_deinit_rfkill(struct ieee80211_hw *hw);
 
 void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
index cfea57efa7f43c6bc1c6e99a45978053a1eed2ba..4bf7967590ca7be3b9b452d0b0ec65d98b829599 100644 (file)
@@ -130,7 +130,6 @@ found_alt:
                       firmware->size);
                rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
        }
-       rtlpriv->rtlhal.fwsize = firmware->size;
        release_firmware(firmware);
 }
 
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
                /* reset sec info */
                rtl_cam_reset_sec_info(hw);
 
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
        }
        rtlpriv->intf_ops->adapter_stop(hw);
 
index ae13bcfb3bf09cc142a81c9ab78c31ad80fd035e..5d1fda16fc8c4c966ff8e24ca8d49bd3b6ca38c6 100644 (file)
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
                ieee80211_unregister_hw(hw);
                rtlmac->mac80211_registered = 0;
        } else {
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
                rtlpriv->intf_ops->adapter_stop(hw);
        }
        rtlpriv->cfg->ops->disable_interrupt(hw);
index 71af24e2e05197a344dd549c94d6f8ecba7ce00f..479a4cfc245d349e105457845719ca79b7ebb10d 100644 (file)
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        /*<1> Stop all timer */
-       rtl_deinit_deferred_work(hw);
+       rtl_deinit_deferred_work(hw, true);
 
        /*<2> Disable Interrupt */
        rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        enum rf_pwrstate rtstate;
 
-       cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
 
        mutex_lock(&rtlpriv->locks.ips_mutex);
        if (ppsc->inactiveps) {
index f9faffc498bcbd2d94cad365814955f9b1347759..2ac5004d7a401ab5d1255126c5c0a00a5e233705 100644 (file)
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
                ieee80211_unregister_hw(hw);
                rtlmac->mac80211_registered = 0;
        } else {
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
                rtlpriv->intf_ops->adapter_stop(hw);
        }
        /*deinit rfkill */
index 922ce0abf5cf105a5394285b07356ebcad055d78..a57daecf1d574fc1a6e25ca5eb043c1617fe2dcc 100644 (file)
@@ -1810,7 +1810,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        err = xen_net_read_mac(dev, info->netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-               goto out;
+               goto out_unlocked;
        }
 
        rtnl_lock();
@@ -1925,6 +1925,7 @@ abort_transaction_no_dev_fatal:
        xennet_destroy_queues(info);
  out:
        rtnl_unlock();
+out_unlocked:
        device_unregister(&dev->dev);
        return err;
 }
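
The new out_unlocked label exists because xen_net_read_mac() fails before rtnl_lock() is taken; jumping to the old out label would have unlocked a lock that was never held. A compact sketch of the two-label error-unwind idiom (helper names are invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static int read_mac(void)     { return -1; }  /* fails before the lock */
static int setup_queues(void) { return 0;  }

static int talk_to_backend(void)
{
        int err = read_mac();

        if (err)
                goto out_unlocked;       /* lock not held yet: skip unlock */

        pthread_mutex_lock(&rtnl);
        err = setup_queues();
        if (err)
                goto out;                /* lock held: unlock on the way out */
        pthread_mutex_unlock(&rtnl);
        return 0;

out:
        pthread_mutex_unlock(&rtnl);
out_unlocked:
        fprintf(stderr, "backend setup failed: %d\n", err);
        return err;
}

int main(void)
{
        return talk_to_backend() ? 1 : 0;
}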
@@ -1950,10 +1951,6 @@ static int xennet_connect(struct net_device *dev)
        /* talk_to_netback() sets the correct number of queues */
        num_queues = dev->real_num_tx_queues;
 
-       rtnl_lock();
-       netdev_update_features(dev);
-       rtnl_unlock();
-
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                err = register_netdev(dev);
                if (err) {
@@ -1963,6 +1960,10 @@ static int xennet_connect(struct net_device *dev)
                }
        }
 
+       rtnl_lock();
+       netdev_update_features(dev);
+       rtnl_unlock();
+
        /*
         * All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
index d5553c47014fade81a4f461903b3cb6c4372ccf5..5d823e965883b0f5f23db5ab39afc9f96a128267 100644 (file)
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
        struct sk_buff *skb = NULL;
 
        if (!urb->status) {
-               skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+               skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
                if (!skb) {
                        nfc_err(&phy->udev->dev, "failed to alloc memory\n");
                } else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
 
        if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
                /* request for response for sent packet directly */
-               rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+               rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
                if (rc)
                        goto error;
        } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
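
The two swaps above match allocation flags to calling context: the URB completion handler runs in atomic (softirq) context and must not sleep, so it needs GFP_ATOMIC, while the frame-send path runs in process context where GFP_KERNEL may block and reclaim. A toy model of context-aware allocation (the enum and helper below are invented):

#include <stdio.h>
#include <stdlib.h>

enum alloc_ctx { CTX_ATOMIC, CTX_SLEEPABLE };

static void *buf_alloc(size_t len, enum alloc_ctx ctx)
{
        void *p = malloc(len);

        /* A sleepable caller could block for reclaim and retry; an
         * atomic caller must accept failure immediately. */
        if (!p && ctx == CTX_SLEEPABLE)
                p = malloc(len);
        return p;
}

int main(void)
{
        void *resp = buf_alloc(256, CTX_ATOMIC);    /* URB completion: softirq */
        void *req  = buf_alloc(256, CTX_SLEEPABLE); /* send path: process ctx */

        printf("resp=%p req=%p\n", resp, req);
        free(resp);
        free(req);
        return 0;
}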
index 2e96b34bc936bf89f6a9a65d983e4bbf3a673fbd..fb667bf469c7e980411c2836d4a9b97d1f96a53d 100644 (file)
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
                        return -EIO;
                if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
                        return -EIO;
+               return 0;
        }
 
        if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
index 68940356cad3f100f4cfbdd325d42235ea3c5da4..8b1fd7f1a224eedebf08cddfe2258949c50a6bcf 100644 (file)
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       if (pmem->pfn_flags & PFN_MAP)
+               blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
index 21710a7460c823bbc4f84134d7ecce70d3f993ba..bf65501e6ed634a9e60c8c142955a16a60630159 100644 (file)
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+                                          unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+       /*
+        * Revalidating a dead namespace sets capacity to 0. This will end
+        * buffered writers dirtying pages that can't be synced.
+        */
+       if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+               return;
+       revalidate_disk(ns->disk);
+       blk_set_queue_dying(ns->queue);
+       /* Forcibly unquiesce queues to avoid blocking dispatch */
+       blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
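
nvme_set_queue_dying() factors the revalidate/mark-dying/unquiesce steps out of nvme_kill_queues() so nvme_update_formats() can reuse them, and the test_and_set_bit() check makes the helper idempotent when both paths race. A minimal sketch of the set-once teardown helper (names illustrative; atomic_flag stands in for test_and_set_bit()):

#include <stdatomic.h>
#include <stdio.h>

struct ns { atomic_flag dead; };

static void set_queue_dying(struct ns *ns)
{
        /* only the first caller proceeds; later calls are no-ops */
        if (atomic_flag_test_and_set(&ns->dead))
                return;
        puts("revalidate disk, mark queue dying, unquiesce for dispatch");
}

int main(void)
{
        struct ns ns = { ATOMIC_FLAG_INIT };

        set_queue_dying(&ns);            /* e.g. from the format-update path */
        set_queue_dying(&ns);            /* e.g. from kill-queues: no-op */
        return 0;
}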
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-       u32 result;
+       u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
        int status;
 
-       status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-                       ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+       if (!supported_aens)
+               return;
+
+       status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+                       NULL, 0, &result);
        if (status)
                dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-                        ctrl->oaes & NVME_AEN_SUPPORTED);
+                        supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-       struct nvme_ns *ns, *next;
-       LIST_HEAD(rm_list);
+       struct nvme_ns *ns;
 
-       down_write(&ctrl->namespaces_rwsem);
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-                       list_move_tail(&ns->list, &rm_list);
-               }
-       }
-       up_write(&ctrl->namespaces_rwsem);
+       down_read(&ctrl->namespaces_rwsem);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               if (ns->disk && nvme_revalidate_disk(ns->disk))
+                       nvme_set_queue_dying(ns);
+       up_read(&ctrl->namespaces_rwsem);
 
-       list_for_each_entry_safe(ns, next, &rm_list, list)
-               nvme_ns_remove(ns);
+       nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                        (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-                       (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+                       (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
                        0, &cmd.result, timeout);
        nvme_passthru_end(ctrl, effects);
 
@@ -1808,6 +1823,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                u32 max_segments =
                        (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 
+               max_segments = min_not_zero(max_segments, ctrl->max_segments);
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
@@ -3137,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
        down_write(&ctrl->namespaces_rwsem);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-               if (ns->head->ns_id > nsid)
+               if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
                        list_move_tail(&ns->list, &rm_list);
        }
        up_write(&ctrl->namespaces_rwsem);
@@ -3541,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        if (ctrl->admin_q)
                blk_mq_unquiesce_queue(ctrl->admin_q);
 
-       list_for_each_entry(ns, &ctrl->namespaces, list) {
-               /*
-                * Revalidating a dead namespace sets capacity to 0. This will
-                * end buffered writers dirtying pages that can't be synced.
-                */
-               if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-                       continue;
-               revalidate_disk(ns->disk);
-               blk_set_queue_dying(ns->queue);
+       list_for_each_entry(ns, &ctrl->namespaces, list)
+               nvme_set_queue_dying(ns);
 
-               /* Forcibly unquiesce queues to avoid blocking dispatch */
-               blk_mq_unquiesce_queue(ns->queue);
-       }
        up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
index 903eb4545e2699bc1b62365e5ca4490e824a8c5c..f7efe5a58cc7c2f11163e9bb9c9319a08b3eb1eb 100644 (file)
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered.  However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq)
 {
-       if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+       if (ctrl->state != NVME_CTRL_DELETING &&
+           ctrl->state != NVME_CTRL_DEAD &&
+           !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
                return BLK_STS_RESOURCE;
        nvme_req(rq)->status = NVME_SC_ABORT_REQ;
        return BLK_STS_IOERR;
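
Per the updated comment, the extra ctrl argument lets nvmf_fail_nonready_command() fail requests outright once the controller is deleting or dead, instead of busying them to be retried against a controller that will never recover. A sketch of that state gate (enum values and return strings are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_DELETING, CTRL_DEAD };

static const char *dispatch_nonready(enum ctrl_state s, bool failfast)
{
        if (s != CTRL_DELETING && s != CTRL_DEAD && !failfast)
                return "BLK_STS_RESOURCE (busy, retry later)";
        return "BLK_STS_IOERR (fail immediately)";
}

int main(void)
{
        printf("connecting: %s\n", dispatch_nonready(CTRL_CONNECTING, false));
        printf("deleting:   %s\n", dispatch_nonready(CTRL_DELETING, false));
        return 0;
}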
index e1818a27aa2d7bcf75ff0e2c4522a61e294d8d9b..aa2fdb2a2e8fc0143b59ff48692284ba50c8225f 100644 (file)
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+               struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live);
 
index b528a2f5826cbfe19b22aadd7e09e1ceff512cb6..9bac912173ba37811545d036fa518ee882971f5a 100644 (file)
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
            !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-               return nvmf_fail_nonready_command(rq);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
        /* re-enable the admin_q so anything new can fast fail */
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
+       /* resume the io queues so that things will fast fail */
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_fc_ctlr_inactive_on_rport(ctrl);
 }
 
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
         * waiting for io to terminate
         */
        nvme_fc_delete_association(ctrl);
-
-       /* resume the io queues so that things will fast fail */
-       nvme_start_queues(nctrl);
 }
 
 static void
index 231807cbc849869afcbc16fce2e3389539ce2684..0c4a33df3b2f3bb9d8a710556dab3f8c55ed2302 100644 (file)
@@ -170,6 +170,7 @@ struct nvme_ctrl {
        u64 cap;
        u32 page_size;
        u32 max_hw_sectors;
+       u32 max_segments;
        u16 oncs;
        u16 oacs;
        u16 nssa;
index fc33804662e7bd35cfbacd93a26101bf23b3f43d..ddd441b1516aff50a8eb919d123d7103e18e69d3 100644 (file)
 
 #define SGES_PER_PAGE  (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS  127
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -100,6 +107,8 @@ struct nvme_dev {
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
 
+       mempool_t *iod_mempool;
+
        /* shadow doorbell buffer support: */
        u32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        iod->use_sgl = nvme_pci_use_sgls(dev, rq);
 
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
-               size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
-                               iod->use_sgl);
-
-               iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+               iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
                if (!iod->sg)
                        return BLK_STS_RESOURCE;
        } else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
        }
 
        if (iod->sg != iod->inline_sg)
-               kfree(iod->sg);
+               mempool_free(iod->sg, dev->iod_mempool);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
                blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
        free_opal_dev(dev->ctrl.opal_dev);
+       mempool_destroy(dev->iod_mempool);
        kfree(dev);
 }
 
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
        nvme_get_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, false);
+       nvme_kill_queues(&dev->ctrl);
        if (!queue_work(nvme_wq, &dev->remove_work))
                nvme_put_ctrl(&dev->ctrl);
 }
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto out;
 
+       /*
+        * Limit the max command size to prevent iod->sg allocations going
+        * over a single page.
+        */
+       dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+       dev->ctrl.max_segments = NVME_MAX_SEGS;
+
        result = nvme_init_identify(&dev->ctrl);
        if (result)
                goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       nvme_kill_queues(&dev->ctrl);
        if (pci_get_drvdata(pdev))
                device_release_driver(&pdev->dev);
        nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
        unsigned long quirks = id->driver_data;
+       size_t alloc_size;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -2541,10 +2556,27 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        quirks |= check_vendor_combination_bug(pdev);
 
+       /*
+        * Double check that our mempool alloc size will cover the biggest
+        * command we support.
+        */
+       alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+                                               NVME_MAX_SEGS, true);
+       WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+       dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+                                               mempool_kfree,
+                                               (void *) alloc_size,
+                                               GFP_KERNEL, node);
+       if (!dev->iod_mempool) {
+               result = -ENOMEM;
+               goto release_pools;
+       }
+
        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
                        quirks);
        if (result)
-               goto release_pools;
+               goto release_mempool;
 
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
@@ -2553,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        return 0;
 
+ release_mempool:
+       mempool_destroy(dev->iod_mempool);
  release_pools:
        nvme_release_prp_pools(dev);
  unmap:
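
The probe path above preallocates a one-element mempool sized for the largest command the driver will accept (capped via NVME_MAX_KB_SZ and NVME_MAX_SEGS), so iod scatterlist allocation can always make forward progress under memory pressure. A userspace model of the reserve-element idea; mempool semantics are approximated here, not reimplemented:

#include <stdio.h>
#include <stdlib.h>

struct pool { void *reserve; size_t sz; };

static int pool_init(struct pool *p, size_t sz)
{
        p->sz = sz;
        p->reserve = malloc(sz);         /* preallocated emergency element */
        return p->reserve ? 0 : -1;
}

static void *pool_alloc(struct pool *p)
{
        void *m = malloc(p->sz);         /* fast path */

        if (!m && p->reserve) {          /* fall back to the reserve */
                m = p->reserve;
                p->reserve = NULL;
        }
        return m;
}

static void pool_free(struct pool *p, void *m)
{
        if (!p->reserve)
                p->reserve = m;          /* refill the reserve first */
        else
                free(m);
}

int main(void)
{
        struct pool iod_pool;

        if (pool_init(&iod_pool, 4096))  /* sized for the largest command */
                return 1;
        void *sg = pool_alloc(&iod_pool);
        pool_free(&iod_pool, sg);
        free(iod_pool.reserve);
        return 0;
}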
index c9424da0d23e3cbbdd0e2b5209d9eddca9f1591f..66ec5985c9f3a9f9f2176cf14c47e222b1c59cbe 100644 (file)
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       if (nvme_rdma_queue_idx(queue) == 0) {
-               nvme_rdma_free_qe(queue->device->dev,
-                       &queue->ctrl->async_event_sqe,
-                       sizeof(struct nvme_command), DMA_TO_DEVICE);
-       }
-
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
 }
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set = &ctrl->tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_rdma_mq_ops;
-               set->queue_depth = nctrl->opts->queue_size;
+               set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
                set->numa_node = NUMA_NO_NODE;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,15 @@ out:
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
        }
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                               sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
        nvme_rdma_free_queue(&ctrl->queues[0]);
 }
 
@@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
+       error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+       if (error)
+               goto out_free_queue;
+
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
                if (IS_ERR(ctrl->ctrl.admin_tagset)) {
                        error = PTR_ERR(ctrl->ctrl.admin_tagset);
-                       goto out_free_queue;
+                       goto out_free_async_qe;
                }
 
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_stop_queue;
 
-       error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
-                       &ctrl->async_event_sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       if (error)
-               goto out_stop_queue;
-
        return 0;
 
 out_stop_queue:
@@ -811,6 +808,9 @@ out_cleanup_queue:
 out_free_tagset:
        if (new)
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+               sizeof(struct nvme_command), DMA_TO_DEVICE);
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
@@ -819,7 +819,6 @@ out_free_queue:
 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_io_queues(ctrl);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
+       kfree(ctrl->queues);
        kfree(ctrl);
 }
 
@@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        return;
 
 destroy_admin:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, false);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, false);
        }
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1637,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        WARN_ON_ONCE(rq->tag < 0);
 
        if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-               return nvmf_fail_nonready_command(rq);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
@@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
                nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_free_ctrl;
        }
 
-       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
-                               0 /* no quirks, we're perfect! */);
-       if (ret)
-               goto out_free_ctrl;
-
        INIT_DELAYED_WORK(&ctrl->reconnect_work,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues)
-               goto out_uninit_ctrl;
+               goto out_free_ctrl;
+
+       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+                               0 /* no quirks, we're perfect! */);
+       if (ret)
+               goto out_kfree_queues;
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
        WARN_ON_ONCE(!changed);
 
        ret = nvme_rdma_configure_admin_queue(ctrl, true);
        if (ret)
-               goto out_kfree_queues;
+               goto out_uninit_ctrl;
 
        /* sanity check icdoff */
        if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_remove_admin_queue;
        }
 
-       if (opts->queue_size > ctrl->ctrl.maxcmd) {
-               /* warn if maxcmd is lower than queue_size */
-               dev_warn(ctrl->ctrl.device,
-                       "queue_size %zu > ctrl maxcmd %u, clamping down\n",
-                       opts->queue_size, ctrl->ctrl.maxcmd);
-               opts->queue_size = ctrl->ctrl.maxcmd;
-       }
-
+       /* only warn if argument is too large here, will clamp later */
        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-               /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
-               opts->queue_size = ctrl->ctrl.sqsize + 1;
+       }
+
+       /* warn if maxcmd is lower than sqsize+1 */
+       if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+               dev_warn(ctrl->ctrl.device,
+                       "sqsize %u > ctrl maxcmd %u, clamping down\n",
+                       ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+               ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
        }
 
        if (opts->nr_io_queues) {
@@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        return &ctrl->ctrl;
 
 out_remove_admin_queue:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
-       kfree(ctrl->queues);
 out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
+out_kfree_queues:
+       kfree(ctrl->queues);
 out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
index d3f3b3ec4d1afaf3d7ed3626f3211ccba54c87e4..ebea1373d1b7af0fa7bfb2a8675b64f10239c59d 100644 (file)
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
        struct nvmet_ns *ns = to_nvmet_ns(item);
        struct nvmet_subsys *subsys = ns->subsys;
+       size_t len;
        int ret;
 
        mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
        if (ns->enabled)
                goto out_unlock;
 
-       kfree(ns->device_path);
+       ret = -EINVAL;
+       len = strcspn(page, "\n");
+       if (!len)
+               goto out_unlock;
 
+       kfree(ns->device_path);
        ret = -ENOMEM;
-       ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+       ns->device_path = kstrndup(page, len, GFP_KERNEL);
        if (!ns->device_path)
                goto out_unlock;
 
index a03da764ecae8cb3ec9bf0bb9c68971669b895fc..9838103f2d629b5490b12177693bed04c130e353 100644 (file)
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
                goto out_unlock;
 
        ret = nvmet_bdev_ns_enable(ns);
-       if (ret)
+       if (ret == -ENOTBLK)
                ret = nvmet_file_ns_enable(ns);
        if (ret)
                goto out_unlock;
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
        }
 
        ctrl->csts = NVME_CSTS_RDY;
+
+       /*
+        * Controllers that are not yet enabled should not really enforce the
+        * keep alive timeout, but we still want to track a timeout and cleanup
+        * in case a host died before it enabled the controller.  Hence, simply
+        * reset the keep alive timer when the controller is enabled.
+        */
+       mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
index 408279cb6f2c8041886b4eedaecf8eba9d8aadf5..29b4b236afd85fc7dc668d4ed60f66a093174d17 100644 (file)
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
        struct work_struct              work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH                (256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS                (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
        NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
        struct nvme_fc_cmd_iu           cmdiubuf;
        struct nvme_fc_ersp_iu          rspiubuf;
        dma_addr_t                      rspdma;
+       struct scatterlist              *next_sg;
        struct scatterlist              *data_sg;
        int                             data_sg_cnt;
        u32                             offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
        INIT_LIST_HEAD(&newrec->assoc_list);
        kref_init(&newrec->ref);
        ida_init(&newrec->assoc_cnt);
-       newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-                                       template->max_sgl_segments);
+       newrec->max_sg_cnt = template->max_sgl_segments;
 
        ret = nvmet_fc_alloc_ls_iodlist(newrec);
        if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
                                ((fod->io_dir == NVMET_FCP_WRITE) ?
                                        DMA_FROM_DEVICE : DMA_TO_DEVICE));
                                /* note: write from initiator perspective */
+       fod->next_sg = fod->data_sg;
 
        return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
                                struct nvmet_fc_fcp_iod *fod, u8 op)
 {
        struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+       struct scatterlist *sg = fod->next_sg;
        unsigned long flags;
-       u32 tlen;
+       u32 remaininglen = fod->req.transfer_len - fod->offset;
+       u32 tlen = 0;
        int ret;
 
        fcpreq->op = op;
        fcpreq->offset = fod->offset;
        fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-       tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-                       (fod->req.transfer_len - fod->offset));
+       /*
+        * for next sequence:
+        *  break at a sg element boundary
+        *  attempt to keep sequence length capped at
+        *    NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+        *    be longer if a single sg element is larger
+        *    than that amount. This is done to avoid creating
+        *    a new sg list to use for the tgtport api.
+        */
+       fcpreq->sg = sg;
+       fcpreq->sg_cnt = 0;
+       while (tlen < remaininglen &&
+              fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+              tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+               fcpreq->sg_cnt++;
+               tlen += sg_dma_len(sg);
+               sg = sg_next(sg);
+       }
+       if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+               fcpreq->sg_cnt++;
+               tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+               sg = sg_next(sg);
+       }
+       if (tlen < remaininglen)
+               fod->next_sg = sg;
+       else
+               fod->next_sg = NULL;
+
        fcpreq->transfer_length = tlen;
        fcpreq->transferred_length = 0;
        fcpreq->fcp_error = 0;
        fcpreq->rsplen = 0;
 
-       fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-       fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
        /*
         * If the last READDATA request: check if LLDD supports
         * combined xfr with response.
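
Following the comment in the hunk, the new loop builds each sequence by walking the scatterlist: it breaks at an element boundary, keeps the sequence under NVMET_FC_MAX_SEQ_LENGTH and the LLDD's element-count cap, allows a single oversized element through, and remembers where the next sequence resumes. A self-contained sketch of that walk (struct and limits are stand-ins):

#include <stdio.h>

#define MAX_SEQ_BYTES (256 * 1024)       /* NVMET_FC_MAX_SEQ_LENGTH analogue */

struct seg { unsigned int len; };        /* one scatterlist element */

static unsigned int build_seq(const struct seg *sg, int nsegs, int max_cnt,
                              unsigned int remaining, int *next)
{
        unsigned int tlen = 0;
        int cnt = 0;

        /* stop at an element boundary before either cap is exceeded */
        while (cnt < nsegs && tlen < remaining && cnt < max_cnt &&
               tlen + sg[cnt].len < MAX_SEQ_BYTES) {
                tlen += sg[cnt].len;
                cnt++;
        }
        if (cnt == 0 && nsegs > 0) {     /* single element bigger than cap */
                tlen = sg[0].len < remaining ? sg[0].len : remaining;
                cnt = 1;
        }
        *next = cnt;                     /* resume point for the next sequence */
        return tlen;
}

int main(void)
{
        struct seg sg[] = { { 128 * 1024 }, { 128 * 1024 }, { 128 * 1024 } };
        int next;
        unsigned int tlen = build_seq(sg, 3, 64, 3 * 128 * 1024, &next);

        printf("sequence of %u bytes, next element %d\n", tlen, next);
        return 0;
}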
index d8d91f04bd7eedae3e183c3a89dc3d42bd33a3ff..ae7586b8be07b6310a267217642f33147177eb98 100644 (file)
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_status_t ret;
 
        if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-               return nvmf_fail_nonready_command(req);
+               return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
index b5b0cdc21d01b4cd940e4fddb6f06976258e782d..514d1dfc563059684d4fbc6a3e8b7aa8bd07da64 100644 (file)
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
                        return cell;
        }
 
+       /* NULL cell_id only allowed for device tree; invalid otherwise */
+       if (!cell_id)
+               return ERR_PTR(-EINVAL);
+
        return nvmem_cell_get_from_list(cell_id);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_get);
index 848f549164cd0434ae8a28210d47b9ade2b42de3..466e3c8582f0fd62628b90872b2046971e064776 100644 (file)
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
 {
        unsigned long flags;
        u32 cache_entries;
@@ -134,8 +134,7 @@ out:
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
 {
        unsigned long flags;
 
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
 
        return 0;
 }
+#if !defined(CONFIG_MODULES)
 late_initcall_sync(of_free_phandle_cache);
 #endif
 
index 891d780c076a12d14dde8d50b87d940e9c83707e..216175d11d3dc2ca3fdfaa429306ecf50218a01a 100644 (file)
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
index 7baa53e5b1d74d469959341945e3bc239cf7d5c7..eda57ef12fd057b3d92c750dec983703e85f38e3 100644 (file)
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
                goto err_free_overlay_changeset;
        }
 
+       of_populate_phandle_cache();
+
        ret = __of_changeset_apply_notify(&ovcs->cset);
        if (ret)
                pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
 
        list_del(&ovcs->ovcs_list);
 
+       /*
+        * Disable phandle cache.  Avoids race condition that would arise
+        * from removing cache entry when the associated node is deleted.
+        */
+       of_free_phandle_cache();
+
        ret_apply = 0;
        ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+
+       of_populate_phandle_cache();
+
        if (ret) {
                if (ret_apply)
                        devicetree_state_flags |= DTSF_REVERT_FAIL;
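
As the added comment says, the cache is freed before the changeset revert and repopulated afterwards, so a lookup can never hit a cache entry for a node the revert deleted. A crude model of that disable-mutate-rebuild window (arrays stand in for the device tree and the phandle cache):

#include <stdio.h>
#include <string.h>

#define NODES 8

static int tree[NODES];                  /* stand-in for live DT nodes */
static int *cache[NODES];                /* phandle -> node pointer cache */

static void populate_cache(void)
{
        for (int i = 0; i < NODES; i++)
                cache[i] = &tree[i];
}

static void free_cache(void)
{
        memset(cache, 0, sizeof(cache)); /* no stale pointers can be served */
}

static void overlay_remove(void)
{
        free_cache();                    /* lookups miss instead of hitting */
        tree[3] = -1;                    /* revert: "delete" overlay nodes */
        populate_cache();                /* rebuild from the surviving tree */
}

int main(void)
{
        populate_cache();
        overlay_remove();
        printf("cache[3] -> %d\n", *cache[3]);
        return 0;
}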
index ab2f3fead6b1ceee55b0ced767dafdacb1202b35..31ff03dbeb83771be1ba57fde89ea6c32f63c2aa 100644 (file)
@@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
        }
 
        /* Scaling up? Scale voltage before frequency */
-       if (freq > old_freq) {
+       if (freq >= old_freq) {
                ret = _set_opp_voltage(dev, reg, new_supply);
                if (ret)
                        goto restore_voltage;
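
The one-character change matters for the equal-frequency case: with a strict >, a request where freq == old_freq but the supply changed skipped the voltage update entirely; >= routes it through the scale-up branch so the new voltage is still programmed before (a no-op) frequency write. A sketch of the ordering rule (helper names invented):

#include <stdio.h>

static void set_voltage(int uv) { printf("regulator -> %d uV\n", uv); }
static void set_freq(long hz)   { printf("clock     -> %ld Hz\n", hz); }

static void set_opp(long old_hz, long new_hz, int new_uv)
{
        if (new_hz >= old_hz) {          /* up, or same freq: voltage first */
                set_voltage(new_uv);
                set_freq(new_hz);
        } else {                         /* down: frequency first */
                set_freq(new_hz);
                set_voltage(new_uv);
        }
}

int main(void)
{
        /* same frequency, new supply: with ">" this voltage write was lost */
        set_opp(800000000, 800000000, 1100000);
        return 0;
}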
index 535201984b8b0c5c0c58d585528b7593bbcf61be..1b2cfe51e8d719ce6cd6976fbaad485cc8143fd8 100644 (file)
@@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB)   += pci-pf-stub.o
 obj-$(CONFIG_PCI_ECAM)         += ecam.o
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 
-obj-y                          += controller/
-obj-y                          += switch/
-
 # Endpoint library must be initialized before its users
 obj-$(CONFIG_PCI_ENDPOINT)     += endpoint/
 
+obj-y                          += controller/
+obj-y                          += switch/
+
 ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
index 18fa09b3ac8f2c377ccd8e9ba890f01d66b3c367..cc9fa02d32a08e6051e2adbcee288b6b4f629f14 100644 (file)
@@ -96,7 +96,6 @@ config PCI_HOST_GENERIC
        depends on OF
        select PCI_HOST_COMMON
        select IRQ_DOMAIN
-       select PCI_DOMAINS
        help
          Say Y here if you want to support a simple generic PCI host
          controller, such as the one emulated by kvmtool.
@@ -138,7 +137,6 @@ config PCI_VERSATILE
 
 config PCIE_IPROC
        tristate
-       select PCI_DOMAINS
        help
          This enables the iProc PCIe core controller support for Broadcom's
          iProc family of SoCs. An appropriate bus interface driver needs
@@ -176,7 +174,6 @@ config PCIE_IPROC_MSI
 config PCIE_ALTERA
        bool "Altera PCIe controller"
        depends on ARM || NIOS2 || COMPILE_TEST
-       select PCI_DOMAINS
        help
          Say Y here if you want to enable PCIe controller support on Altera
          FPGA.
index 16f52c626b4bd5101afd9c517b48dc6ca014f082..91b0194240a57e8f1d1d78ab682f20135002a68d 100644 (file)
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST
        depends on PCI && PCI_MSI_IRQ_DOMAIN
        select PCIE_DW_HOST
        select PCIE_DW_PLAT
-       default y
        help
          Enables support for the PCIe controller in the Designware IP to
          work in host mode. There are two instances of PCIe controller in
index 781aa03aeede34adbad23fa6b37b0d2275d00458..29a05759a29421aab29efb6026ffec9d083abef1 100644 (file)
@@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
        resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
                switch (resource_type(win->res)) {
                case IORESOURCE_IO:
-                       ret = pci_remap_iospace(win->res, pp->io_base);
+                       ret = devm_pci_remap_iospace(dev, win->res,
+                                                    pp->io_base);
                        if (ret) {
                                dev_warn(dev, "Error %d: failed to map resource %pR\n",
                                         ret, win->res);
index d3172d5d3d352f3ff665f753718e63534b99230d..0fae816fba39b3cc09e36850ab101ae64e8c6b9f 100644 (file)
@@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
                                             0, 0xF8000000, 0,
                                             lower_32_bits(res->start),
                                             OB_PCIE_IO);
-                       err = pci_remap_iospace(res, iobase);
+                       err = devm_pci_remap_iospace(dev, res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
index a1ebe9ed441f0aef256a55393bf296b44f08c9d6..bf5ece5d9291f18691b316a520db356910d09ffb 100644 (file)
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                dev_err(p->dev, "failed to get parent IRQ\n");
+               of_node_put(intc);
                return irq ?: -EINVAL;
        }
 
        p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
                                             &faraday_pci_irqdomain_ops, p);
+       of_node_put(intc);
        if (!p->irqdomain) {
                dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
                return -EINVAL;
@@ -501,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
                                dev_err(dev, "illegal IO mem size\n");
                                return -EINVAL;
                        }
-                       ret = pci_remap_iospace(io, io_base);
+                       ret = devm_pci_remap_iospace(dev, io, io_base);
                        if (ret) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         ret, io);
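
Several hunks in this series add the same fix: OF helpers such as of_get_child_by_name() return the node with its refcount raised, and the reference must be dropped on every path, including error paths. A minimal sketch of the balanced pattern, assuming a hypothetical setup function and child-node name:

    #include <linux/of.h>
    #include <linux/of_irq.h>

    /* Sketch: drop the reference taken on "intc" on every exit path. */
    static int example_get_parent_irq(struct device_node *parent)
    {
            struct device_node *intc;
            int irq;

            intc = of_get_child_by_name(parent, "interrupt-controller");
            if (!intc)
                    return -ENODEV;

            irq = of_irq_get(intc, 0);
            of_node_put(intc);              /* done with the node either way */
            if (irq <= 0)
                    return irq ?: -EINVAL;

            return irq;
    }
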
index 6cc5036ac83cface8941f2e58817c98a4eb80084..f6325f1a89e878ed69591b3ee8f96b0bec852a60 100644 (file)
@@ -1073,6 +1073,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        struct pci_bus *pbus;
        struct pci_dev *pdev;
        struct cpumask *dest;
+       unsigned long flags;
        struct compose_comp_ctxt comp;
        struct tran_int_desc *int_desc;
        struct {
@@ -1164,14 +1165,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
                 * the channel callback directly when channel->target_cpu is
                 * the current CPU. When the higher level interrupt code
                 * calls us with interrupt enabled, let's add the
-                * local_bh_disable()/enable() to avoid race.
+                * local_irq_save()/restore() to avoid the race:
+                * hv_pci_onchannelcallback() can also run in tasklet context.
                 */
-               local_bh_disable();
+               local_irq_save(flags);
 
                if (hbus->hdev->channel->target_cpu == smp_processor_id())
                        hv_pci_onchannelcallback(hbus);
 
-               local_bh_enable();
+               local_irq_restore(flags);
 
                if (hpdev->state == hv_pcichild_ejecting) {
                        dev_err_once(&hbus->hdev->device,
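
The switch from local_bh_disable() to local_irq_save() above is needed because disabling bottom halves only keeps tasklets off the current CPU and is not safe when the caller may already have interrupts disabled; saving and restoring the interrupt state covers both cases. A minimal sketch of the save/restore bracket, with a hypothetical callback:

    #include <linux/irqflags.h>

    /* Sketch: invoke a handler that can also fire from IRQ/tasklet context. */
    static void example_poll_channel(void (*cb)(void *), void *arg)
    {
            unsigned long flags;

            local_irq_save(flags);          /* stronger than local_bh_disable() */
            cb(arg);
            local_irq_restore(flags);
    }
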
index 68b8bfbdb867d0e53500e9a01a9b9ad041fc1c0d..d219404bad92b8394b902554b81f44d6c544869a 100644 (file)
@@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
                v3->io_bus_addr = io->start - win->offset;
                dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
                        io, &v3->io_bus_addr);
-               ret = pci_remap_iospace(io, io_base);
+               ret = devm_pci_remap_iospace(dev, io, io_base);
                if (ret) {
                        dev_warn(dev,
                                 "error %d: failed to map resource %pR\n",
index 994f32061b325d90d6a790452d61e5b3b0558154..f59ad2728c0b3266c8308e788d25e9c72f8dd96d 100644 (file)
@@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
 
                switch (resource_type(res)) {
                case IORESOURCE_IO:
-                       err = pci_remap_iospace(res, iobase);
+                       err = devm_pci_remap_iospace(dev, res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
index d854d67e873cc1ee58d4a2ac7915cb6c72f024ac..ffda3e8b474268cebdbc807920b2f444f62102db 100644 (file)
@@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
                case IORESOURCE_IO:
                        xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
                                                res->start - window->offset);
-                       ret = pci_remap_iospace(res, io_base);
+                       ret = devm_pci_remap_iospace(dev, res, io_base);
                        if (ret < 0)
                                return ret;
                        break;
index 0baabe30858fd39d6d95310e2e5bfaf125684e7b..861dda69f3669970163d81bff102d849c2bd191f 100644 (file)
@@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
        if (err < 0)
                return err;
 
-       pci_remap_iospace(&pcie->pio, pcie->io.start);
+       devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
 
        return 0;
 }
index 874d75c9ee4ac44513f584267272253d4cf84e7d..c8febb009454cdcfc0ed473f4cdb9540af0de59e 100644 (file)
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
        if (err)
                return err;
 
-       return phy_power_on(pcie->phy);
+       err = phy_power_on(pcie->phy);
+       if (err)
+               phy_exit(pcie->phy);
+
+       return err;
 }
 
 static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
        if (rcar_pcie_hw_init(pcie)) {
                dev_info(dev, "PCIe link down\n");
                err = -ENODEV;
-               goto err_clk_disable;
+               goto err_phy_shutdown;
        }
 
        data = rcar_pci_read_reg(pcie, MACSR);
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
                        dev_err(dev,
                                "failed to enable MSI support: %d\n",
                                err);
-                       goto err_clk_disable;
+                       goto err_phy_shutdown;
                }
        }
 
@@ -1191,6 +1195,12 @@ err_msi_teardown:
        if (IS_ENABLED(CONFIG_PCI_MSI))
                rcar_pcie_teardown_msi(pcie);
 
+err_phy_shutdown:
+       if (pcie->phy) {
+               phy_power_off(pcie->phy);
+               phy_exit(pcie->phy);
+       }
+
 err_clk_disable:
        clk_disable_unprepare(pcie->bus_clk);
 
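
The new err_phy_shutdown label extends the usual reverse-order unwinding so that a PHY that was initialized (and possibly powered) is torn down again on any later probe failure. A minimal sketch of the pairing, assuming a generic PHY consumer:

    #include <linux/phy/phy.h>

    /* Sketch: undo phy_init() if phy_power_on() fails. */
    static int example_phy_bringup(struct phy *phy)
    {
            int err;

            err = phy_init(phy);
            if (err)
                    return err;

            err = phy_power_on(phy);
            if (err)
                    phy_exit(phy);  /* unwind in reverse order */

            return err;
    }
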
index 6a4bbb5b3de006f37f005bf0f198b28de16c7619..fb32840ce8e66ac75f7c164a402ac9a0e0ae094b 100644 (file)
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
                                                        PCI_NUM_INTX,
                                                        &legacy_domain_ops,
                                                        pcie);
-
+       of_node_put(legacy_intc_node);
        if (!pcie->legacy_irq_domain) {
                dev_err(dev, "failed to create IRQ domain\n");
                return -ENOMEM;
index b110a3a814e35e3bbd7839e68994cef56f19ed43..7b1389d8e2a5711383a4e8d8a6a424a4a33448f4 100644 (file)
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
        port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
                                                 &intx_domain_ops,
                                                 port);
+       of_node_put(pcie_intc_node);
        if (!port->leg_domain) {
                dev_err(dev, "Failed to get a INTx IRQ domain\n");
                return -ENODEV;
index 523a8cab3bfba16613a03d13916aef3b223ef2cb..825fa24427a396a711b734ee03643038e1260185 100644 (file)
@@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
 }
 EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
 
+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+       struct config_group *group, *tmp;
+
+       if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+               return;
+
+       mutex_lock(&pci_epf_mutex);
+       list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+               pci_ep_cfs_remove_epf_group(group);
+       list_del(&driver->epf_group);
+       mutex_unlock(&pci_epf_mutex);
+}
+
 /**
  * pci_epf_unregister_driver() - unregister the PCI EPF driver
  * @driver: the PCI EPF driver that has to be unregistered
@@ -145,17 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
  */
 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
 {
-       struct config_group *group;
-
-       mutex_lock(&pci_epf_mutex);
-       list_for_each_entry(group, &driver->epf_group, group_entry)
-               pci_ep_cfs_remove_epf_group(group);
-       list_del(&driver->epf_group);
-       mutex_unlock(&pci_epf_mutex);
+       pci_epf_remove_cfs(driver);
        driver_unregister(&driver->driver);
 }
 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
 
+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+       struct config_group *group;
+       const struct pci_epf_device_id *id;
+
+       if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+               return 0;
+
+       INIT_LIST_HEAD(&driver->epf_group);
+
+       id = driver->id_table;
+       while (id->name[0]) {
+               group = pci_ep_cfs_add_epf_group(id->name);
+               if (IS_ERR(group)) {
+                       pci_epf_remove_cfs(driver);
+                       return PTR_ERR(group);
+               }
+
+               mutex_lock(&pci_epf_mutex);
+               list_add_tail(&group->group_entry, &driver->epf_group);
+               mutex_unlock(&pci_epf_mutex);
+               id++;
+       }
+
+       return 0;
+}
+
 /**
  * __pci_epf_register_driver() - register a new PCI EPF driver
  * @driver: structure representing PCI EPF driver
@@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
                              struct module *owner)
 {
        int ret;
-       struct config_group *group;
-       const struct pci_epf_device_id *id;
 
        if (!driver->ops)
                return -EINVAL;
@@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
        if (ret)
                return ret;
 
-       INIT_LIST_HEAD(&driver->epf_group);
-
-       id = driver->id_table;
-       while (id->name[0]) {
-               group = pci_ep_cfs_add_epf_group(id->name);
-               mutex_lock(&pci_epf_mutex);
-               list_add_tail(&group->group_entry, &driver->epf_group);
-               mutex_unlock(&pci_epf_mutex);
-               id++;
-       }
+       pci_epf_add_cfs(driver);
 
        return 0;
 }
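
Factoring the configfs handling into pci_epf_add_cfs()/pci_epf_remove_cfs() keeps the two paths symmetric and lets IS_ENABLED() turn both into no-ops when CONFIG_PCI_ENDPOINT_CONFIGFS is off. A minimal sketch of that guard idiom, with a hypothetical config option and helpers:

    #include <linux/kconfig.h>

    /* Hypothetical feature hooks, for illustration only. */
    int example_feature_register(void);
    void example_feature_unregister(void);

    static int example_add_feature(void)
    {
            /* Constant-folded away when the option is disabled. */
            if (!IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
                    return 0;

            return example_feature_register();
    }

    static void example_remove_feature(void)
    {
            if (!IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
                    return;

            example_feature_unregister();
    }
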
index 3979f89b250ad86ec622d4aa90d1450422324db8..5bd6c1573295696acf8387bd7cd953bf8f369546 100644 (file)
@@ -7,7 +7,6 @@
  * All rights reserved.
  *
  * Send feedback to <kristen.c.accardi@intel.com>
- *
  */
 
 #include <linux/module.h>
@@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
                return 0;
 
        /* If _OSC exists, we should not evaluate OSHP */
+
+       /*
+        * If there's no ACPI host bridge (i.e., ACPI support is compiled
+        * into the kernel but the hardware platform doesn't support ACPI),
+        * there's nothing to do here.
+        */
        host = pci_find_host_bridge(pdev->bus);
        root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+       if (!root)
+               return 0;
+
        if (root->osc_support_set)
                goto no_control;
 
index d0d73dbbd5ca4123fce20254a0bf6495dcbe1d9e..0f04ae648cf14bcc590ce55b5b91a5635b02c500 100644 (file)
@@ -574,6 +574,22 @@ void pci_iov_release(struct pci_dev *dev)
                sriov_release(dev);
 }
 
+/**
+ * pci_iov_remove - clean up SR-IOV state after PF driver is detached
+ * @dev: the PCI device
+ */
+void pci_iov_remove(struct pci_dev *dev)
+{
+       struct pci_sriov *iov = dev->sriov;
+
+       if (!dev->is_physfn)
+               return;
+
+       iov->driver_max_VFs = iov->total_VFs;
+       if (iov->num_VFs)
+               pci_warn(dev, "driver left SR-IOV enabled after remove\n");
+}
+
 /**
  * pci_iov_update_resource - update a VF BAR
  * @dev: the PCI device
index d088c9147f10534ef767dba007e57e72337c2b0e..69a60d6ebd7365f52fcdf0c5e1e9ce870cd990f7 100644 (file)
@@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
 
                switch (resource_type(res)) {
                case IORESOURCE_IO:
-                       err = pci_remap_iospace(res, iobase);
+                       err = devm_pci_remap_iospace(dev, res, iobase);
                        if (err) {
                                dev_warn(dev, "error %d: failed to map resource %pR\n",
                                         err, res);
index 65113b6eed1473aa00daf59b1dfd57b8abd6baaf..89ee6a2b6eb838f426d6d9d3773f70769d1a489f 100644 (file)
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
 {
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 
+       /*
+        * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
+        * system-wide suspend/resume confuses the platform firmware, so avoid
+        * doing that, unless the bridge has a driver that should take care of
+        * the PM handling.  According to Section 16.1.6 of ACPI 6.2, endpoint
+        * devices are expected to be in D3 before invoking the S3 entry path
+        * from the firmware, so they should not be affected by this issue.
+        */
+       if (pci_is_bridge(dev) && !dev->driver &&
+           acpi_target_system_state() != ACPI_STATE_S0)
+               return true;
+
        if (!adev || !acpi_device_power_manageable(adev))
                return false;
 
index c125d53033c69cafe42734c32935561547abbdba..6792292b5fc7055ab145703a3e718e50d0d4751c 100644 (file)
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev)
                }
                pcibios_free_irq(pci_dev);
                pci_dev->driver = NULL;
+               pci_iov_remove(pci_dev);
        }
 
        /* Undo the runtime PM settings in local_pci_probe() */
index 97acba712e4e7f7191df5fd7ae7800597dc94fd5..aa1684d99b709698dc47db379456cf641f3f5057 100644 (file)
@@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res)
 }
 EXPORT_SYMBOL(pci_unmap_iospace);
 
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+       struct resource **res = ptr;
+
+       pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace().  Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+                          phys_addr_t phys_addr)
+{
+       const struct resource **ptr;
+       int error;
+
+       ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       error = pci_remap_iospace(res, phys_addr);
+       if (error) {
+               devres_free(ptr);
+       } else {
+               *ptr = res;
+               devres_add(dev, ptr);
+       }
+
+       return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
 /**
  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
  * @dev: Generic device to remap IO address for
@@ -5222,6 +5260,7 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
 
        return PCI_SPEED_UNKNOWN;
 }
+EXPORT_SYMBOL(pcie_get_speed_cap);
 
 /**
  * pcie_get_width_cap - query for the PCI device's link width capability
@@ -5240,6 +5279,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
 
        return PCIE_LNK_WIDTH_UNKNOWN;
 }
+EXPORT_SYMBOL(pcie_get_width_cap);
 
 /**
  * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
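
For context, a minimal sketch of how a host-bridge driver might call the new managed helper from probe; the mapping is then undone automatically on driver detach. The device, resource, and address below are hypothetical, and the resource must outlive the binding (here it is static):

    #include <linux/pci.h>
    #include <linux/platform_device.h>

    static struct resource example_pio = {
            .name  = "example PIO",
            .start = 0,
            .end   = 0xffff,
            .flags = IORESOURCE_IO,
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* Hypothetical CPU address of the controller's I/O window. */
            phys_addr_t io_base = 0xf8000000;
            int ret;

            ret = devm_pci_remap_iospace(&pdev->dev, &example_pio, io_base);
            if (ret)
                    dev_warn(&pdev->dev, "failed to map %pR: %d\n",
                             &example_pio, ret);

            return ret;
    }
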
index c358e7a07f3faf2abbfff9de4b9e219ae6558e58..882f1f9596dfffa644e624b3a42a120999f06581 100644 (file)
@@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
+void pci_iov_remove(struct pci_dev *dev);
 void pci_iov_update_resource(struct pci_dev *dev, int resno);
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 void pci_restore_iov_state(struct pci_dev *dev);
@@ -323,6 +324,9 @@ static inline int pci_iov_init(struct pci_dev *dev)
 }
 static inline void pci_iov_release(struct pci_dev *dev)
 
+{
+}
+static inline void pci_iov_remove(struct pci_dev *dev)
 {
 }
 static inline void pci_restore_iov_state(struct pci_dev *dev)
index f7ce0cb0b0b70a48902010c3cea0fb1707e5ead9..f02e334beb457da586857736fc8fc45530847fe3 100644 (file)
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 
        parent = udev->subordinate;
        pci_lock_rescan_remove();
+       pci_dev_get(dev);
        list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
                                         bus_list) {
                pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
                pci_info(dev, "Device recovery from fatal error failed\n");
        }
 
+       pci_dev_put(dev);
        pci_unlock_rescan_remove();
 }
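
The pci_dev_get()/pci_dev_put() pair added above keeps the struct pci_dev valid while the recovery path may remove and re-add devices under the rescan/remove lock. A minimal sketch of the refcount bracket, with a hypothetical operation:

    #include <linux/pci.h>

    /* Hypothetical operation that may detach the device, for illustration. */
    void example_reset_slot(struct pci_dev *dev);

    static void example_recover(struct pci_dev *dev)
    {
            pci_dev_get(dev);               /* keep *dev valid throughout */
            example_reset_slot(dev);
            pci_dev_put(dev);
    }
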
 
index 6bdb1dad805f8198a6879aeab21a5ff9051b1510..0e31f1392a53ca042519bbbe4bc37dab3bfe87c0 100644 (file)
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
        case PMU_TYPE_IOB:
                return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
        case PMU_TYPE_IOB_SLOW:
-               return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
+               return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
        case PMU_TYPE_MCB:
                return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
        case PMU_TYPE_MC:
index 1b7febc43da932628dc0223b8a37eac18320daa9..29d2c3b1913ac95bb9e49f705622e576643c01e1 100644 (file)
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
        void __iomem *ctrl = params->ctrl_regs;
 
+       USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+       /* 1 millisecond - for USB clocks to settle down */
+       usleep_range(1000, 2000);
+
        if (BRCM_ID(params->family_id) == 0x7366) {
                /*
                 * The PHY3_SOFT_RESETB bits default to the wrong state.
index 23705e1a002371327e059e48cccbf4828dabb33d..0075fb0bef8c55eab8d66804f9c6310003e8ec17 100644 (file)
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
        ddata = container_of(work, struct phy_mdm6600, status_work.work);
        dev = ddata->dev;
 
-       error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+       error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
                                               ddata->status_gpios->desc,
                                               values);
        if (error)
                return;
 
-       for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+       for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
                val |= values[i] << i;
                dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
                        __func__, i, values[i], val);
index 76243caa08c630c064ebd674f566089bea1fc4ba..b5c880b50bb371f5fb5eeddcd0799cd9d7057289 100644 (file)
@@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
        unsigned long flags;
        unsigned int param;
        u32 reg, bit, width, arg;
-       int ret, i;
+       int ret = 0, i;
 
        info = &pctrl->soc->padinfo[pin];
 
index 35c17653c694767c8e13d1a8d02d7d91718d23b3..87618a4e90e451f2834214a337ce81e12de560fc 100644 (file)
@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
        const struct nsp_pin_function *func;
        const struct nsp_pin_group *grp;
 
-       if (grp_select > pinctrl->num_groups ||
-               func_select > pinctrl->num_functions)
+       if (grp_select >= pinctrl->num_groups ||
+           func_select >= pinctrl->num_functions)
                return -EINVAL;
 
        func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
                return PTR_ERR(pinctrl->base0);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               return -EINVAL;
        pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
                                              resource_size(res));
        if (!pinctrl->base1) {
index b601039d6c69a28d771eff622f0001d70bf84204..c4aa411f5935b7b0275c004a3924ffda3613630a 100644 (file)
@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 }
 
 static int dt_to_map_one_config(struct pinctrl *p,
-                               struct pinctrl_dev *pctldev,
+                               struct pinctrl_dev *hog_pctldev,
                                const char *statename,
                                struct device_node *np_config)
 {
+       struct pinctrl_dev *pctldev = NULL;
        struct device_node *np_pctldev;
        const struct pinctrl_ops *ops;
        int ret;
@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
                        return -EPROBE_DEFER;
                }
                /* If we're creating a hog we can use the passed pctldev */
-               if (pctldev && (np_pctldev == p->dev->of_node))
+               if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
+                       pctldev = hog_pctldev;
                        break;
+               }
                pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
                if (pctldev)
                        break;
index ad6da1184c9f0b1117275df587f502d78abbe904..4c4740ffeb9ca0807f63d77271ad619407155c3b 100644 (file)
@@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = {
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 {
-       struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+       struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        int value, err;
 
        err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
@@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 
 static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
 {
-       struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+       struct mtk_pinctrl *hw = gpiochip_get_data(chip);
 
        mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
 }
@@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        unsigned long eint_n;
 
+       if (!hw->eint)
+               return -ENOTSUPP;
+
        eint_n = offset;
 
        return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        unsigned long eint_n;
        u32 debounce;
 
-       if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+       if (!hw->eint ||
+           pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
                return -ENOTSUPP;
 
        debounce = pinconf_to_config_argument(config);
@@ -1504,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
        if (ret < 0)
                return ret;
 
-       ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
-                                    chip->ngpio);
-       if (ret < 0) {
-               gpiochip_remove(chip);
-               return ret;
+       /* Only for backward compatibility with old pinctrl nodes that lack
+        * the "gpio-ranges" property: calling gpiochip_add_pin_range()
+        * directly from a DeviceTree-supported pinctrl driver is DEPRECATED.
+        * Please see Section 2.1 of
+        * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+        * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+        */
+       if (!of_find_property(np, "gpio-ranges", NULL)) {
+               ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+                                            chip->ngpio);
+               if (ret < 0) {
+                       gpiochip_remove(chip);
+                       return ret;
+               }
        }
 
        return 0;
@@ -1691,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
        mtk_desc.custom_conf_items = mtk_conf_items;
 #endif
 
-       hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw);
-       if (IS_ERR(hw->pctrl))
-               return PTR_ERR(hw->pctrl);
+       err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+                                            &hw->pctrl);
+       if (err)
+               return err;
 
        /* Setup groups descriptions per SoC types */
        err = mtk_build_groups(hw);
        if (err) {
                dev_err(&pdev->dev, "Failed to build groups\n");
-               return 0;
+               return err;
        }
 
        /* Setup functions descriptions per SoC types */
@@ -1709,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = mtk_build_gpiochip(hw, pdev->dev.of_node);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+       /* To be able to claim hogs via pinctrl_claim_hogs(), we must not
+        * enable pinctrl until all groups and functions have been added.
+        */
+       err = pinctrl_enable(hw->pctrl);
+       if (err)
                return err;
-       }
 
        err = mtk_build_eint(hw, pdev);
        if (err)
                dev_warn(&pdev->dev,
                         "Failed to add EINT, but pinctrl still can work\n");
 
+       /* The gpiochip must be built after pinctrl_enable() is done */
+       err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+               return err;
+       }
+
        platform_set_drvdata(pdev, hw);
 
        return 0;
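
gpiochip_get_data() returns whatever pointer was registered together with the chip, whereas dev_get_drvdata(chip->parent) only works if the same pointer was also set as drvdata early enough; in this probe, platform_set_drvdata() runs last, so the gpiochip accessor is the correct one. A minimal sketch of the add_data/get_data pairing, with hypothetical names:

    #include <linux/gpio/driver.h>

    struct example_pinctrl {
            struct gpio_chip chip;
            /* ... hardware state ... */
    };

    /* Hypothetical register accessor, for illustration only. */
    int example_hw_read(struct example_pinctrl *hw, unsigned int offset);

    static int example_gpio_get(struct gpio_chip *chip, unsigned int offset)
    {
            /* Returns the pointer passed to devm_gpiochip_add_data() below. */
            struct example_pinctrl *hw = gpiochip_get_data(chip);

            return example_hw_read(hw, offset);
    }

    static int example_register(struct device *dev, struct example_pinctrl *hw)
    {
            hw->chip.parent = dev;
            hw->chip.get = example_gpio_get;
            return devm_gpiochip_add_data(dev, &hw->chip, hw);
    }
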
index b3799695d8db8264ce917232cb3b1439793fe845..16ff56f93501794edb33231b770afa3dc5d55b73 100644 (file)
@@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Unable to get eint resource\n");
-               return -ENODEV;
-       }
-
        pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pctl->eint->base))
                return PTR_ERR(pctl->eint->base);
index a1d7156d0a43ad49ac6312ab67b67a7312ad9e10..6a1b6058b9910269c60c448cbca3350bce399af6 100644 (file)
@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
-               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
        }
 
index b3153c095199d3bed84d7b846432fa3e783c08f0..e5647dac0818d46353629a543733fe0af804210b 100644 (file)
@@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs)
 
        mux_bytes = pcs->width / BITS_PER_BYTE;
 
-       if (!pcs->saved_vals)
+       if (!pcs->saved_vals) {
                pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+               if (!pcs->saved_vals)
+                       return -ENOMEM;
+       }
 
        switch (pcs->width) {
        case 64:
@@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
        if (!pcs)
                return -EINVAL;
 
-       if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
-               pcs_save_context(pcs);
+       if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+               int ret;
+
+               ret = pcs_save_context(pcs);
+               if (ret < 0)
+                       return ret;
+       }
 
        return pinctrl_force_sleep(pcs->pctl);
 }
index b02caf31671186d97ea194e612b94adeb70335f4..eeb58b3bbc9a0cef4b47f65c4b8855f683e9f7e0 100644 (file)
 #include "core.h"
 #include "sh_pfc.h"
 
-#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
-
 #define CPU_ALL_PORT(fn, sfx)                                          \
-       PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS),                          \
-       PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_6(4,  fn, sfx, CFG_FLAGS),                          \
-       PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
+       PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_28(1, fn, sfx),                                         \
+       PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_6(4,  fn, sfx),                                         \
+       PORT_GP_15(5, fn, sfx)
 /*
  * F_() : just information
  * FM() : macro for FN_xxx / xxx_MARK
index f1fa8612db406168f53db3d71a025255c0622af0..06978c14c83b23c5e35c4cb721863903a736486d 100644 (file)
@@ -2185,7 +2185,7 @@ static int __init dell_init(void)
                dell_fill_request(&buffer, token->location, 0, 0, 0);
                ret = dell_send_request(&buffer,
                                        CLASS_TOKEN_READ, SELECT_TOKEN_AC);
-               if (ret)
+               if (ret == 0)
                        max_intensity = buffer.output[3];
        }
 
index 767c485af59b2ee0583242b7dbf31581f76cb91a..01b0e2bb33190c78fb3818e34d5aebf4f60b2832 100644 (file)
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
        case PTP_PF_PHYSYNC:
                if (chan != 0)
                        return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
@@ -221,7 +222,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                }
                pct = &sysoff->ts[0];
                for (i = 0; i < sysoff->n_samples; i++) {
-                       getnstimeofday64(&ts);
+                       ktime_get_real_ts64(&ts);
                        pct->sec = ts.tv_sec;
                        pct->nsec = ts.tv_nsec;
                        pct++;
@@ -230,7 +231,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        pct->nsec = ts.tv_nsec;
                        pct++;
                }
-               getnstimeofday64(&ts);
+               ktime_get_real_ts64(&ts);
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
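
getnstimeofday64() is the deprecated name; ktime_get_real_ts64() fills the same struct timespec64 from CLOCK_REALTIME, so the conversions here and below are a drop-in rename. A minimal sketch:

    #include <linux/timekeeping.h>

    /* Sketch: sample the realtime clock into seconds/nanoseconds. */
    static void example_sample_time(s64 *sec, long *nsec)
    {
            struct timespec64 ts;

            ktime_get_real_ts64(&ts);       /* replaces getnstimeofday64() */
            *sec = ts.tv_sec;
            *nsec = ts.tv_nsec;
    }
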
index 1468a1642b4978f5048ccace2af1846baf9c51e9..e8652c148c5223d24c67089539b3bb4e861e42d5 100644 (file)
@@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
                pr_err("ioremap ptp registers failed\n");
                goto no_ioremap;
        }
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
        ptp_qoriq_settime(&qoriq_ptp->caps, &now);
 
        tmr_ctrl =
index 6d4012dd69221a1ebc4b72866824be8a95f29468..bac1eeb3d31204d9e99a93e1e682972b5f7177bb 100644 (file)
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                        return err;
 
                /* full-function RTCs won't have such missing fields */
-               if (rtc_valid_tm(&alarm->time) == 0)
+               if (rtc_valid_tm(&alarm->time) == 0) {
+                       rtc_add_offset(rtc, &alarm->time);
                        return 0;
+               }
 
                /* get the "after" timestamp, to detect wrapped fields */
                err = rtc_read_time(rtc, &now);
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        if (err)
                return err;
 
-       rtc_subtract_offset(rtc, &alarm->time);
        scheduled = rtc_tm_to_time64(&alarm->time);
 
        /* Make sure we're not setting alarms in the past */
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
+       rtc_subtract_offset(rtc, &alarm->time);
+
        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->set_alarm)
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 
        mutex_unlock(&rtc->ops_lock);
 
-       rtc_add_offset(rtc, &alarm->time);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
index 097a4d4e2aba1e947ceaae3c3a7651a56927dac5..1925aaf09093713326553740db6db4358eb9fb51 100644 (file)
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
        }
 
        retval = rtc_register_device(mrst_rtc.rtc);
-       if (retval) {
-               retval = PTR_ERR(mrst_rtc.rtc);
+       if (retval)
                goto cleanup0;
-       }
 
        dev_dbg(dev, "initialised\n");
        return 0;
index 73cce3ecb97fefbccc66266a4fd29f08e453079e..a9f60d0ee02ea2e941edfc7969377b3482e5d04e 100644 (file)
 
 #define DASD_DIAG_MOD          "dasd_diag_mod"
 
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -1222,80 +1231,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
                device->hosts_dentry = pde;
 }
 
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
-{
-       struct dasd_ccw_req *cqr;
-
-       /* Sanity checks */
-       BUG_ON(datasize > PAGE_SIZE ||
-            (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
-       cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
-       if (cqr == NULL)
-               return ERR_PTR(-ENOMEM);
-       cqr->cpaddr = NULL;
-       if (cplength > 0) {
-               cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
-                                     GFP_ATOMIC | GFP_DMA);
-               if (cqr->cpaddr == NULL) {
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->data = NULL;
-       if (datasize > 0) {
-               cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
-               if (cqr->data == NULL) {
-                       kfree(cqr->cpaddr);
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->magic =  magic;
-       set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-       dasd_get_device(device);
-       return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+                                         struct dasd_device *device,
+                                         struct dasd_ccw_req *cqr)
 {
        unsigned long flags;
-       struct dasd_ccw_req *cqr;
-       char *data;
-       int size;
+       char *data, *chunk;
+       int size = 0;
 
-       size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
+       if (!cqr)
+               size += (sizeof(*cqr) + 7L) & -8L;
+
        spin_lock_irqsave(&device->mem_lock, flags);
-       cqr = (struct dasd_ccw_req *)
-               dasd_alloc_chunk(&device->ccw_chunks, size);
+       data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
-       if (cqr == NULL)
+       if (!chunk)
                return ERR_PTR(-ENOMEM);
-       memset(cqr, 0, sizeof(struct dasd_ccw_req));
-       data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
-       cqr->cpaddr = NULL;
+       if (!cqr) {
+               cqr = (void *) data;
+               data += (sizeof(*cqr) + 7L) & -8L;
+       }
+       memset(cqr, 0, sizeof(*cqr));
+       cqr->mem_chunk = chunk;
        if (cplength > 0) {
-               cqr->cpaddr = (struct ccw1 *) data;
-               data += cplength*sizeof(struct ccw1);
-               memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+               cqr->cpaddr = data;
+               data += cplength * sizeof(struct ccw1);
+               memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
        }
-       cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
@@ -1307,33 +1273,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
 
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
-       struct ccw1 *ccw;
-
-       /* Clear any idals used for the request. */
-       ccw = cqr->cpaddr;
-       do {
-               clear_normalized_cda(ccw);
-       } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-       kfree(cqr->cpaddr);
-       kfree(cqr->data);
-       kfree(cqr);
-       dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&device->mem_lock, flags);
-       dasd_free_chunk(&device->ccw_chunks, cqr);
+       dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
 }
@@ -1885,6 +1830,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
        }
 }
 
+static void __dasd_process_cqr(struct dasd_device *device,
+                              struct dasd_ccw_req *cqr)
+{
+       char errorstring[ERRORLENGTH];
+
+       switch (cqr->status) {
+       case DASD_CQR_SUCCESS:
+               cqr->status = DASD_CQR_DONE;
+               break;
+       case DASD_CQR_ERROR:
+               cqr->status = DASD_CQR_NEED_ERP;
+               break;
+       case DASD_CQR_CLEARED:
+               cqr->status = DASD_CQR_TERMINATED;
+               break;
+       default:
+               /* internal error 12 - wrong cqr status */
+               snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
+               dev_err(&device->cdev->dev,
+                       "An error occurred in the DASD device driver, "
+                       "reason=%s\n", errorstring);
+               BUG();
+       }
+       if (cqr->callback)
+               cqr->callback(cqr, cqr->callback_data);
+}
+
 /*
  * the cqrs from the final queue are returned to the upper layer
  * by setting a dasd_block state and calling the callback function
@@ -1895,40 +1867,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
-       void (*callback)(struct dasd_ccw_req *, void *data);
-       void *callback_data;
-       char errorstring[ERRORLENGTH];
 
        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
-               callback = cqr->callback;
-               callback_data = cqr->callback_data;
-               if (block)
+               if (!block) {
+                       __dasd_process_cqr(device, cqr);
+               } else {
                        spin_lock_bh(&block->queue_lock);
-               switch (cqr->status) {
-               case DASD_CQR_SUCCESS:
-                       cqr->status = DASD_CQR_DONE;
-                       break;
-               case DASD_CQR_ERROR:
-                       cqr->status = DASD_CQR_NEED_ERP;
-                       break;
-               case DASD_CQR_CLEARED:
-                       cqr->status = DASD_CQR_TERMINATED;
-                       break;
-               default:
-                       /* internal error 12 - wrong cqr status*/
-                       snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
-                       dev_err(&device->cdev->dev,
-                               "An error occurred in the DASD device driver, "
-                               "reason=%s\n", errorstring);
-                       BUG();
-               }
-               if (cqr->callback != NULL)
-                       (callback)(cqr, callback_data);
-               if (block)
+                       __dasd_process_cqr(device, cqr);
                        spin_unlock_bh(&block->queue_lock);
+               }
        }
 }
 
@@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
        cqr->callback_data = req;
        cqr->status = DASD_CQR_FILLED;
        cqr->dq = dq;
-       *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
 
        blk_mq_start_request(req);
        spin_lock(&block->queue_lock);
@@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
        unsigned long flags;
        int rc = 0;
 
-       cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+       cqr = blk_mq_rq_to_pdu(req);
        if (!cqr)
                return BLK_EH_DONE;
 
@@ -3174,9 +3123,9 @@ static int dasd_alloc_queue(struct dasd_block *block)
        int rc;
 
        block->tag_set.ops = &dasd_mq_ops;
-       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
-       block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
-       block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+       block->tag_set.nr_hw_queues = nr_hw_queues;
+       block->tag_set.queue_depth = queue_depth;
        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 
        rc = blk_mq_alloc_tag_set(&block->tag_set);
@@ -4038,7 +3987,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
        struct ccw1 *ccw;
        unsigned long *idaw;
 
-       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+                                  NULL);
 
        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed*/
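
With cmd_size set to sizeof(struct dasd_ccw_req), blk-mq now allocates the request payload (PDU) together with each struct request, so blk_mq_rq_to_pdu() hands back the cqr directly and the old pointer-behind-the-request indirection disappears. A minimal sketch of the PDU idiom under hypothetical names:

    #include <linux/blk-mq.h>

    struct example_cmd {
            int status;
            /* ... per-request driver state ... */
    };

    static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                         const struct blk_mq_queue_data *bd)
    {
            /* The PDU lives right behind the request; no extra allocation. */
            struct example_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

            cmd->status = 0;
            blk_mq_start_request(bd->rq);
            /* ... hand the request to the hardware ... */
            return BLK_STS_OK;
    }

    /*
     * At tag-set setup time (hypothetical):
     *   tag_set.ops = &example_mq_ops;
     *   tag_set.cmd_size = sizeof(struct example_cmd);
     */
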
index 5e963fe0e38d4c2125c43ae801ca7e9b28d98d07..e36a114354fc368e2141aae5c080c7f61b674da8 100644 (file)
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
        int rc;
        unsigned long flags;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
-       dasd_kfree_request(cqr, cqr->memdev);
+       dasd_sfree_request(cqr, cqr->memdev);
        return rc;
 }
 
index 131f1989f6f3dff0345250c71943f5ac338af19c..e1fe02477ea8fca951232dabe7f89754c8f287ff 100644 (file)
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Build the request */
        datasize = sizeof(struct dasd_diag_req) +
                count*sizeof(struct dasd_diag_bio);
-       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
index be208e7adcb46087e7fb2436fadf8a737d7c472e..bbf95b78ef5d9e4c5903e466e7de3f71b615c9ce 100644 (file)
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
        }
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
                                   0, /* use rcd_buf as data ara */
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_features)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
                                "allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
                                  sizeof(struct dasd_psf_ssc_data),
-                                 device);
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 
        cplength = 8;
        datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
         */
        itcw_size = itcw_calc_size(0, count, 0);
 
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
        cplength += count;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                 startdev);
+                                  startdev, NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        }
        /* Allocate the format ccw request. */
        fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, NULL);
        if (IS_ERR(fcp))
                return fcp;
 
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        }
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 
        /* Allocate the ccw request. */
        itcw_size = itcw_calc_size(0, ctidaw, 0);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
 
        useglobal = 0;
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
-                                  sizeof(struct dasd_snid_data), device);
+                                  sizeof(struct dasd_snid_data), device,
+                                  NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_perf_stats_t)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
        psf1 = psf_data[1];
 
        /* setup CCWs for PSF + RSSD */
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                        "Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_messages)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   sizeof(struct dasd_psf_prssd_data) + 1,
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
        int rc;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
-                                 sizeof(struct dasd_psf_cuir_response),
-                                 device);
+                                  sizeof(struct dasd_psf_cuir_response),
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
index 0af8c5295b650b1132e5946b123b558a08e91ccc..6ef8714dc6935047ec0f48d8cc3f6e34dd77c19c 100644 (file)
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * snss request now.
                 */
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
        if (rc)
                goto out;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
-                                  SNSS_DATA_SIZE, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+                                  SNSS_DATA_SIZE, device, NULL);
        if (IS_ERR(cqr)) {
                rc = -ENOMEM;
                cqr = NULL;
@@ -505,7 +505,7 @@ out:
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
        if (cqr)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 
        return rc;
 }
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
index a6b132f7e869eb4eb804b3fa8407cd064c92b699..56007a3e7f110358e27ad74563f24e428cbae473 100644 (file)
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
        datasize = sizeof(struct DE_fba_data) +
                nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
 
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
                datasize += (count - 1)*sizeof(struct LO_fba_data);
        }
        /* Allocate the ccw request. */
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
index 96709b1a7bf8d8af0f4e0db7748cd5ac8e5a8650..de6b96036aa40fb104e84c6c9e58ba89beebb232 100644 (file)
@@ -158,40 +158,33 @@ do { \
 
 struct dasd_ccw_req {
        unsigned int magic;             /* Eye catcher */
+       int intrc;                      /* internal error, e.g. from start_IO */
        struct list_head devlist;       /* for dasd_device request queue */
        struct list_head blocklist;     /* for dasd_block request queue */
-
-       /* Where to execute what... */
        struct dasd_block *block;       /* the originating block device */
        struct dasd_device *memdev;     /* the device used to allocate this */
        struct dasd_device *startdev;   /* device the request is started on */
        struct dasd_device *basedev;    /* base device if no block->base */
        void *cpaddr;                   /* address of ccw or tcw */
+       short retries;                  /* A retry counter */
        unsigned char cpmode;           /* 0 = cmd mode, 1 = itcw */
        char status;                    /* status of this request */
-       short retries;                  /* A retry counter */
+       char lpm;                       /* logical path mask */
        unsigned long flags;            /* flags of this request */
        struct dasd_queue *dq;
-
-       /* ... and how */
        unsigned long starttime;        /* jiffies time of request start */
        unsigned long expires;          /* expiration period in jiffies */
-       char lpm;                       /* logical path mask */
        void *data;                     /* pointer to data area */
-
-       /* these are important for recovering erroneous requests          */
-       int intrc;                      /* internal error, e.g. from start_IO */
        struct irb irb;                 /* device status in case of an error */
        struct dasd_ccw_req *refers;    /* ERP-chain queueing. */
        void *function;                 /* originating ERP action */
+       void *mem_chunk;
 
-       /* these are for statistics only */
        unsigned long buildclk;         /* TOD-clock of request generation */
        unsigned long startclk;         /* TOD-clock of request start */
        unsigned long stopclk;          /* TOD-clock of request interrupt */
        unsigned long endclk;           /* TOD-clock of request termination */
 
-        /* Callback that is called after reaching final status. */
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
 };
@@ -235,14 +228,6 @@ struct dasd_ccw_req {
 #define DASD_CQR_SUPPRESS_IL   6       /* Suppress 'Incorrect Length' error */
 #define DASD_CQR_SUPPRESS_CR   7       /* Suppress 'Command Reject' error */
 
-/*
- * There is no reliable way to determine the number of available CPUs on
- * LPAR but there is no big performance difference between 1 and the
- * maximum CPU number.
- * 64 is a good trade off performance wise.
- */
-#define DASD_NR_HW_QUEUES 64
-#define DASD_MAX_LCU_DEV 256
 #define DASD_REQ_PER_DEV 4
 
 /* Signature for error recovery functions. */
@@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations;
 extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
 
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
-       return set_normalized_cda(ccw, cda);
-}
-
 struct dasd_device *dasd_alloc_device(void);
 void dasd_free_device(struct dasd_device *);
 
index a070ef0efe65d0079cc10245b1ed8b79b8e8fba9..f230516abb96d31b4eabb2689a7230905857c48f 100644 (file)
@@ -5,6 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
        fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
index dce92b2a895d6ff3bbe38104ed08ea32c7979432..dbe7c7ac9ac8c8c4456f142b14c740d3bdc0c5e6 100644 (file)
 #define CCWCHAIN_LEN_MAX       256
 
 struct pfn_array {
+       /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
+       /* Array that stores PFNs of the pages need to pin. */
        unsigned long           *pa_iova_pfn;
+       /* Array that receives PFNs of the pages pinned. */
        unsigned long           *pa_pfn;
+       /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
 };
 
@@ -46,70 +50,33 @@ struct ccwchain {
 };
 
 /*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  * @pa: pfn_array on which to perform the operation
  * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
  *
  * Usage of pfn_array:
- * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- *                  by caller.
- * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
- *                  caller.
- * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
- *                  caller.
- *                  number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); all fields of
+ * this structure will be filled in by this function.
  *
  * Returns:
  *   Number of pages pinned on success.
- *   If @pa->pa_nr is 0 or negative, returns 0.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
  *   If no pages were pinned, returns -errno.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
-       int i, ret;
-
-       if (pa->pa_nr <= 0) {
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-       for (i = 1; i < pa->pa_nr; i++)
-               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
-       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
-                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
-       if (ret > 0 && ret != pa->pa_nr) {
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
-       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
-       pa->pa_nr = 0;
-       kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
 static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
 {
-       int ret = 0;
+       int i, ret = 0;
 
        if (!len)
                return 0;
 
-       if (pa->pa_nr)
+       if (pa->pa_nr || pa->pa_iova_pfn)
                return -EINVAL;
 
        pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
-       ret = pfn_array_pin(pa, mdev);
+       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+       for (i = 1; i < pa->pa_nr; i++)
+               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
 
-       if (ret > 0)
-               return ret;
-       else if (!ret)
+       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+       if (ret < 0) {
+               goto err_out;
+       } else if (ret > 0 && ret != pa->pa_nr) {
+               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
+               goto err_out;
+       }
 
+       return ret;
+
+err_out:
+       pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
+       pa->pa_iova_pfn = NULL;
 
        return ret;
 }
 
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+       pa->pa_nr = 0;
+       kfree(pa->pa_iova_pfn);
+}
+
 static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
 {
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
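
The refactor above inlines the guest-PFN setup into pfn_array_alloc_pin(): the first PFN is the IOVA shifted down by PAGE_SHIFT and the rest are consecutive. A small standalone sketch of that index arithmetic; iova_nr_pages() is a simplification assumed here, not a driver helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* Pages needed to cover [iova, iova + len). */
static int iova_nr_pages(uint64_t iova, unsigned int len)
{
	uint64_t first = iova >> PAGE_SHIFT;
	uint64_t last = (iova + len - 1) >> PAGE_SHIFT;

	return (int)(last - first + 1);
}

int main(void)
{
	uint64_t iova = 0x1f80;              /* crosses a page boundary */
	unsigned int len = 0x100;
	int i, nr = iova_nr_pages(iova, len);
	uint64_t *pfns = calloc(nr, sizeof(*pfns));

	if (!pfns)
		return 1;
	/* same fill as the hunk: first PFN, then consecutive ones */
	pfns[0] = iova >> PAGE_SHIFT;
	for (i = 1; i < nr; i++)
		pfns[i] = pfns[i - 1] + 1;

	for (i = 0; i < nr; i++)
		printf("pfn[%d]=0x%llx\n", i, (unsigned long long)pfns[i]);
	free(pfns);
	return 0;
}
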
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
  * This is the chain length not considering any TICs.
  * You need to do a new round for each TIC target.
  *
+ * The program is also validated for absence of not yet supported
+ * indirect data addressing scenarios.
+ *
  * Returns: the length of the ccw chain or -errno.
  */
 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
        do {
                cnt++;
 
+               /*
+                * As we don't want to fail direct addressing even if the
+                * orb specified one of the unsupported formats, we defer
+                * checking for IDAWs in unsupported formats to here.
+                */
+               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+                       return -EOPNOTSUPP;
+
                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;
 
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
-       int idaw_nr;
+       int ret;
 
        ccw = chain->ch_ccw + idx;
 
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
         * needed when translating a direct ccw to a idal ccw.
         */
        pat = chain->ch_pat + idx;
-       if (pfn_array_table_init(pat, 1))
-               return -ENOMEM;
-       idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
-                                     ccw->cda, ccw->count);
-       if (idaw_nr < 0)
-               return idaw_nr;
+       ret = pfn_array_table_init(pat, 1);
+       if (ret)
+               goto out_init;
+
+       ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+       if (ret < 0)
+               goto out_init;
 
        /* Translate this direct ccw to a idal ccw. */
-       idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+       idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
-               pfn_array_table_unpin_free(pat, cp->mdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unpin;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        pfn_array_table_idal_create_words(pat, idaws);
 
        return 0;
+
+out_unpin:
+       pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
+       return ret;
 }
 
 static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
-               return ret;
+               goto out_init;
 
        /* Translate idal ccw to use new allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ out_free_idaws:
        kfree(idaws);
 out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
        return ret;
 }
 
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        /*
         * XXX:
         * Only support prefetch enable mode now.
-        * Only support 64bit addressing idal.
-        * Only support 4k IDAW.
         */
-       if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+       if (!orb->cmd.pfch)
                return -EOPNOTSUPP;
 
        INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);
+       /* It is safe to force this: if c64 is not set but IDALs are
+        * used, ccwchain_calc_length returns an error.
+        */
+       cp->orb.cmd.c64 = 1;
 
        return ret;
 }
index ea6a2d0b2894decac95c3421c544183ee89c3383..770fa9cfc31041dd84a78a00f0f4135bef5a79ed 100644 (file)
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 {
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
+       int rc = -EAGAIN;
 
        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 
        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+               rc = 0;
                goto out_unlock;
        }
 
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
+       rc = 0;
 
 out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);
 
-       return 0;
+       return rc;
 }
 
 static struct css_device_id vfio_ccw_sch_ids[] = {
index 3c800642134e4330d62bb8c0053df62618840ff3..797a82731159a5f9f584810f924adc3467b1e702 100644 (file)
@@ -13,6 +13,9 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
        struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
         */
        cio_disable_subchannel(sch);
 }
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+       return p->sch->schid;
+}
 
 /*
  * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = &private->io_region;
        struct mdev_device *mdev = private->mdev;
+       char *errstr = "request";
 
        private->state = VFIO_CCW_STATE_BOXED;
 
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
+                       errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
-               if (io_region->ret_code)
+               if (io_region->ret_code) {
+                       errstr = "cp init";
                        goto err_out;
+               }
 
                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
+                       errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
+                       errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 
 err_out:
        private->state = VFIO_CCW_STATE_IDLE;
+       trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+                              io_region->ret_code, errstr);
 }
 
 /*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644 (file)
index 0000000..b1da53d
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+       TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+       TP_ARGS(fctl, schid, errno, errstr),
+
+       TP_STRUCT__entry(
+               __field(int, fctl)
+               __field_struct(struct subchannel_id, schid)
+               __field(int, errno)
+               __field(char*, errstr)
+       ),
+
+       TP_fast_assign(
+               __entry->fctl = fctl;
+               __entry->schid = schid;
+               __entry->errno = errno;
+               __entry->errstr = errstr;
+       ),
+
+       TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+                 __entry->schid.cssid,
+                 __entry->schid.ssid,
+                 __entry->schid.sch_no,
+                 __entry->fctl,
+                 __entry->errno,
+                 __entry->errstr)
+);
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
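
The header above follows the usual kernel tracepoint layout: it may be included many times, but the one translation unit that defines CREATE_TRACE_POINTS before including it (vfio_ccw_fsm.c, per the hunk above) makes <trace/define_trace.h> emit the event bodies, and the Makefile's -I$(src) plus TRACE_INCLUDE_PATH "." let that second pass re-find the header. A single-file userspace sketch of the declare-versus-define switch, with illustrative names:

#define CREATE_TRACE_POINTS  /* in the kernel, exactly one .c does this */

/* ---- what would live in the trace header --------------------------- */
void trace_io_fctl(int fctl, int err, const char *errstr);

#ifdef CREATE_TRACE_POINTS
#include <stdio.h>
/* with the macro set, the header also defines the body */
void trace_io_fctl(int fctl, int err, const char *errstr)
{
	printf("fctl=%x errno=%d info=%s\n", fctl, err, errstr);
}
#endif
/* --------------------------------------------------------------------- */

int main(void)
{
	/* mirrors trace_vfio_ccw_io_fctl(...) in the fsm hunk above */
	trace_io_fctl(0x4, -95, "cp init");
	return 0;
}
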
index 2a5fec55bf60f6f30fd684e1c8c5c9a594aa8e75..a246a618f9a497047e4a81614f38da1eb295ef0b 100644 (file)
@@ -829,6 +829,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+                                         unsigned int elements)
+{
+       unsigned int i;
+
+       for (i = 0; i < elements; i++)
+               memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+       buf->element[14].sflags = 0;
+       buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -     find number of SBALEs to cover range.
  * @start:                             Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
index 8e1474f1ffacfb22b773b02aa1bff6ff91c61ce9..d01ac29fd986d82b84b7215c5268c37e94aaadaa 100644 (file)
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-               struct qeth_qdio_out_buffer *buf,
-               enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        struct qaob *aob;
        struct qeth_qdio_out_buffer *buffer;
        enum iucv_tx_notify notification;
+       unsigned int i;
 
        aob = (struct qaob *) phys_to_virt(phys_aob_addr);
        QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        qeth_notify_skbs(buffer->q, buffer, notification);
 
        buffer->aob = NULL;
-       qeth_clear_output_buffer(buffer->q, buffer,
-                                QETH_QDIO_BUF_HANDLED_DELAYED);
+       /* Free dangling allocations. The attached skbs are handled by
+        * qeth_cleanup_handled_pending().
+        */
+       for (i = 0;
+            i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+            i++) {
+               if (aob->sba[i] && buffer->is_header[i])
+                       kmem_cache_free(qeth_core_header_cache,
+                                       (void *) aob->sba[i]);
+       }
+       atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-       /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
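
The replacement loop above frees only the storage-block entries the driver itself allocated, bounded by both the hardware-reported sb_count and the queue's element limit; the attached skbs stay untouched. A standalone sketch of that bounded, ownership-checked cleanup, using simplified types rather than the qeth structures:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ELEMENTS 16

struct aob  { unsigned int sb_count; void *sba[MAX_ELEMENTS]; };
struct obuf { int is_header[MAX_ELEMENTS]; };

static void free_dangling(struct aob *aob, struct obuf *buf)
{
	unsigned int i;

	/* bound by both the reported count and our own limit */
	for (i = 0; i < aob->sb_count && i < MAX_ELEMENTS; i++) {
		if (aob->sba[i] && buf->is_header[i]) {
			free(aob->sba[i]);   /* kmem_cache_free() analogue */
			aob->sba[i] = NULL;
		}
	}
}

int main(void)
{
	static char skb_data[1];             /* not ours to free */
	struct aob  a = { .sb_count = 2 };
	struct obuf b = { .is_header = { 1, 0 } };

	a.sba[0] = malloc(32);               /* driver-owned header */
	a.sba[1] = skb_data;
	free_dangling(&a, &b);
	printf("sba[0]=%p sba[1]=%p\n", a.sba[0], a.sba[1]);
	return 0;
}
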
 
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                        QETH_CARD_TEXT(queue->card, 5, "aob");
                        QETH_CARD_TEXT_(queue->card, 5, "%lx",
                                        virt_to_phys(buffer->aob));
+
+                       /* prepare the queue slot for re-use: */
+                       qeth_scrub_qdio_buffer(buffer->buffer,
+                                              QETH_MAX_BUFFER_ELEMENTS(card));
                        if (qeth_init_qdio_out_buf(queue, bidx)) {
                                QETH_CARD_TEXT(card, 2, "outofbuf");
                                qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                goto out;
        }
 
-       ccw_device_get_id(CARD_RDEV(card), &id);
+       ccw_device_get_id(CARD_DDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION2;
        request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
                          NETIF_F_IPV6_CSUM)
 /**
- * qeth_recover_features() - Restore device features after recovery
- * @dev:       the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev:       a net_device
  */
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
 {
-       netdev_features_t features = dev->features;
        struct qeth_card *card = dev->ml_priv;
+       netdev_features_t features;
 
+       rtnl_lock();
+       features = dev->features;
        /* force-off any feature that needs an IPA sequence.
         * netdev_update_features() will restart them.
         */
        dev->features &= ~QETH_HW_FEATURES;
        netdev_update_features(dev);
-
-       if (features == dev->features)
-               return;
-       dev_warn(&card->gdev->dev,
-                "Device recovery failed to restore all offload features\n");
+       if (features != dev->features)
+               dev_warn(&card->gdev->dev,
+                        "Device recovery failed to restore all offload features\n");
+       rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
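
Renaming qeth_recover_features() to qeth_enable_hw_features() also moves the rtnl locking inside the helper, so callers no longer have to hold the lock themselves (see the __qeth_l2/l3_set_online hunks below). A userspace sketch of pushing the lock into the helper; the names and feature mask are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static unsigned int dev_features = 0x7;
#define HW_FEATURES 0x4u

static void enable_hw_features(void)
{
	unsigned int before;

	pthread_mutex_lock(&rtnl);           /* previously the caller's job */
	before = dev_features;
	dev_features &= ~HW_FEATURES;        /* force-off ... */
	dev_features |= HW_FEATURES;         /* ... then restart them */
	if (before != dev_features)
		fprintf(stderr, "failed to restore all offload features\n");
	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	enable_hw_features();
	printf("features=%#x\n", dev_features);
	return 0;
}
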
index a7cb37da6a21313eda8d03119135f1475d35f47d..2487f0aeb165c1afae905540d1ff547f7fab4f54 100644 (file)
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 
 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
 
+       /* avoid racing against concurrent state change: */
+       if (!mutex_trylock(&card->conf_mutex))
+               return -EAGAIN;
+
        if (!qeth_card_hw_is_reachable(card)) {
                ether_addr_copy(dev->dev_addr, addr->sa_data);
-               return 0;
+               goto out_unlock;
        }
 
        /* don't register the same address twice */
        if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
            (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
+               goto out_unlock;
 
        /* add the new address, switch over, drop the old */
        rc = qeth_l2_send_setmac(card, addr->sa_data);
        if (rc)
-               return rc;
+               goto out_unlock;
        ether_addr_copy(old_addr, dev->dev_addr);
        ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
                qeth_l2_remove_mac(card, old_addr);
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-       return 0;
+
+out_unlock:
+       mutex_unlock(&card->conf_mutex);
+       return rc;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
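
The trylock in the hunk above avoids blocking inside the address change while a recovery holds conf_mutex: the caller gets -EAGAIN and may retry instead of racing the concurrent state change. A minimal pthread sketch of that pattern, with illustrative names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

static int set_mac_address(const char *mac)
{
	if (pthread_mutex_trylock(&conf_mutex) != 0)
		return -EAGAIN;              /* caller may retry later */

	printf("programming %s\n", mac);     /* stands in for setmac/remove */

	pthread_mutex_unlock(&conf_mutex);
	return 0;
}

int main(void)
{
	return set_mac_address("02:00:00:00:00:01") ? 1 : 0;
}
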
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_off(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                if (recovery_mode &&
                    card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                }
                /* this also sets saved unicast addresses */
                qeth_l2_set_rx_mode(card->dev);
-               rtnl_lock();
-               qeth_recover_features(card->dev);
-               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
index e7fa479adf47e0dd41bfacaed8fd347bc40b5581..5905dc63e2569baf761611ad25bf3b91786a3235 100644 (file)
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_on(card->dev);
        else
                netif_carrier_off(card->dev);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                rtnl_lock();
                if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                else
                        dev_open(card->dev);
                qeth_l3_set_rx_mode(card->dev);
-               qeth_recover_features(card->dev);
                rtnl_unlock();
        }
        qeth_trace_features(card);
index a9831bd37a73d52462489d025c1631a576ca2fbc..a57f3a7d47488e5aac06d24e47b14eb90fc9615c 100644 (file)
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
        u32 lun_count, nexus;
        u32 i, bus, target;
        u8 expose_flag, attribs;
-       u8 devtype;
 
        lun_count = aac_get_safw_phys_lun_count(dev);
 
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
                        continue;
 
                if (expose_flag != 0) {
-                       devtype = AAC_DEVTYPE_RAID_MEMBER;
-                       goto update_devtype;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_RAID_MEMBER;
+                       continue;
                }
 
                if (nexus != 0 && (attribs & 8)) {
-                       devtype = AAC_DEVTYPE_NATIVE_RAW;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_NATIVE_RAW;
                        dev->hba_map[bus][target].rmw_nexus =
                                        nexus;
                } else
-                       devtype = AAC_DEVTYPE_ARC_RAW;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_ARC_RAW;
 
                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
 
                aac_set_safw_target_qd(dev, bus, target);
-
-update_devtype:
-               dev->hba_map[bus][target].devtype = devtype;
        }
 }
 
index 2a3977823812cce8889e6577f65d3f862b626918..a39be94d110cdad1fc3153a58641c6d9f5a08eab 100644 (file)
@@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv)
 {
        const struct cxlflash_backend_ops *ops = NULL;
 
-#ifdef CONFIG_OCXL
+#ifdef CONFIG_OCXL_BASE
        if (ddv->flags & CXLFLASH_OCXL_DEV)
                ops = &cxlflash_ocxl_ops;
 #endif
 
-#ifdef CONFIG_CXL
+#ifdef CONFIG_CXL_BASE
        if (!(ddv->flags & CXLFLASH_OCXL_DEV))
                ops = &cxlflash_cxl_ops;
 #endif
index 0a95b5f253807888a8fa680397e5e344236d81d6..497a6838946198b6bac403b38e2ecc76b9678ff7 100644 (file)
@@ -134,15 +134,14 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
                rc = PTR_ERR(file);
                dev_err(dev, "%s: alloc_file failed rc=%d\n",
                        __func__, rc);
-               goto err5;
+               path_put(&path);
+               goto err3;
        }
 
        file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
        file->private_data = priv;
 out:
        return file;
-err5:
-       path_put(&path);
 err4:
        iput(inode);
 err3:
index 15c7f3b6f35eecee2ca3ca88c16c63809c2791bf..58bb70b886d70d714ee6b448843aa113c2b313c0 100644 (file)
@@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
        struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
        u16 bmic_device_index = 0;
 
-       bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
-
-       encl_dev->sas_address =
+       encl_dev->eli =
                hpsa_get_enclosure_logical_identifier(h, scsi3addr);
 
+       bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+
        if (encl_dev->target == -1 || encl_dev->lun == -1) {
                rc = IO_OK;
                goto out;
@@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
 static int
 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 {
-       *identifier = rphy->identify.sas_address;
+       struct Scsi_Host *shost = phy_to_shost(rphy);
+       struct ctlr_info *h;
+       struct hpsa_scsi_dev_t *sd;
+
+       if (!shost)
+               return -ENXIO;
+
+       h = shost_to_hba(shost);
+
+       if (!h)
+               return -ENXIO;
+
+       sd = hpsa_find_device_by_sas_rphy(h, rphy);
+       if (!sd)
+               return -ENXIO;
+
+       *identifier = sd->eli;
+
        return 0;
 }
 
index fb9f5e7f8209447771d07016bca7924774b143af..59e023696fffe96d3fb7869de5233161dce8fa1d 100644 (file)
@@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t {
 #define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
        unsigned char device_id[16];    /* from inquiry pg. 0x83 */
        u64 sas_address;
+       u64 eli;                        /* from report diags. */
        unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
        unsigned char model[16];        /* bytes 16-31 of inquiry data */
        unsigned char rev;              /* byte 2 of inquiry data */
index 0a9b8b387bd2e70e87310ef7908012a46f32942f..02d65dce74e504230ceb3080b58972d8d5dff950 100644 (file)
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
 
        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
                ioa_cfg->hrrq[i].allow_interrupts = 1;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
        if (ioa_cfg->sis64) {
                /* Set the adapter to the correct endian mode. */
                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
index 90394cef0f414cdac6a5990ecb0bbc5ed9961a89..0a5dd5595dd3c42179543d8453e9d3afc98ba3f7 100644 (file)
@@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
        init_completion(&qedf->flogi_compl);
 
+       status = qed_ops->common->update_drv_state(qedf->cdev, true);
+       if (status)
+               QEDF_ERR(&(qedf->dbg_ctx),
+                       "Failed to send drv state to MFW.\n");
+
        memset(&link_params, 0, sizeof(struct qed_link_params));
        link_params.link_up = true;
        status = qed_ops->common->set_link(qedf->cdev, &link_params);
@@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
        struct qedf_ctx *qedf;
+       int rc;
 
        if (!pdev) {
                QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
                qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
                pci_set_drvdata(pdev, NULL);
        }
+
+       rc = qed_ops->common->update_drv_state(qedf->cdev, false);
+       if (rc)
+               QEDF_ERR(&(qedf->dbg_ctx),
+                       "Failed to send drv state to MFW.\n");
+
        qed_ops->common->slowpath_stop(qedf->cdev);
        qed_ops->common->remove(qedf->cdev);
 
index cf274a79e77aac86d338d358a71a753636002812..091ec1207beae23040f17c0c6b1d32d8ca30192c 100644 (file)
@@ -2273,6 +2273,7 @@ kset_free:
 static void __qedi_remove(struct pci_dev *pdev, int mode)
 {
        struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+       int rval;
 
        if (qedi->tmf_thread) {
                flush_workqueue(qedi->tmf_thread);
@@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
        if (mode == QEDI_MODE_NORMAL)
                qedi_free_iscsi_pf_param(qedi);
 
+       rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+       if (rval)
+               QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
+
        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                qedi_ops->common->slowpath_stop(qedi->cdev);
                qedi_ops->common->remove(qedi->cdev);
@@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
                if (qedi_setup_boot_info(qedi))
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "No iSCSI boot target configured\n");
+
+               rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
+               if (rc)
+                       QEDI_ERR(&qedi->dbg_ctx,
+                                "Failed to send drv state to MFW\n");
+
        }
 
        return 0;
index 9442e18aef6fdbf818ba5999e0773417fde9149a..0f94b1d62d3f2f8611fb5a0924bf374b368a7028 100644 (file)
@@ -361,6 +361,8 @@ struct ct_arg {
        dma_addr_t      rsp_dma;
        u32             req_size;
        u32             rsp_size;
+       u32             req_allocated_size;
+       u32             rsp_allocated_size;
        void            *req;
        void            *rsp;
        port_id_t       id;
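
The two new fields record how many bytes were actually handed to dma_alloc_coherent(), so every dma_free_coherent() in the hunks below can pass back the same size instead of assuming sizeof(struct ct_sns_pkt), which is wrong whenever a larger response buffer was requested. A userspace sketch of that size bookkeeping, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

/* The size given to the allocator is recorded beside the pointer and
 * reused on the free path instead of being re-derived with sizeof(). */
struct ct_buf {
	void   *ptr;
	size_t  allocated_size;
};

static int ct_buf_alloc(struct ct_buf *b, size_t size)
{
	b->ptr = calloc(1, size);
	if (!b->ptr)
		return -1;
	b->allocated_size = size;            /* remember for the free path */
	return 0;
}

static void ct_buf_free(struct ct_buf *b)
{
	/* dma_free_coherent() analogue: size must match the alloc */
	free(b->ptr);
	b->ptr = NULL;
	b->allocated_size = 0;
}

int main(void)
{
	struct ct_buf rsp;

	if (ct_buf_alloc(&rsp, 4096))
		return 1;
	printf("rsp %zu bytes at %p\n", rsp.allocated_size, rsp.ptr);
	ct_buf_free(&rsp);
	return 0;
}
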
index 4bc2b66b299f234b098fdecb2e80e53555b9d684..2c35b0b2baa07f27211140fb0a7ea48b4cf8a7f4 100644 (file)
@@ -556,7 +556,7 @@ err2:
                /* please ignore kernel warning. otherwise, we have mem leak. */
                if (sp->u.iocb_cmd.u.ctarg.req) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.req,
                            sp->u.iocb_cmd.u.ctarg.req_dma);
                        sp->u.iocb_cmd.u.ctarg.req = NULL;
@@ -564,7 +564,7 @@ err2:
 
                if (sp->u.iocb_cmd.u.ctarg.rsp) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.rsp,
                            sp->u.iocb_cmd.u.ctarg.rsp_dma);
                        sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
            sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
            GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "%s: Failed to allocate ct_sns request.\n",
@@ -3388,14 +3396,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
 {
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                       sizeof(struct ct_sns_pkt),
+                       sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                        sp->u.iocb_cmd.u.ctarg.req,
                        sp->u.iocb_cmd.u.ctarg.req_dma);
                sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                       sizeof(struct ct_sns_pkt),
+                       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                        sp->u.iocb_cmd.u.ctarg.rsp,
                        sp->u.iocb_cmd.u.ctarg.rsp_dma);
                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3596,14 +3604,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
                /* please ignore kernel warning. otherwise, we have mem leak. */
                if (sp->u.iocb_cmd.u.ctarg.req) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                               sizeof(struct ct_sns_pkt),
+                               sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                                sp->u.iocb_cmd.u.ctarg.req,
                                sp->u.iocb_cmd.u.ctarg.req_dma);
                        sp->u.iocb_cmd.u.ctarg.req = NULL;
                }
                if (sp->u.iocb_cmd.u.ctarg.rsp) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                               sizeof(struct ct_sns_pkt),
+                               sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                                sp->u.iocb_cmd.u.ctarg.rsp,
                                sp->u.iocb_cmd.u.ctarg.rsp_dma);
                        sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3654,6 +3662,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
                sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
                GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.req) {
                ql_log(ql_log_warn, vha, 0xd041,
                    "Failed to allocate ct_sns request.\n");
@@ -3663,6 +3672,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
                sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
                GFP_KERNEL);
+       sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
        if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                ql_log(ql_log_warn, vha, 0xd042,
                    "Failed to allocate ct_sns request.\n");
@@ -4142,14 +4152,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
                         */
                        if (sp->u.iocb_cmd.u.ctarg.req) {
                                dma_free_coherent(&vha->hw->pdev->dev,
-                                   sizeof(struct ct_sns_pkt),
+                                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                                    sp->u.iocb_cmd.u.ctarg.req,
                                    sp->u.iocb_cmd.u.ctarg.req_dma);
                                sp->u.iocb_cmd.u.ctarg.req = NULL;
                        }
                        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                                dma_free_coherent(&vha->hw->pdev->dev,
-                                   sizeof(struct ct_sns_pkt),
+                                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                                    sp->u.iocb_cmd.u.ctarg.rsp,
                                    sp->u.iocb_cmd.u.ctarg.rsp_dma);
                                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4179,14 +4189,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
                /* please ignore kernel warning. Otherwise, we have mem leak. */
                if (sp->u.iocb_cmd.u.ctarg.req) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.req,
                            sp->u.iocb_cmd.u.ctarg.req_dma);
                        sp->u.iocb_cmd.u.ctarg.req = NULL;
                }
                if (sp->u.iocb_cmd.u.ctarg.rsp) {
                        dma_free_coherent(&vha->hw->pdev->dev,
-                           sizeof(struct ct_sns_pkt),
+                           sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                            sp->u.iocb_cmd.u.ctarg.rsp,
                            sp->u.iocb_cmd.u.ctarg.rsp_dma);
                        sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4281,14 +4291,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
 done_free_sp:
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.req,
                    sp->u.iocb_cmd.u.ctarg.req_dma);
                sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.rsp,
                    sp->u.iocb_cmd.u.ctarg.rsp_dma);
                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4349,6 +4359,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
                sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
                        &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
                        &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+               sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
                if (!sp->u.iocb_cmd.u.ctarg.req) {
                        ql_log(ql_log_warn, vha, 0xffff,
                            "Failed to allocate ct_sns request.\n");
@@ -4366,6 +4377,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
                sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
                        &vha->hw->pdev->dev, rspsz,
                        &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+               sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
                if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                        ql_log(ql_log_warn, vha, 0xffff,
                            "Failed to allocate ct_sns request.\n");
@@ -4425,14 +4437,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
 done_free_sp:
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.req,
                    sp->u.iocb_cmd.u.ctarg.req_dma);
                sp->u.iocb_cmd.u.ctarg.req = NULL;
        }
        if (sp->u.iocb_cmd.u.ctarg.rsp) {
                dma_free_coherent(&vha->hw->pdev->dev,
-                   sizeof(struct ct_sns_pkt),
+                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                    sp->u.iocb_cmd.u.ctarg.rsp,
                    sp->u.iocb_cmd.u.ctarg.rsp_dma);
                sp->u.iocb_cmd.u.ctarg.rsp = NULL;
index 7b675243bd16c61a703cffa69c0f5f0a55a62ea6..db0e3279e07abe06071da74012ebf04f3a690a39 100644 (file)
@@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                conflict_fcport =
                                        qla2x00_find_fcport_by_wwpn(vha,
                                            e->port_name, 0);
-                               ql_dbg(ql_dbg_disc, vha, 0x20e6,
-                                   "%s %d %8phC post del sess\n",
-                                   __func__, __LINE__,
-                                   conflict_fcport->port_name);
-                               qlt_schedule_sess_for_deletion
-                                       (conflict_fcport);
+                               if (conflict_fcport) {
+                                       qlt_schedule_sess_for_deletion
+                                               (conflict_fcport);
+                                       ql_dbg(ql_dbg_disc, vha, 0x20e6,
+                                           "%s %d %8phC post del sess\n",
+                                           __func__, __LINE__,
+                                           conflict_fcport->port_name);
+                               }
                        }
 
                        /* FW already picked this loop id for another fcport */
index e881fce7477a90956a4d45b856e484d89821b9e4..9f309e572be468b0c76bb3e648fffbbd33594e0c 100644 (file)
@@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
+       ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
        if (ha->isp_ops->initialize_adapter(base_vha)) {
                ql_log(ql_log_fatal, base_vha, 0x00d6,
                    "Failed to initialize adapter - Adapter flags %x.\n",
@@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            host->can_queue, base_vha->req,
            base_vha->mgmt_svr_loop_id, host->sg_tablesize);
 
-       ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
-
        if (ha->mqenable) {
                bool mq = false;
                bool startit = false;
index 0fea2e2326becbf4993dd7cc216e36dad529d678..1027b0cb7fa3634baf0bd870ffdc93e9286cac8e 100644 (file)
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
        struct qla_tgt *tgt = sess->tgt;
-       struct qla_hw_data *ha = sess->vha->hw;
        unsigned long flags;
 
        if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
                        return;
        }
 
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->deleted == QLA_SESS_DELETED)
                sess->logout_on_delete = 0;
 
+       spin_lock_irqsave(&sess->vha->work_lock, flags);
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+               spin_unlock_irqrestore(&sess->vha->work_lock, flags);
                return;
        }
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+       spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
        sess->disc_state = DSC_DELETE_PEND;
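
The lock switch above moves the deleted-flag test-and-set under vha->work_lock, the lock that serializes the deletion work, so two paths cannot both queue the same session for deletion. A small pthread sketch of the idiom; fields and lock names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static bool deletion_in_progress;

/* Returns true only for the one caller that owns queueing the work. */
static bool schedule_deletion(void)
{
	bool queued = false;

	pthread_mutex_lock(&work_lock);
	if (!deletion_in_progress) {
		deletion_in_progress = true;
		queued = true;
	}
	pthread_mutex_unlock(&work_lock);
	return queued;
}

int main(void)
{
	printf("first: %d second: %d\n", schedule_deletion(),
	       schedule_deletion());
	return 0;
}
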
 
index 24d7496cd9e23cfc2a97126fc22b5f4c25a253b0..364e71861bfd5c2c17caf1e93cdeae669e95b971 100644 (file)
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
        int k = sdebug_add_host;
 
        stop_all_queued();
-       free_all_queued();
        for (; k; k--)
                sdebug_remove_adapter();
+       free_all_queued();
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
index 8932ae81a15a7c36bf7e7800cada555d87a6bd26..2715cdaa669c0802bfb224157e33803e5d0310aa 100644 (file)
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
                rtn = host->hostt->eh_timed_out(scmd);
 
        if (rtn == BLK_EH_DONE) {
+               /*
+                * For blk-mq, we must set the request state to complete now
+                * before sending the request to the scsi error handler. This
+                * will prevent a use-after-free in the event the LLD manages
+                * to complete the request before the error handler finishes
+                * processing this timed out request.
+                *
+                * If the request was already completed, then the LLD beat the
+                * time out handler from transferring the request to the scsi
+                * error handler. In that case we can return immediately as no
+                * further action is required.
+                */
+               if (req->q->mq_ops && !blk_mq_mark_complete(req))
+                       return rtn;
                if (scsi_abort_command(scmd) != SUCCESS) {
                        set_host_byte(scmd, DID_TIME_OUT);
                        scsi_eh_scmd_add(scmd);
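
The new comment and check rely on blk_mq_mark_complete() performing an atomic state transition: whichever of the timeout handler or the LLD's completion moves the request out of in-flight first wins, and the loser backs off. A userspace sketch of that compare-and-exchange idea; the state values here are made up, not the block layer's encoding:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum rq_state { RQ_IN_FLIGHT, RQ_COMPLETE };

struct request { _Atomic enum rq_state state; };

/* Returns true if we transitioned the request to COMPLETE ourselves. */
static bool mark_complete(struct request *rq)
{
	enum rq_state expected = RQ_IN_FLIGHT;

	return atomic_compare_exchange_strong(&rq->state, &expected,
					      RQ_COMPLETE);
}

int main(void)
{
	struct request rq = { RQ_IN_FLIGHT };

	printf("timeout wins: %d\n", mark_complete(&rq)); /* 1 */
	printf("lld loses:    %d\n", mark_complete(&rq)); /* 0: already done */
	return 0;
}
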
index 1da3d71e9f61f784e8131093bd5378d94bb98745..13948102ca298cf1a20d45d49781aa4dee55d851 100644 (file)
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
 
        /* the blk_end_sync_io() doesn't check the error */
        if (inflight)
-               blk_mq_complete_request(req);
+               __blk_complete_request(req);
        return BLK_EH_DONE;
 }
 
index a14fef11776ec846c482178ee555c8122a598d22..2bf3bf73886e373ab573cf6de16ee4a1802c288d 100644 (file)
@@ -391,7 +391,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
  * Check that all zones of the device are equal. The last zone can however
  * be smaller. The zone size must also be a power of two number of LBAs.
  *
- * Returns the zone size in bytes upon success or an error code upon failure.
+ * Returns the zone size in number of blocks upon success or an error code
+ * upon failure.
  */
 static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 {
@@ -401,7 +402,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
        unsigned char *rec;
        unsigned int buf_len;
        unsigned int list_length;
-       int ret;
+       s64 ret;
        u8 same;
 
        /* Get a buffer */
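
The type change above matters because the function reports the zone size in blocks
through its return value, and a size that fits in an s64 can be mangled by a round
trip through int. A small stand-alone illustration (the value is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t zone_blocks = 0x180000000LL; /* > INT_MAX, fits in s64 */
        int truncated = (int)zone_blocks;    /* implementation-defined narrowing */
        printf("s64: %lld  int: %d\n", (long long)zone_blocks, truncated);
        return 0;
    }
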
index 53ae52dbff84afd2021e80b7c1329cb7c53117c2..cd2fdac000c9e23fbf1df8cca453f30379b22d66 100644 (file)
@@ -51,6 +51,7 @@ static int sg_version_num = 30536;    /* 2 digits for each component */
 #include <linux/atomic.h>
 #include <linux/ratelimit.h>
 #include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
        sdev_prefix_printk(prefix, (sdp)->device,               \
                           (sdp)->disk->disk_name, fmt, ##a)
 
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+       if (filp->f_cred != current_real_cred()) {
+               pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+                       caller, task_tgid_vnr(current), current->comm);
+               return -EPERM;
+       }
+       if (uaccess_kernel()) {
+               pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+                       caller, task_tgid_vnr(current), current->comm);
+               return -EACCES;
+       }
+       return 0;
+}
+
 static int sg_allow_access(struct file *filp, unsigned char *cmd)
 {
        struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
        struct sg_header *old_hdr = NULL;
        int retval = 0;
 
+       /*
+        * This could cause a response to be stranded. Close the associated
+        * file descriptor to free up any resources being held.
+        */
+       retval = sg_check_file_access(filp, __func__);
+       if (retval)
+               return retval;
+
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
        struct sg_header old_hdr;
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
+       int retval;
 
-       if (unlikely(uaccess_kernel()))
-               return -EINVAL;
+       retval = sg_check_file_access(filp, __func__);
+       if (retval)
+               return retval;
 
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
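
The guard added above keys off two conditions: the caller's credentials must still
match the ones captured when the file was opened, and the call must not come from a
kernel uaccess context. A toy userspace model of the first check, snapshotting only
the effective UID at open time (the kernel compares the full struct cred via
filp->f_cred and current_real_cred(), not just a UID):

    #include <stdio.h>
    #include <unistd.h>

    struct fd_state { uid_t opener_euid; };  /* captured at open() time */

    static int check_file_access(const struct fd_state *st)
    {
        if (st->opener_euid != geteuid())
            return -1;  /* the kernel version returns -EPERM here */
        return 0;       /* same principal that opened the fd */
    }

    int main(void)
    {
        struct fd_state st = { .opener_euid = geteuid() };
        printf("access %s\n", check_file_access(&st) ? "denied" : "granted");
        return 0;
    }
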
index 36f59a1be7e9a60be61c2b1ba8f7468dfbd8c6c9..61389bdc7926690100fc0a38fc59e8b6a73853ab 100644 (file)
@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
 static int scsifront_sdev_configure(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);
+               if (err) {
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+                       return err;
+               }
+       }
 
        return 0;
 }
@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
 static void scsifront_sdev_destroy(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+       }
 }
 
 static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 
                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
-                       if (device_state == XenbusStateConnected)
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                       if (device_state == XenbusStateConnected) {
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
+                       }
                        break;
                default:
                        break;
index 32f0748fd0678fedc520b08b807590981d007154..0097a939487fd30cc29e73424656dc2f912cc099 100644 (file)
 #define GPC_PGC_SW2ISO_SHIFT   0x8
 #define GPC_PGC_SW_SHIFT       0x0
 
+#define GPC_PGC_PCI_PDN                0x200
+#define GPC_PGC_PCI_SR         0x20c
+
 #define GPC_PGC_GPU_PDN                0x260
 #define GPC_PGC_GPU_PUPSCR     0x264
 #define GPC_PGC_GPU_PDNSCR     0x268
+#define GPC_PGC_GPU_SR         0x26c
+
+#define GPC_PGC_DISP_PDN       0x240
+#define GPC_PGC_DISP_SR                0x24c
 
 #define GPU_VPU_PUP_REQ                BIT(1)
 #define GPU_VPU_PDN_REQ                BIT(0)
@@ -318,10 +325,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
        { }
 };
 
+static const struct regmap_range yes_ranges[] = {
+       regmap_reg_range(GPC_CNTR, GPC_CNTR),
+       regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
+       regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
+       regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
+};
+
+static const struct regmap_access_table access_table = {
+       .yes_ranges     = yes_ranges,
+       .n_yes_ranges   = ARRAY_SIZE(yes_ranges),
+};
+
 static const struct regmap_config imx_gpc_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
+       .rd_table = &access_table,
+       .wr_table = &access_table,
        .max_register = 0x2ac,
 };
 
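
With rd_table and wr_table set, regmap rejects any access that does not fall inside
one of the yes_ranges, so stray reads of undocumented GPC registers fail instead of
touching the hardware. A minimal model of that range check, using only the PGC
ranges visible in the hunk (this is not the regmap implementation):

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned int min, max; };

    static const struct range yes_ranges[] = {
        { 0x200, 0x20c },  /* PCI PDN..SR  */
        { 0x240, 0x24c },  /* DISP PDN..SR */
        { 0x260, 0x26c },  /* GPU PDN..SR  */
    };

    static bool reg_allowed(unsigned int reg)
    {
        for (unsigned int i = 0; i < sizeof(yes_ranges) / sizeof(yes_ranges[0]); i++)
            if (reg >= yes_ranges[i].min && reg <= yes_ranges[i].max)
                return true;
        return false;
    }

    int main(void)
    {
        printf("0x260 allowed: %d, 0x100 allowed: %d\n",
               reg_allowed(0x260), reg_allowed(0x100));
        return 0;
    }
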
index f4e3bd40c72e60c0448c98456f7b53f6be7936bd..6ef18cf8f24387e324cf455ae98c30f2b27c95d3 100644 (file)
 
 #define GPC_M4_PU_PDN_FLG              0x1bc
 
-
-#define PGC_MIPI                       4
-#define PGC_PCIE                       5
-#define PGC_USB_HSIC                   8
+/*
+ * The PGC offset values in the Reference Manual
+ * (Rev. 1, 01/2018 and older) GPC chapter's
+ * GPC_PGC memory map are incorrect; the offset
+ * values below are taken from the design RTL.
+ */
+#define PGC_MIPI                       16
+#define PGC_PCIE                       17
+#define PGC_USB_HSIC                   20
 #define GPC_PGC_CTRL(n)                        (0x800 + (n) * 0x40)
 #define GPC_PGC_SR(n)                  (GPC_PGC_CTRL(n) + 0xc)
 
index 9dc02f390ba314bf8cfbb1b86223af1859af2507..5856e792d09c8d317b01627c2d03a97eeaebff37 100644 (file)
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
 
 config QCOM_COMMAND_DB
        bool "Qualcomm Command DB"
-       depends on (ARCH_QCOM && OF) || COMPILE_TEST
+       depends on ARCH_QCOM || COMPILE_TEST
+       depends on OF_RESERVED_MEM
        help
          Command DB queries shared memory by key string for shared system
          resources. Platform drivers that require to set state of a shared
index 95120acc4d806da630f49737cca1eede3286edae..50d03d8b4f9a55f50d52f328039afe0c27991740 100644 (file)
@@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
 
 static bool has_cpg_mstp;
 
-static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 {
        struct generic_pm_domain *genpd = &pd->genpd;
        const char *name = pd->genpd.name;
        struct dev_power_governor *gov = &simple_qos_governor;
+       int error;
 
        if (pd->flags & PD_CPU) {
                /*
@@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
        rcar_sysc_power_up(&pd->ch);
 
 finalize:
-       pm_genpd_init(genpd, gov, false);
+       error = pm_genpd_init(genpd, gov, false);
+       if (error)
+               pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+       return error;
 }
 
 static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void)
        pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
        iowrite32(syscier, base + SYSCIER);
 
+       /*
+        * First, create all PM domains
+        */
        for (i = 0; i < info->num_areas; i++) {
                const struct rcar_sysc_area *area = &info->areas[i];
                struct rcar_sysc_pd *pd;
@@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void)
                pd->ch.isr_bit = area->isr_bit;
                pd->flags = area->flags;
 
-               rcar_sysc_pd_setup(pd);
-               if (area->parent >= 0)
-                       pm_genpd_add_subdomain(domains->domains[area->parent],
-                                              &pd->genpd);
+               error = rcar_sysc_pd_setup(pd);
+               if (error)
+                       goto out_put;
 
                domains->domains[area->isr_bit] = &pd->genpd;
        }
 
+       /*
+        * Second, link all PM domains to their parents
+        */
+       for (i = 0; i < info->num_areas; i++) {
+               const struct rcar_sysc_area *area = &info->areas[i];
+
+               if (!area->name || area->parent < 0)
+                       continue;
+
+               error = pm_genpd_add_subdomain(domains->domains[area->parent],
+                                              domains->domains[area->isr_bit]);
+               if (error)
+                       pr_warn("Failed to add PM subdomain %s to parent %u\n",
+                               area->name, area->parent);
+       }
+
        error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
 
 out_put:
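
The rework above splits initialization into two passes: create every PM domain
first, then link children to parents, so a link never touches a domain that has not
been set up yet, and a setup failure can abort before any linking happens. A
generic sketch of the pattern with made-up data (not the rcar-sysc tables):

    #include <stdio.h>

    struct area { const char *name; int parent; };  /* parent: index or -1 */

    int main(void)
    {
        struct area areas[] = { { "always-on", -1 }, { "gpu", 0 }, { "cpu", 0 } };
        const int n = sizeof(areas) / sizeof(areas[0]);
        const struct area *domains[3];

        for (int i = 0; i < n; i++)      /* pass 1: create all domains */
            domains[i] = &areas[i];

        for (int i = 0; i < n; i++) {    /* pass 2: link to parents */
            if (areas[i].parent < 0)
                continue;
            printf("%s -> subdomain of %s\n",
                   domains[i]->name, domains[areas[i].parent]->name);
        }
        return 0;
    }
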
index 9d1109e43ed442e5614568c312ee31e01ca0dd17..99073325b0c00ca8784cbd1ed7849d4cdb09ec8e 100644 (file)
@@ -201,7 +201,7 @@ struct ion_dma_buf_attachment {
        struct list_head list;
 };
 
-static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
+static int ion_dma_buf_attach(struct dma_buf *dmabuf,
                              struct dma_buf_attachment *attachment)
 {
        struct ion_dma_buf_attachment *a;
@@ -219,7 +219,7 @@ static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
        }
 
        a->table = table;
-       a->dev = dev;
+       a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);
 
        attachment->priv = a;
@@ -375,8 +375,6 @@ static const struct dma_buf_ops dma_buf_ops = {
        .detach = ion_dma_buf_detatch,
        .begin_cpu_access = ion_dma_buf_begin_cpu_access,
        .end_cpu_access = ion_dma_buf_end_cpu_access,
-       .map_atomic = ion_dma_buf_kmap,
-       .unmap_atomic = ion_dma_buf_kunmap,
        .map = ion_dma_buf_kmap,
        .unmap = ion_dma_buf_kunmap,
 };
index e8c4403297082898c54d0aacf09c778a399aa2d0..31db510018a9462ead01b016f02c4952c6871c96 100644 (file)
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
        struct page **tmp = pages;
 
        if (!pages)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
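
Returning ERR_PTR(-ENOMEM) instead of NULL matters when callers test the result
with IS_ERR()/PTR_ERR(): a bare NULL would sail through such a check as if the
mapping had succeeded. A toy re-implementation of the kernel helpers to show the
convention (simplified, userspace-only):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define PTR_ERR(ptr)  ((long)(ptr))

    static void *map_kernel(int fail)
    {
        static char page[16];
        return fail ? ERR_PTR(-ENOMEM) : page;
    }

    int main(void)
    {
        void *vaddr = map_kernel(1);
        if (IS_ERR(vaddr))
            printf("mapping failed: %ld\n", PTR_ERR(vaddr)); /* -12 */
        return 0;
    }
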
index ea194aa01a642e0c691c9cb8b78e9d8ef43cfa79..257b0daff01f21317cf1e8f1a251efdbd9dc1189 100644 (file)
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
        /* Make sure D/A update mode is direct update */
        outb(0, dev->iobase + DAQP_AUX_REG);
 
-       for (i = 0; i > insn->n; i++) {
+       for (i = 0; i < insn->n; i++) {
                unsigned int val = data[i];
                int ret;
 
index 0ecffab52ec28f0faeaf84624db51774a3080419..abdaf7cf816269fb9063928607580ee38b114fef 100644 (file)
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
        memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
 
        if (dev->flags & IFF_PROMISC) {
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_PROMISC);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_PROMISC);
                goto spin_unlock;
        }
 
        if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
            (dev->flags & IFF_ALLMULTI)) {
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_MCASTALL);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_MCASTALL);
                goto spin_unlock;
        }
 
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
                                               ETH_ALEN * mc_count);
        } else {
                priv->sme_i.sme_flag |= SME_MULTICAST;
-               hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
-                                           MCAST_FILTER_MCAST);
+               hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+                                          MCAST_FILTER_MCAST);
        }
 
 spin_unlock:
index a3a83424a926c793c99cdc3ee54f0f61b2b679cd..16478fe9e3f8ad3e07cdfbc84db734d4834f6530 100644 (file)
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>
 
+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"
 
index 673fdce2553070ab5cf28182edf2adcfe01e110d..ff7832798a7730c0932853df571b98ded8e054b0 100644 (file)
@@ -7,7 +7,6 @@ config R8188EU
        select LIB80211
        select LIB80211_CRYPT_WEP
        select LIB80211_CRYPT_CCMP
-       select LIB80211_CRYPT_TKIP
        ---help---
        This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
        If built as a module, it will be called r8188eu.
index 05936a45eb93dc2ba8ca79ce1ba0278d3f8c25db..c6857a5be12aaab6968a67ac93e71b1a4a77cc93 100644 (file)
@@ -23,7 +23,6 @@
 #include <mon.h>
 #include <wifi.h>
 #include <linux/vmalloc.h>
-#include <net/lib80211.h>
 
 #define ETHERNET_HEADER_SIZE   14      /*  Ethernet Header Length */
 #define LLC_HEADER_SIZE                        6       /*  LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
 static int recvframe_chkmic(struct adapter *adapter,
                            struct recv_frame *precvframe)
 {
-       int res = _SUCCESS;
-       struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
-       struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
+       int     i, res = _SUCCESS;
+       u32     datalen;
+       u8      miccode[8];
+       u8      bmic_err = false, brpt_micerror = true;
+       u8      *pframe, *payload, *pframemic;
+       u8      *mickey;
+       struct  sta_info                *stainfo;
+       struct  rx_pkt_attrib   *prxattrib = &precvframe->attrib;
+       struct  security_priv   *psecuritypriv = &adapter->securitypriv;
+
+       struct mlme_ext_priv    *pmlmeext = &adapter->mlmeextpriv;
+       struct mlme_ext_info    *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+       stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
 
        if (prxattrib->encrypt == _TKIP_) {
+               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                        ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                        ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                         __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+                         prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+               /* calculate mic code */
                if (stainfo) {
-                       int key_idx;
-                       const int iv_len = 8, icv_len = 4, key_length = 32;
-                       struct sk_buff *skb = precvframe->pkt;
-                       u8 key[32], iv[8], icv[4], *pframe = skb->data;
-                       void *crypto_private = NULL;
-                       struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-                       struct security_priv *psecuritypriv = &adapter->securitypriv;
-
                        if (IS_MCAST(prxattrib->ra)) {
                                if (!psecuritypriv) {
                                        res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
                                        DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
                                        goto exit;
                                }
-                               key_idx = prxattrib->key_index;
-                               memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-                               memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+                               mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+                                        ("\n %s: bcmc key\n", __func__));
                        } else {
-                               key_idx = 0;
-                               memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-                               memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+                               mickey = &stainfo->dot11tkiprxmickey.skey[0];
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n %s: unicast key\n", __func__));
                        }
 
-                       if (!crypto_ops) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       /* icv_len included the mic code */
+                       datalen = precvframe->pkt->len-prxattrib->hdrlen -
+                                 prxattrib->iv_len-prxattrib->icv_len-8;
+                       pframe = precvframe->pkt->data;
+                       payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
 
-                       memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-                       memcpy(icv, pframe + skb->len - icv_len, icv_len);
-                       memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
+                       RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+                       rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+                                          (unsigned char)prxattrib->priority); /* care the length of the data */
 
-                       skb_pull(skb, iv_len);
-                       skb_trim(skb, skb->len - icv_len);
+                       pframemic = payload+datalen;
 
-                       crypto_private = crypto_ops->init(key_idx);
-                       if (!crypto_private) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
+                       bmic_err = false;
+
+                       for (i = 0; i < 8; i++) {
+                               if (miccode[i] != *(pframemic+i)) {
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                                ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+                                                 __func__, i, miccode[i], i, *(pframemic + i)));
+                                       bmic_err = true;
+                               }
                        }
 
-                       memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-                       skb_push(skb, iv_len);
-                       skb_put(skb, icv_len);
+                       if (bmic_err) {
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                                        *(pframemic-8), *(pframemic-7), *(pframemic-6),
+                                        *(pframemic-5), *(pframemic-4), *(pframemic-3),
+                                        *(pframemic-2), *(pframemic-1)));
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+                                        *(pframemic-16), *(pframemic-15), *(pframemic-14),
+                                        *(pframemic-13), *(pframemic-12), *(pframemic-11),
+                                        *(pframemic-10), *(pframemic-9)));
+                               {
+                                       uint i;
 
-                       memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-                       memcpy(pframe + skb->len - icv_len, icv, icv_len);
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                                ("\n ======dump packet (len=%d)======\n",
+                                                precvframe->pkt->len));
+                                       for (i = 0; i < precvframe->pkt->len; i += 8) {
+                                               RT_TRACE(_module_rtl871x_recv_c_,
+                                                        _drv_err_,
+                                                        ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+                                                        *(precvframe->pkt->data+i),
+                                                        *(precvframe->pkt->data+i+1),
+                                                        *(precvframe->pkt->data+i+2),
+                                                        *(precvframe->pkt->data+i+3),
+                                                        *(precvframe->pkt->data+i+4),
+                                                        *(precvframe->pkt->data+i+5),
+                                                        *(precvframe->pkt->data+i+6),
+                                                        *(precvframe->pkt->data+i+7)));
+                                       }
+                                       RT_TRACE(_module_rtl871x_recv_c_,
+                                                _drv_err_,
+                                                ("\n ====== dump packet end [len=%d]======\n",
+                                                precvframe->pkt->len));
+                                       RT_TRACE(_module_rtl871x_recv_c_,
+                                                _drv_err_,
+                                                ("\n hdrlen=%d,\n",
+                                                prxattrib->hdrlen));
+                               }
 
-exit_lib80211_tkip:
-                       if (crypto_ops && crypto_private)
-                               crypto_ops->deinit(crypto_private);
+                               RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+                                        ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+                                        prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+                                        prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+                               /* Double-check key_index here due to a timing issue; */
+                               /* comparing with psecuritypriv->dot118021XGrpKeyid has the same timing issue */
+                               if ((IS_MCAST(prxattrib->ra) == true)  && (prxattrib->key_index != pmlmeinfo->key_index))
+                                       brpt_micerror = false;
+
+                               if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+                                       rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+                                       DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+                               } else {
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+                                       DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+                               }
+                               res = _FAIL;
+                       } else {
+                               /* mic checked ok */
+                               if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+                                       psecuritypriv->bcheck_grpkey = true;
+                                       RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+                               }
+                       }
                } else {
                        RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
                                 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
                }
+
+               skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
        }
 
 exit:
+
        return res;
 }
 
index bfe0b217e6798070294c5991eaf13d63105877d3..67a2490f055e234b3ea4621fce7ca0903104801c 100644 (file)
@@ -650,71 +650,71 @@ u32       rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
        return res;
 }
 
+/* The hlen doesn't include the IV */
 u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
-{
-       struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
-       u32 res = _SUCCESS;
+{      /* exclude ICV */
+       u16 pnl;
+       u32 pnh;
+       u8   rc4key[16];
+       u8   ttkey[16];
+       u8      crc[4];
+       struct arc4context mycontext;
+       int                     length;
+
+       u8      *pframe, *payload, *iv, *prwskey;
+       union pn48 dot11txpn;
+       struct  sta_info                *stainfo;
+       struct  rx_pkt_attrib    *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+       struct  security_priv   *psecuritypriv = &padapter->securitypriv;
+       u32             res = _SUCCESS;
+
+
+       pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
 
        /* 4 start to decrypt recvframe */
        if (prxattrib->encrypt == _TKIP_) {
-               struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
-
+               stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
                if (stainfo) {
-                       int key_idx;
-                       const int iv_len = 8, icv_len = 4, key_length = 32;
-                       void *crypto_private = NULL;
-                       struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
-                       u8 key[32], iv[8], icv[4], *pframe = skb->data;
-                       struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
-                       struct security_priv *psecuritypriv = &padapter->securitypriv;
-
                        if (IS_MCAST(prxattrib->ra)) {
                                if (!psecuritypriv->binstallGrpkey) {
                                        res = _FAIL;
                                        DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
                                        goto exit;
                                }
-                               key_idx = prxattrib->key_index;
-                               memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
-                               memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+                               prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
                        } else {
-                               key_idx = 0;
-                               memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
-                               memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+                               RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
+                               prwskey = &stainfo->dot118021x_UncstKey.skey[0];
                        }
 
-                       if (!crypto_ops) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       iv = pframe+prxattrib->hdrlen;
+                       payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+                       length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
 
-                       memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
-                       memcpy(icv, pframe + skb->len - icv_len, icv_len);
+                       GET_TKIP_PN(iv, dot11txpn);
 
-                       crypto_private = crypto_ops->init(key_idx);
-                       if (!crypto_private) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
-                       if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
-                               res = _FAIL;
-                               goto exit_lib80211_tkip;
-                       }
+                       pnl = (u16)(dot11txpn.val);
+                       pnh = (u32)(dot11txpn.val>>16);
 
-                       memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
-                       skb_push(skb, iv_len);
-                       skb_put(skb, icv_len);
+                       phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+                       phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
 
-                       memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
-                       memcpy(pframe + skb->len - icv_len, icv, icv_len);
+                       /* 4 decrypt payload include icv */
 
-exit_lib80211_tkip:
-                       if (crypto_ops && crypto_private)
-                               crypto_ops->deinit(crypto_private);
+                       arcfour_init(&mycontext, rc4key, 16);
+                       arcfour_encrypt(&mycontext, payload, payload, length);
+
+                       *((__le32 *)crc) = getcrc32(payload, length-4);
+
+                       if (crc[3] != payload[length-1] ||
+                           crc[2] != payload[length-2] ||
+                           crc[1] != payload[length-3] ||
+                           crc[0] != payload[length-4]) {
+                               RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+                                        ("rtw_tkip_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+                                        &crc, &payload[length-4]));
+                               res = _FAIL;
+                       }
                } else {
                        RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
                        res = _FAIL;
index 45c05527a57a327a7490acfe889581cfe0bb41ba..faf4b4158cfa2c174ea2f68c3829f7933d64c35f 100644 (file)
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
                return _FAIL;
 
 
-       if (len > MAX_IE_SZ)
+       if (len < 0 || len > MAX_IE_SZ)
                return _FAIL;
 
        pbss_network->IELength = len;
index 7947edb239a13b7d7752cf2e2a82f0814424306e..88ba5b2fea6acdb47863ba959f61eb05318f75a7 100644 (file)
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
                return;
 
        pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
-       pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7));
+       pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
 
        pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
        pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
index 012fb618840b05e910e6d68fba38e044dfc26ab0..a45f0eb69d3f2fdb1b14df330a5dbf8cb0ddc24d 100644 (file)
@@ -88,6 +88,7 @@
 #define RTL_USB_MAX_RX_COUNT                   100
 #define QBSS_LOAD_SIZE                         5
 #define MAX_WMMELE_LENGTH                      64
+#define ASPM_L1_LATENCY                                7
 
 #define TOTAL_CAM_ENTRY                                32
 
index a61bc41b82d7845f29dda7cd34669a2311833896..947c79532e1004818eea7ef94b101ec93303e2b2 100644 (file)
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
        int chars_sent = 0;
        char __user *cp;
        char *init;
+       size_t bytes_per_ch = unicode ? 3 : 1;
        u16 ch;
        int empty;
        unsigned long flags;
        DEFINE_WAIT(wait);
 
+       if (count < bytes_per_ch)
+               return -EINVAL;
+
        spin_lock_irqsave(&speakup_info.spinlock, flags);
        while (1) {
                prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
        init = get_initstring();
 
        /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
-       while (chars_sent <= count - 3) {
+       while (chars_sent <= count - bytes_per_ch) {
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        ch = '\x18';
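
The new count check guards the loop condition below it: chars_sent <= count -
bytes_per_ch mixes a signed counter with size_t arithmetic, and for count smaller
than bytes_per_ch the subtraction wraps to a huge unsigned value, letting the loop
overrun the user buffer. The wraparound in isolation:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        size_t count = 2, bytes_per_ch = 3;
        /* prints 18446744073709551615 on LP64 instead of -1 */
        printf("count - bytes_per_ch = %zu\n", count - bytes_per_ch);
        return 0;
    }
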
index 3aa981fbc8f56c4215344de1d20269cfc1ae9fc3..e45ed08a51668fbe5ba03abc849746471a1a48e1 100644 (file)
@@ -11,6 +11,7 @@ config TYPEC_TCPCI
 
 config TYPEC_RT1711H
        tristate "Richtek RT1711H Type-C chip driver"
+       depends on I2C
        select TYPEC_TCPCI
        help
          Richtek RT1711H Type-C chip driver that works with
index 5c7ea237893e63dbe9ef0a9952c855d479c79f49..da4a93df8d75c51bf0284d4f5d9dd0ee95bd0ecb 100644 (file)
@@ -504,7 +504,7 @@ static void vbox_set_edid(struct drm_connector *connector, int width,
        for (i = 0; i < EDID_SIZE - 1; ++i)
                sum += edid[i];
        edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
-       drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
+       drm_connector_update_edid_property(connector, (struct edid *)edid);
 }
 
 static int vbox_get_modes(struct drm_connector *connector)
@@ -655,7 +655,7 @@ static int vbox_connector_init(struct drm_device *dev,
                                   dev->mode_config.suggested_y_property, 0);
        drm_connector_register(connector);
 
-       drm_mode_connector_attach_encoder(connector, encoder);
+       drm_connector_attach_encoder(connector, encoder);
 
        return 0;
 }
index 01ac306131c1f163c6eb6043651a412e3e71dc76..10db5656fd5dcb8e95769a922223b8e88cf23983 100644 (file)
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
                 * Check for overflow of 8byte PRI READ_KEYS payload and
                 * next reservation key list descriptor.
                 */
-               if ((add_len + 8) > (cmd->data_length - 8))
-                       break;
-
-               put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
-               off += 8;
+               if (off + 8 <= cmd->data_length) {
+                       put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+                       off += 8;
+               }
+               /*
+                * SPC5r17: 6.16.2 READ KEYS service action
+                * The ADDITIONAL LENGTH field indicates the number of bytes in
+                * the Reservation key list. The contents of the ADDITIONAL
+                * LENGTH field are not altered based on the allocation length.
+                */
                add_len += 8;
        }
        spin_unlock(&dev->t10_pr.registration_lock);
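
A worked example of the rule restored above: the ADDITIONAL LENGTH field must
report the full reservation key list even when the initiator's allocation length
truncates the returned data. With illustrative numbers (5 registered keys, a
24-byte buffer):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t data_length = 24;  /* allocation length from the initiator */
        uint32_t off = 8;           /* 8-byte READ KEYS header */
        uint32_t add_len = 0;
        int keys = 5;

        for (int i = 0; i < keys; i++) {
            if (off + 8 <= data_length)
                off += 8;           /* key fits in the payload */
            add_len += 8;           /* counted whether it fits or not */
        }
        /* copied 16 bytes of keys, ADDITIONAL LENGTH reports 40 */
        printf("copied %u key bytes, ADDITIONAL LENGTH = %u\n", off - 8, add_len);
        return 0;
    }
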
index 7f96dfa32b9cdf1cbf167fe1b0581e3b94f1a08b..d8dc3d22051f7810efa5faafba0cc71e3ad43040 100644 (file)
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
 }
 
 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
-                            bool bidi)
+                            bool bidi, uint32_t read_len)
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
-               while (sg_remaining > 0) {
+               while (sg_remaining > 0 && read_len > 0) {
                        if (block_remaining == 0) {
                                if (from)
                                        kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
+                       if (read_len < copy_bytes)
+                               copy_bytes = read_len;
                        offset = DATA_BLOCK_SIZE - block_remaining;
                        tcmu_flush_dcache_range(from, copy_bytes);
                        memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 
                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
+                       read_len -= copy_bytes;
                }
                kunmap_atomic(to - sg->offset);
+               if (read_len == 0)
+                       break;
        }
        if (from)
                kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
+       bool read_len_valid = false;
+       uint32_t read_len = se_cmd->data_length;
 
        /*
         * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                        cmd->se_cmd);
                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
-       } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+               goto done;
+       }
+
+       if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+           (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+               read_len_valid = true;
+               if (entry->rsp.read_len < read_len)
+                       read_len = entry->rsp.read_len;
+       }
+
+       if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
-       } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               if (!read_len_valid)
+                       goto done;
+               se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+       }
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                /* Get Data-In buffer before clean up */
-               gather_data_area(udev, cmd, true);
+               gather_data_area(udev, cmd, true, read_len);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               gather_data_area(udev, cmd, false);
+               gather_data_area(udev, cmd, false, read_len);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                /* TODO: */
        } else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                        se_cmd->data_direction);
        }
 
-       target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+       if (read_len_valid) {
+               pr_debug("read_len = %d\n", read_len);
+               target_complete_cmd_with_length(cmd->se_cmd,
+                                       entry->rsp.scsi_status, read_len);
+       } else
+               target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
 
 out:
        cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
        /* Initialise the mailbox of the ring buffer */
        mb = udev->mb_addr;
        mb->version = TCMU_MAILBOX_VERSION;
-       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
        mb->cmdr_off = CMDR_OFF;
        mb->cmdr_size = udev->cmdr_size;
 
index 07d3be6f0780db209ac2be07354ac390c31d6be8..0b9ab1d0dd45dd69046f921e6bf846e7af23fe88 100644 (file)
@@ -80,11 +80,6 @@ static void tee_shm_op_release(struct dma_buf *dmabuf)
        tee_shm_release(shm);
 }
 
-static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-       return NULL;
-}
-
 static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
 {
        return NULL;
@@ -107,7 +102,6 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
        .map_dma_buf = tee_shm_op_map_dma_buf,
        .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
        .release = tee_shm_op_release,
-       .map_atomic = tee_shm_op_map_atomic,
        .map = tee_shm_op_map,
        .mmap = tee_shm_op_mmap,
 };
index 6281266b8ec0a15721da5196b641b707b8a5c973..a923ebdeb73c80bf845af7acc90dcc20a9c2ce1b 100644 (file)
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                goto err_free_acl;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+       if (!ret) {
+               /* Notify userspace about the change */
+               kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+       }
        mutex_unlock(&tb->lock);
 
 err_free_acl:
index cbe98bc2b998276fd95b2d8086a6fabcf2351bf7..43174220170924e094567ad6271703bd881e2a6f 100644 (file)
@@ -124,6 +124,8 @@ struct n_tty_data {
        struct mutex output_lock;
 };
 
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
        return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
 
 static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
 {
+       smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
        return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
 }
 
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
 static void reset_buffer_flags(struct n_tty_data *ldata)
 {
        ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
-       ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
        ldata->commit_head = 0;
-       ldata->echo_mark = 0;
        ldata->line_start = 0;
 
        ldata->erasing = 0;
@@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty)
        old_space = space = tty_write_room(tty);
 
        tail = ldata->echo_tail;
-       while (ldata->echo_commit != tail) {
+       while (MASK(ldata->echo_commit) != MASK(tail)) {
                c = echo_buf(ldata, tail);
                if (c == ECHO_OP_START) {
                        unsigned char op;
                        int no_space_left = 0;
 
+                       /*
+                        * Since add_echo_byte() is called without holding
+                        * output_lock, we might see only a portion of a
+                        * multi-byte operation.
+                        */
+                       if (MASK(ldata->echo_commit) == MASK(tail + 1))
+                               goto not_yet_stored;
                        /*
                         * If the buffer byte is the start of a multi-byte
                         * operation, get the next byte, which is either the
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
                                unsigned int num_chars, num_bs;
 
                        case ECHO_OP_ERASE_TAB:
+                               if (MASK(ldata->echo_commit) == MASK(tail + 2))
+                                       goto not_yet_stored;
                                num_chars = echo_buf(ldata, tail + 2);
 
                                /*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
        /* If the echo buffer is nearly full (so that the possibility exists
         * of echo overrun before the next commit), then discard enough
         * data at the tail to prevent a subsequent overrun */
-       while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+       while (ldata->echo_commit > tail &&
+              ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
                if (echo_buf(ldata, tail) == ECHO_OP_START) {
                        if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
                                tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
                        tail++;
        }
 
+ not_yet_stored:
        ldata->echo_tail = tail;
        return old_space - space;
 }
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
        size_t nr, old, echoed;
        size_t head;
 
+       mutex_lock(&ldata->output_lock);
        head = ldata->echo_head;
        ldata->echo_mark = head;
        old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
         * is over the threshold (and try again each time another
         * block is accumulated) */
        nr = head - ldata->echo_tail;
-       if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+       if (nr < ECHO_COMMIT_WATERMARK ||
+           (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+               mutex_unlock(&ldata->output_lock);
                return;
+       }
 
-       mutex_lock(&ldata->output_lock);
        ldata->echo_commit = head;
        echoed = __process_echoes(tty);
        mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
 
 static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
 {
-       *echo_buf_addr(ldata, ldata->echo_head++) = c;
+       *echo_buf_addr(ldata, ldata->echo_head) = c;
+       smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+       ldata->echo_head++;
 }
 
 /**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
        }
 
        seen_alnums = 0;
-       while (ldata->read_head != ldata->canon_head) {
+       while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
                head = ldata->read_head;
 
                /* erase a single possibly multibyte character */
                do {
                        head--;
                        c = read_buf(ldata, head);
-               } while (is_continuation(c, tty) && head != ldata->canon_head);
+               } while (is_continuation(c, tty) &&
+                        MASK(head) != MASK(ldata->canon_head));
 
                /* do not partially erase */
                if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                                 * This info is used to go back the correct
                                 * number of columns.
                                 */
-                               while (tail != ldata->canon_head) {
+                               while (MASK(tail) != MASK(ldata->canon_head)) {
                                        tail--;
                                        c = read_buf(ldata, tail);
                                        if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
                        finish_erasing(ldata);
                        echo_char(c, tty);
                        echo_char_raw('\n', ldata);
-                       while (tail != ldata->read_head) {
+                       while (MASK(tail) != MASK(ldata->read_head)) {
                                echo_char(read_buf(ldata, tail), tty);
                                tail++;
                        }
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
        struct n_tty_data *ldata;
 
        /* Currently a malloc failure here can panic */
-       ldata = vmalloc(sizeof(*ldata));
+       ldata = vzalloc(sizeof(*ldata));
        if (!ldata)
-               goto err;
+               return -ENOMEM;
 
        ldata->overrun_time = jiffies;
        mutex_init(&ldata->atomic_read_lock);
        mutex_init(&ldata->output_lock);
 
        tty->disc_data = ldata;
-       reset_buffer_flags(tty->disc_data);
-       ldata->column = 0;
-       ldata->canon_column = 0;
-       ldata->num_overrun = 0;
-       ldata->no_room = 0;
-       ldata->lnext = 0;
        tty->closing = 0;
        /* indicate buffer work may resume */
        clear_bit(TTY_LDISC_HALTED, &tty->flags);
        n_tty_set_termios(tty, NULL);
        tty_unthrottle(tty);
-
        return 0;
-err:
-       return -ENOMEM;
 }
 
 static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
        tail = ldata->read_tail;
        nr = head - tail;
        /* Skip EOF-chars.. */
-       while (head != tail) {
+       while (MASK(head) != MASK(tail)) {
                if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
                    read_buf(ldata, tail) == __DISABLED_CHAR)
                        nr--;
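
The smp_wmb()/smp_rmb() pair introduced above is the classic "store the data, then
publish the index" protocol for a single-producer ring buffer: the reader must not
observe the new head before the byte behind it is visible. A userspace C11 sketch
of the same idea with release/acquire atomics (illustrative, not the n_tty code):

    #include <stdatomic.h>
    #include <stdio.h>

    #define N 256
    #define MASK(x) ((x) & (N - 1))

    static unsigned char buf[N];
    static atomic_size_t head;              /* producer publishes here */

    static void add_byte(unsigned char c)   /* cf. add_echo_byte() */
    {
        size_t h = atomic_load_explicit(&head, memory_order_relaxed);
        buf[MASK(h)] = c;                   /* 1: store the byte  */
        atomic_store_explicit(&head, h + 1, /* 2: then publish it */
                              memory_order_release);
    }

    static int get_byte(size_t tail)        /* cf. echo_buf() */
    {
        if (tail == atomic_load_explicit(&head, memory_order_acquire))
            return -1;   /* acquire pairs with the release store above */
        return buf[MASK(tail)];
    }

    int main(void)
    {
        add_byte('x');
        printf("%c\n", (char)get_byte(0));
        return 0;
    }
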
index df93b727e984ee3d185fa0f5a42cad09d63a3f65..9e59f4788589c879358ce12507362baec459533d 100644 (file)
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
 static void __exit serdev_exit(void)
 {
        bus_unregister(&serdev_bus_type);
+       ida_destroy(&ctrl_ida);
 }
 module_exit(serdev_exit);
 
index 3296a05cda2db8d53b1d869123ed8e2aa884a248..f80a300b5d68f6e8ad61b7daf2544234da7e1662 100644 (file)
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
        /* multi-io cards handled by parport_serial */
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
-       { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
-       { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 
        /* Moxa Smartio MUE boards handled by 8250_moxa */
        { PCI_VDEVICE(MOXA, 0x1024), },
index 1eb1a376a0419d4084cd7a72e1e2c7eb769798f7..15eb6c829d39c5b108adfca034fa50769763c0bc 100644 (file)
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons)      /* return 0 on success */
        if (!*vc->vc_uni_pagedir_loc)
                con_set_default_unimap(vc);
 
-       vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+       vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
        if (!vc->vc_screenbuf)
                goto err_free;
 
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 
        if (new_screen_size > (4 << 20))
                return -EINVAL;
-       newscreen = kmalloc(new_screen_size, GFP_USER);
+       newscreen = kzalloc(new_screen_size, GFP_USER);
        if (!newscreen)
                return -ENOMEM;
 
index e8f4ac9400ea842a8fe631fe23519e04f565f4e6..5d421d7e8904fc633f9f25c0faf6d69ec1d8f498 100644 (file)
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", idev->info->name);
+       int ret;
+
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               dev_err(dev, "the device has been unregistered\n");
+               goto out;
+       }
+
+       ret = sprintf(buf, "%s\n", idev->info->name);
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 static DEVICE_ATTR_RO(name);
 
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", idev->info->version);
+       int ret;
+
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               dev_err(dev, "the device has been unregistered\n");
+               goto out;
+       }
+
+       ret = sprintf(buf, "%s\n", idev->info->version);
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 static DEVICE_ATTR_RO(version);
 
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
 static irqreturn_t uio_interrupt(int irq, void *dev_id)
 {
        struct uio_device *idev = (struct uio_device *)dev_id;
-       irqreturn_t ret = idev->info->handler(irq, idev->info);
+       irqreturn_t ret;
 
+       mutex_lock(&idev->info_lock);
+
+       ret = idev->info->handler(irq, idev->info);
        if (ret == IRQ_HANDLED)
                uio_event_notify(idev->info);
 
+       mutex_unlock(&idev->info_lock);
        return ret;
 }
 
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep)
        struct uio_device *idev;
        struct uio_listener *listener;
        int ret = 0;
-       unsigned long flags;
 
        mutex_lock(&minor_lock);
        idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep)
        listener->event_count = atomic_read(&idev->event);
        filep->private_data = listener;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               mutex_unlock(&idev->info_lock);
+               ret = -EINVAL;
+               goto err_alloc_listener;
+       }
+
        if (idev->info && idev->info->open)
                ret = idev->info->open(idev->info, inode);
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
        if (ret)
                goto err_infoopen;
 
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep)
        int ret = 0;
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (idev->info && idev->info->release)
                ret = idev->info->release(idev->info, inode);
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        module_put(idev->owner);
        kfree(listener);
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        __poll_t ret = 0;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (!idev->info || !idev->info->irq)
                ret = -EIO;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        if (ret)
                return ret;
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval = 0;
        s32 event_count;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (!idev->info || !idev->info->irq)
                retval = -EIO;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        if (retval)
                return retval;
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
        struct uio_device *idev = listener->dev;
        ssize_t retval;
        s32 irq_on;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               retval = -EINVAL;
+               goto out;
+       }
+
        if (!idev->info || !idev->info->irq) {
                retval = -EIO;
                goto out;
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
        retval = idev->info->irqcontrol(idev->info, irq_on);
 
 out:
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
        return retval ? retval : sizeof(s32);
 }
 
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
        struct page *page;
        unsigned long offset;
        void *addr;
+       int ret = 0;
+       int mi;
 
-       int mi = uio_find_mem_index(vmf->vma);
-       if (mi < 0)
-               return VM_FAULT_SIGBUS;
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
+
+       mi = uio_find_mem_index(vmf->vma);
+       if (mi < 0) {
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
 
        /*
         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
                page = vmalloc_to_page(addr);
        get_page(page);
        vmf->page = page;
-       return 0;
+
+out:
+       mutex_unlock(&idev->info_lock);
+
+       return ret;
 }
 
 static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
        struct uio_mem *mem;
+
        if (mi < 0)
                return -EINVAL;
        mem = idev->info->mem + mi;
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
 
        vma->vm_private_data = idev;
 
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        mi = uio_find_mem_index(vma);
-       if (mi < 0)
-               return -EINVAL;
+       if (mi < 0) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        requested_pages = vma_pages(vma);
        actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
                        + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
-       if (requested_pages > actual_pages)
-               return -EINVAL;
+       if (requested_pages > actual_pages) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        if (idev->info->mmap) {
                ret = idev->info->mmap(idev->info, vma);
-               return ret;
+               goto out;
        }
 
        switch (idev->info->mem[mi].memtype) {
                case UIO_MEM_PHYS:
-                       return uio_mmap_physical(vma);
+                       ret = uio_mmap_physical(vma);
+                       break;
                case UIO_MEM_LOGICAL:
                case UIO_MEM_VIRTUAL:
-                       return uio_mmap_logical(vma);
+                       ret = uio_mmap_logical(vma);
+                       break;
                default:
-                       return -EINVAL;
+                       ret = -EINVAL;
        }
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 
 static const struct file_operations uio_fops = {
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner,
 
        idev->owner = owner;
        idev->info = info;
-       spin_lock_init(&idev->info_lock);
+       mutex_init(&idev->info_lock);
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);
 
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner,
                 * FDs at the time of unregister and therefore may not be
                 * freed until they are released.
                 */
-               ret = request_irq(info->irq, uio_interrupt,
-                                 info->irq_flags, info->name, idev);
+               ret = request_threaded_irq(info->irq, NULL, uio_interrupt,
+                                          info->irq_flags, info->name, idev);
+
                if (ret)
                        goto err_request_irq;
        }
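The switch to request_threaded_irq() follows from the lock change: info_lock is now a mutex, which may sleep, and sleeping is forbidden in hard-IRQ context. With a NULL primary handler, the core runs the given function in a dedicated kernel thread instead. A minimal sketch (names illustrative; IRQF_ONESHOT shown for the generic level-triggered case, whereas the driver passes its own info->irq_flags):

    static irqreturn_t demo_thread_fn(int irq, void *dev_id)
    {
            struct demo_dev *dev = dev_id;

            mutex_lock(&dev->lock);         /* legal in thread context */
            /* ... acknowledge the device ... */
            mutex_unlock(&dev->lock);
            return IRQ_HANDLED;
    }

    ret = request_threaded_irq(irq, NULL, demo_thread_fn,
                               IRQF_ONESHOT, "demo", dev);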
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
 void uio_unregister_device(struct uio_info *info)
 {
        struct uio_device *idev;
-       unsigned long flags;
 
        if (!info || !info->uio_dev)
                return;
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info)
 
        uio_free_minor(idev);
 
+       mutex_lock(&idev->info_lock);
        uio_dev_del_attributes(idev);
 
        if (info->irq && info->irq != UIO_IRQ_CUSTOM)
                free_irq(info->irq, idev);
 
-       spin_lock_irqsave(&idev->info_lock, flags);
        idev->info = NULL;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        device_unregister(&idev->dev);
 
index 785f0ed037f7897d06cd3432a7a79013bba3831c..ee34e9046f7ea201f53e79d3e8725d9c937b0d88 100644 (file)
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
        depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
        select EXTCON
        select RESET_CONTROLLER
+       select USB_ULPI_BUS
        help
          Say Y here if your system has a dual role high speed USB
          controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
        help
          Say Y here to enable host controller functionality of the
          ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
-       bool "ChipIdea ULPI PHY support"
-       depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
-       help
-         Say Y here if you have a ULPI PHY attached to your ChipIdea
-         controller.
-
 endif
index e3d5e728fa530aef709487061b28f76a36f3a98d..12df94f78f7221e39aa24f0646d78d90977088e7 100644 (file)
@@ -1,11 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_USB_CHIPIDEA)             += ci_hdrc.o
 
-ci_hdrc-y                              := core.o otg.o debug.o
+ci_hdrc-y                              := core.o otg.o debug.o ulpi.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC)     += udc.o
 ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST)    += host.o
 ci_hdrc-$(CONFIG_USB_OTG_FSM)          += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI)    += ulpi.o
 
 # Glue/Bridge layers go here
 
index 0bf244d505442932d7660640029beebb66c2e0e0..6a2cc5cd0281d8d728fb4f083798b2ea6bf55cc8 100644 (file)
@@ -240,10 +240,8 @@ struct ci_hdrc {
 
        struct ci_hdrc_platform_data    *platdata;
        int                             vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
        struct ulpi                     *ulpi;
        struct ulpi_ops                 ulpi_ops;
-#endif
        struct phy                      *phy;
        /* old usb_phy interface */
        struct usb_phy                  *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
 int ci_ulpi_init(struct ci_hdrc *ci);
 void ci_ulpi_exit(struct ci_hdrc *ci);
 int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
 
 u32 hw_read_intr_enable(struct ci_hdrc *ci);
 
index af45aa3222b5ce1c99ccb4de586c988d3b3a0b9f..4638d9b066bea7ad2b35f6c966ee7acec428facf 100644 (file)
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
 
        hcd->power_budget = ci->platdata->power_budget;
        hcd->tpl_support = ci->platdata->tpl_support;
-       if (ci->phy || ci->usb_phy)
+       if (ci->phy || ci->usb_phy) {
                hcd->skip_phy_initialization = 1;
+               if (ci->usb_phy)
+                       hcd->usb_phy = ci->usb_phy;
+       }
 
        ehci = hcd_to_ehci(hcd);
        ehci->caps = ci->hw_bank.cap;
index 6da42dcd2888601a23f65aa6644608c259b3288c..dfec07e8ae1d268c459b495603bd5e8de154518f 100644 (file)
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
 {
        int cnt = 100000;
 
+       if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+               return 0;
+
        while (cnt-- > 0) {
                if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
                        return 0;
index 7b366a6c0b493f2eb8bec4959830d222223f3cb2..75c4623ad779eecd64b0164a24b6d8ac86177ca3 100644 (file)
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
        .driver_info = SINGLE_RX_URB,
        },
+       { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
        { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
@@ -1828,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
        .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
        },
+       { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+       .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+       },
 
        { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
        .driver_info = CLEAR_HALT_CONDITIONS,
index fcae521df29b8de4712e92de725c575f298f567a..1fb2668099663e08ffa3f5e7713ab0eaa55fb10a 100644 (file)
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
                if (!udev || udev->state == USB_STATE_NOTATTACHED) {
                        /* Tell hub_wq to disconnect the device or
-                        * check for a new connection
+                        * check for a new connection or an over-current
+                        * condition. Based on USB 2.0 Spec Section 11.12.5,
+                        * C_PORT_OVER_CURRENT could be set while
+                        * PORT_OVER_CURRENT is not, so check for either.
                         */
                        if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
-                           (portstatus & USB_PORT_STAT_OVERCURRENT))
+                           (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+                           (portchange & USB_PORT_STAT_C_OVERCURRENT))
                                set_bit(port1, hub->change_bits);
 
                } else if (portstatus & USB_PORT_STAT_ENABLE) {
index c55def2f1320f92c6c0c652fc94c7056165ee467..097057d2eacf7bcb18316473c6f0def8a5744b62 100644 (file)
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Corsair K70 RGB */
        { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
+       /* Corsair Strafe */
+       { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+         USB_QUIRK_DELAY_CTRL_MSG },
+
        /* Corsair Strafe RGB */
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
          USB_QUIRK_DELAY_CTRL_MSG },
index 4a56ac772a3c35360d3834f5542aad22d5b1b720..71b3b08ad516c9fb3bfbd403da1687b5825e7e14 100644 (file)
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
  * @frame_list_sz:      Frame list size
  * @desc_gen_cache:     Kmem cache for generic descriptors
  * @desc_hsisoc_cache:  Kmem cache for hs isochronous descriptors
+ * @unaligned_cache:    Kmem cache for DMA mode to handle non-aligned buf
  *
  * These are for peripheral mode:
  *
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
        u32 frame_list_sz;
        struct kmem_cache *desc_gen_cache;
        struct kmem_cache *desc_hsisoc_cache;
+       struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
 
 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
 
index f0d9ccf1d665ad37b2f23786806bde8c16da11ea..cefc99ae69b2f489c8fa16f9fca4381ffb405bc3 100644 (file)
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
        u32 index;
        u32 maxsize = 0;
        u32 mask = 0;
+       u8 pid = 0;
 
        maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
 
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
                         ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 
        if (hs_ep->dir_in) {
-               desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+               if (len)
+                       pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+               else
+                       pid = 1;
+               desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
                                 DEV_DMA_ISOC_PID_MASK) |
                                ((len % hs_ep->ep.maxpacket) ?
                                 DEV_DMA_SHORT : 0) |
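The PID field of an isochronous IN descriptor encodes how many packets carry the interval's payload: DIV_ROUND_UP(len, maxpacket) packets for a non-empty transfer, and one zero-length packet otherwise. A worked example under those assumptions:

    /* maxpacket = 1024:
     *   len = 3000 -> DIV_ROUND_UP(3000, 1024) = 3 packets
     *   len = 1024 -> 1 packet
     *   len = 0    -> forced to 1 (a single zero-length packet)
     */
    static u8 isoc_pid(u32 len, u32 maxpacket)
    {
            return len ? DIV_ROUND_UP(len, maxpacket) : 1;
    }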
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
        struct dwc2_dma_desc *desc;
 
        if (list_empty(&hs_ep->queue)) {
+               hs_ep->target_frame = TARGET_FRAME_INITIAL;
                dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
                return;
        }
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
         */
        tmp = dwc2_hsotg_read_frameno(hsotg);
 
-       dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
        if (using_desc_dma(hsotg)) {
                if (ep->target_frame == TARGET_FRAME_INITIAL) {
                        /* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
                tmp = dwc2_hsotg_read_frameno(hsotg);
                if (using_desc_dma(hsotg)) {
-                       dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                                   get_ep_head(hs_ep), 0);
-
                        hs_ep->target_frame = tmp;
                        dwc2_gadget_incr_frame_num(hs_ep);
                        dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -3429,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                hs_ep = hsotg->eps_in[idx];
                /* Proceed only unmasked ISOC EPs */
-               if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+               if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                        continue;
 
                epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3475,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
        for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                hs_ep = hsotg->eps_out[idx];
                /* Proceed only unmasked ISOC EPs */
-               if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+               if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                        continue;
 
                epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3649,7 +3650,7 @@ irq_retry:
                for (idx = 1; idx < hsotg->num_of_eps; idx++) {
                        hs_ep = hsotg->eps_out[idx];
                        /* Proceed only unmasked ISOC EPs */
-                       if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+                       if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
                                continue;
 
                        epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
        }
 
        ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-       if (ret)
+       if (ret) {
+               dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+                                          hsotg->ctrl_req);
                return ret;
-
+       }
        dwc2_hsotg_dump(hsotg);
 
        return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
        usb_del_gadget_udc(&hsotg->gadget);
+       dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
 
        return 0;
 }
index edaf0b6af4f0491ba192d346c29a792a06a85751..6e2cdd7b93d46cf839fe2c515e88028dec22bb2e 100644 (file)
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
        }
 
        if (hsotg->params.host_dma) {
-               dwc2_writel((u32)chan->xfer_dma,
-                           hsotg->regs + HCDMA(chan->hc_num));
+               dma_addr_t dma_addr;
+
+               if (chan->align_buf) {
+                       if (dbg_hc(chan))
+                               dev_vdbg(hsotg->dev, "align_buf\n");
+                       dma_addr = chan->align_buf;
+               } else {
+                       dma_addr = chan->xfer_dma;
+               }
+               dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
                if (dbg_hc(chan))
                        dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
-                                (unsigned long)chan->xfer_dma, chan->hc_num);
+                                (unsigned long)dma_addr, chan->hc_num);
        }
 
        /* Start the split */
@@ -2625,36 +2634,66 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
        }
 }
 
-#define DWC2_USB_DMA_ALIGN 4
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+                                           struct dwc2_qh *qh,
+                                           struct dwc2_host_chan *chan)
+{
+       if (!hsotg->unaligned_cache ||
+           chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+               return -ENOMEM;
 
-struct dma_aligned_buffer {
-       void *kmalloc_ptr;
-       void *old_xfer_buffer;
-       u8 data[0];
-};
+       if (!qh->dw_align_buf) {
+               qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+                                                   GFP_ATOMIC | GFP_DMA);
+               if (!qh->dw_align_buf)
+                       return -ENOMEM;
+       }
+
+       qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+                                             DWC2_KMEM_UNALIGNED_BUF_SIZE,
+                                             DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+               dev_err(hsotg->dev, "can't map align_buf\n");
+               chan->align_buf = 0;
+               return -EINVAL;
+       }
+
+       chan->align_buf = qh->dw_align_buf_dma;
+       return 0;
+}
+
+#define DWC2_USB_DMA_ALIGN 4
 
 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
 {
-       struct dma_aligned_buffer *temp;
+       void *stored_xfer_buffer;
+       size_t length;
 
        if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
                return;
 
-       temp = container_of(urb->transfer_buffer,
-                           struct dma_aligned_buffer, data);
+       /* Restore urb->transfer_buffer from the end of the allocated area */
+       memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+              urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
 
-       if (usb_urb_dir_in(urb))
-               memcpy(temp->old_xfer_buffer, temp->data,
-                      urb->transfer_buffer_length);
-       urb->transfer_buffer = temp->old_xfer_buffer;
-       kfree(temp->kmalloc_ptr);
+       if (usb_urb_dir_in(urb)) {
+               if (usb_pipeisoc(urb->pipe))
+                       length = urb->transfer_buffer_length;
+               else
+                       length = urb->actual_length;
+
+               memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+       }
+       kfree(urb->transfer_buffer);
+       urb->transfer_buffer = stored_xfer_buffer;
 
        urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
 }
 
 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
 {
-       struct dma_aligned_buffer *temp, *kmalloc_ptr;
+       void *kmalloc_ptr;
        size_t kmalloc_size;
 
        if (urb->num_sgs || urb->sg ||
@@ -2662,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
            !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
                return 0;
 
-       /* Allocate a buffer with enough padding for alignment */
+       /*
+        * Allocate a buffer with enough padding for the original
+        * transfer_buffer pointer. This allocation is guaranteed to be
+        * properly aligned for DMA.
+        */
        kmalloc_size = urb->transfer_buffer_length +
-               sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+               sizeof(urb->transfer_buffer);
 
        kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
        if (!kmalloc_ptr)
                return -ENOMEM;
 
-       /* Position our struct dma_aligned_buffer such that data is aligned */
-       temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
-       temp->kmalloc_ptr = kmalloc_ptr;
-       temp->old_xfer_buffer = urb->transfer_buffer;
+       /*
+        * Store the original urb->transfer_buffer pointer at the end of the
+        * allocation so it can be retrieved later.
+        */
+       memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+              &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
        if (usb_urb_dir_out(urb))
-               memcpy(temp->data, urb->transfer_buffer,
+               memcpy(kmalloc_ptr, urb->transfer_buffer,
                       urb->transfer_buffer_length);
-       urb->transfer_buffer = temp->data;
+       urb->transfer_buffer = kmalloc_ptr;
 
        urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
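The rework drops the header struct in favour of stashing the original pointer just past the payload: kmalloc() already returns DMA-suitable alignment, so no padding is needed, and memcpy() is used because the tail address is not necessarily pointer-aligned. The save/restore pair in isolation (names hypothetical):

    void *bounce_alloc(void *orig, size_t len, gfp_t gfp)
    {
            void *bounce = kmalloc(len + sizeof(orig), gfp);

            if (bounce)
                    memcpy(bounce + len, &orig, sizeof(orig)); /* stash */
            return bounce;
    }

    void *bounce_free(void *bounce, size_t len)
    {
            void *orig;

            memcpy(&orig, bounce + len, sizeof(orig));      /* unstash */
            kfree(bounce);
            return orig;
    }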
 
@@ -2802,6 +2848,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
        /* Set the transfer attributes */
        dwc2_hc_init_xfer(hsotg, chan, qtd);
 
+       /* For non-dword aligned buffers */
+       if (hsotg->params.host_dma && qh->do_split &&
+           chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+               dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+               if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+                       dev_err(hsotg->dev,
+                               "Failed to allocate memory to handle non-aligned buffer\n");
+                       /* Add channel back to free list */
+                       chan->align_buf = 0;
+                       chan->multi_count = 0;
+                       list_add_tail(&chan->hc_list_entry,
+                                     &hsotg->free_hc_list);
+                       qtd->in_process = 0;
+                       qh->channel = NULL;
+                       return -ENOMEM;
+               }
+       } else {
+               /*
+                * We assume that DMA is always aligned in the non-split
+                * and split-out cases. Warn if not.
+                */
+               WARN_ON_ONCE(hsotg->params.host_dma &&
+                            (chan->xfer_dma & 0x3));
+               chan->align_buf = 0;
+       }
+
        if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
            chan->ep_type == USB_ENDPOINT_XFER_ISOC)
                /*
@@ -5246,6 +5318,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
                }
        }
 
+       if (hsotg->params.host_dma) {
+               /*
+                * Create kmem caches to handle non-aligned buffer
+                * in Buffer DMA mode.
+                */
+               hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+                                               DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+                                               SLAB_CACHE_DMA, NULL);
+               if (!hsotg->unaligned_cache)
+                       dev_err(hsotg->dev,
+                               "unable to create dwc2 unaligned cache\n");
+       }
+
        hsotg->otg_port = 1;
        hsotg->frame_list = NULL;
        hsotg->frame_list_dma = 0;
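The bounce buffers must sit in DMA-capable memory and have a fixed worst-case size, which is what a SLAB_CACHE_DMA slab cache provides. The create/alloc/free cycle the driver relies on, condensed:

    cache = kmem_cache_create("demo-dma", 1024 /* size */, 4 /* align */,
                              SLAB_CACHE_DMA, NULL);
    buf = kmem_cache_alloc(cache, GFP_ATOMIC | GFP_DMA);
    /* ... dma_map_single()/dma_unmap_single() around each use ... */
    kmem_cache_free(cache, buf);
    kmem_cache_destroy(cache);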
@@ -5280,8 +5365,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        return 0;
 
 error4:
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 error3:
        dwc2_hcd_release(hsotg);
 error2:
@@ -5322,8 +5408,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
        usb_remove_hcd(hcd);
        hsotg->priv = NULL;
 
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 
        dwc2_hcd_release(hsotg);
        usb_put_hcd(hcd);
@@ -5435,7 +5522,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hprt0, hsotg->regs + HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
@@ -5616,6 +5703,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
                return ret;
        }
 
+       dwc2_hcd_rem_wakeup(hsotg);
+
        hsotg->hibernated = 0;
        hsotg->bus_suspended = 0;
        hsotg->lx_state = DWC2_L0;
index 7db1ee7e7a7781c12100d413abe7e4d62828e951..5502a501f5166640a2926132e4d4e1ee2f450724 100644 (file)
@@ -76,6 +76,8 @@ struct dwc2_qh;
  *                      (micro)frame
  * @xfer_buf:           Pointer to current transfer buffer position
  * @xfer_dma:           DMA address of xfer_buf
+ * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
+ *                      DWORD aligned
  * @xfer_len:           Total number of bytes to transfer
  * @xfer_count:         Number of bytes transferred so far
  * @start_pkt_count:    Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
 
        u8 *xfer_buf;
        dma_addr_t xfer_dma;
+       dma_addr_t align_buf;
        u32 xfer_len;
        u32 xfer_count;
        u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
  *                           speed.  Note that this is in "schedule slice" which
  *                           is tightly packed.
  * @ntd:                Actual number of transfer descriptors in a list
+ * @dw_align_buf:       Used instead of original buffer if its physical address
+ *                      is not dword-aligned
+ * @dw_align_buf_dma:   DMA address for dw_align_buf
  * @qtd_list:           List of QTDs for this QH
  * @channel:            Host channel currently processing transfers for this QH
  * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
        struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
        u32 ls_start_schedule_slice;
        u16 ntd;
+       u8 *dw_align_buf;
+       dma_addr_t dw_align_buf_dma;
        struct list_head qtd_list;
        struct dwc2_host_chan *channel;
        struct list_head qh_list_entry;
index fbea5e3fb9479bc4ff4ef250026df2583969ec67..8ce10caf3e1981fa7b5132d44a3aa760c5252d0f 100644 (file)
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
        len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
                                          DWC2_HC_XFER_COMPLETE, NULL);
-       if (!len) {
+       if (!len && !qtd->isoc_split_offset) {
                qtd->complete_split = 0;
-               qtd->isoc_split_offset = 0;
                return 0;
        }
 
        frame_desc->actual_length += len;
 
+       if (chan->align_buf) {
+               dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+               dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+                                DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+               memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+                      chan->qh->dw_align_buf, len);
+       }
+
        qtd->isoc_split_offset += len;
 
        hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
@@ -1224,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
         * avoid interrupt storms we'll wait before retrying if we've got
         * several NAKs. If we didn't do this we'd retry directly from the
         * interrupt handler and could end up quickly getting another
-        * interrupt (another NAK), which we'd retry.
+        * interrupt (another NAK), which we'd retry. Note that we do not
+        * delay retries for the IN parts of control requests, as those are
+        * expected to complete fairly quickly, and delaying them risks
+        * confusing the device and causing it to issue a STALL.
         *
         * Note that in DMA mode software only gets involved to re-send NAKed
         * transfers for split transactions, so we only need to apply this
@@ -1237,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
                        qtd->error_count = 0;
                qtd->complete_split = 0;
                qtd->num_naks++;
-               qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+               qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+                               !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+                                 chan->ep_is_in);
                dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
                goto handle_nak_done;
        }
index d7c3d6c776d86a8edf5c41832a7856cd6fca29eb..301ced1618f873203b534ffa77cf7ef59a773f84 100644 (file)
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
        /* Get the map and adjust if this is a multi_tt hub */
        map = qh->dwc_tt->periodic_bitmaps;
        if (qh->dwc_tt->usb_tt->multi)
-               map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+               map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
 
        return map;
 }
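The one-liner is a classic off-by-one: USB numbers hub ports from 1, while the per-port bitmaps form a zero-based array, so port N's bitmap starts at element (N - 1) * DWC2_ELEMENTS_PER_LS_BITMAP. Illustration:

    /* ttport = 1 (first port) -> offset 0 after the fix; without the
     * - 1, port 1 would index port 2's bitmap. */
    map = qh->dwc_tt->periodic_bitmaps +
          DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);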
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
        if (qh->desc_list)
                dwc2_hcd_qh_free_ddma(hsotg, qh);
+       else if (hsotg->unaligned_cache && qh->dw_align_buf)
+               kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
        kfree(qh);
 }
 
index ea91310113b9abd2a233a17bcd973d7a1ed1e09e..103807587dc640a747d75339f703a3aea0e8e992 100644 (file)
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
        if (!dwc->clks)
                return -ENOMEM;
 
-       dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
        dwc->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
        if (IS_ERR(dwc->reset))
                return PTR_ERR(dwc->reset);
 
-       ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
-       if (ret == -EPROBE_DEFER)
-               return ret;
-       /*
-        * Clocks are optional, but new DT platforms should support all clocks
-        * as required by the DT-binding.
-        */
-       if (ret)
-               dwc->num_clks = 0;
+       if (dev->of_node) {
+               dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+               ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+               /*
+                * Clocks are optional, but new DT platforms should support all
+                * clocks as required by the DT-binding.
+                */
+               if (ret)
+                       dwc->num_clks = 0;
+       }
 
        ret = reset_control_deassert(dwc->reset);
        if (ret)
index 6b3ccd542bd76f6c40308a9df550ac2674673e45..dbeff5e6ad1461eea71a4cf9e562755cd50b1b17 100644 (file)
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
 
        reset_control_put(simple->resets);
 
-       pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+       pm_runtime_set_suspended(dev);
 
        return 0;
 }
index c961a94d136b5248a5e242a6ab3370b22f3fa360..f57e7c94b8e5e0154ef430e3a0b3973d6ff84bd9 100644 (file)
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP              0x34ee
 
 #define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index b0e67ab2f98cd09ba54eaabfdab1680e30783e63..a6d0203e40b6e048bfb736133321ef6c857c09ca 100644 (file)
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
        qcom->dwc3 = of_find_device_by_node(dwc3_np);
        if (!qcom->dwc3) {
                dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+               ret = -ENODEV;
                goto depopulate;
        }
 
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
        return ret;
 }
 
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
 
        return ret;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_suspend(qcom);
 }
 
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_resume(qcom);
 }
-#endif
 
 static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
index c77ff50a88a2c5c44910b02fa88189a23491fab2..8efde178eef4d55faeb91aa924bcc82e7b548a41 100644 (file)
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                ret = dwc3_ep0_start_trans(dep);
        } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
                   req->request.length && req->request.zero) {
-               u32     maxpacket;
 
                ret = usb_gadget_map_request_by_dev(dwc->sysdev,
                                &req->request, dep->number);
                if (ret)
                        return;
 
-               maxpacket = dep->endpoint.maxpacket;
-
                /* prepare normal TRB */
                dwc3_ep0_prepare_one_trb(dep, req->request.dma,
                                         req->request.length,
index f242c2bcea810c0dee04f067ceea0dc716912058..b8a15840b4ffd574430cdc5d0e57e74a3c116242 100644 (file)
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                 */
                if (w_value && !f->get_alt)
                        break;
+
+               spin_lock(&cdev->lock);
                value = f->set_alt(f, w_index, w_value);
                if (value == USB_GADGET_DELAYED_STATUS) {
                        DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                        DBG(cdev, "delayed_status count %d\n",
                                        cdev->delayed_status);
                }
+               spin_unlock(&cdev->lock);
                break;
        case USB_REQ_GET_INTERFACE:
                if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -1816,7 +1819,6 @@ unknown:
                if (cdev->use_os_string && cdev->os_desc_config &&
                    (ctrl->bRequestType & USB_TYPE_VENDOR) &&
                    ctrl->bRequest == cdev->b_vendor_code) {
-                       struct usb_request              *req;
                        struct usb_configuration        *os_desc_cfg;
                        u8                              *buf;
                        int                             interface;
index dce9d12c7981afb1733be479e9daa43977bd218a..3ada83d81bda8d2810ab04d154b65658f57e0ba2 100644 (file)
@@ -215,6 +215,7 @@ struct ffs_io_data {
 
        struct mm_struct *mm;
        struct work_struct work;
+       struct work_struct cancellation_work;
 
        struct usb_ep *ep;
        struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+                                                  cancellation_work);
+
+       ENTER();
+
+       usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
        struct ffs_io_data *io_data = kiocb->private;
-       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+       struct ffs_data *ffs = io_data->ffs;
        int value;
 
        ENTER();
 
-       spin_lock_irq(&epfile->ffs->eps_lock);
-
-       if (likely(io_data && io_data->ep && io_data->req))
-               value = usb_ep_dequeue(io_data->ep, io_data->req);
-       else
+       if (likely(io_data && io_data->ep && io_data->req)) {
+               INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+               queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+               value = -EINPROGRESS;
+       } else {
                value = -EINVAL;
-
-       spin_unlock_irq(&epfile->ffs->eps_lock);
+       }
 
        return value;
 }
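usb_ep_dequeue() may sleep while the request unwinds, so it cannot run from the aio cancel path under eps_lock. The fix defers the dequeue to the function's completion workqueue and reports -EINPROGRESS. The defer pattern in generic form (names illustrative):

    struct demo_io {
            struct work_struct cancel_work;
            struct usb_ep *ep;
            struct usb_request *req;
    };

    static void demo_cancel_fn(struct work_struct *work)
    {
            struct demo_io *io = container_of(work, struct demo_io,
                                              cancel_work);

            usb_ep_dequeue(io->ep, io->req);   /* may sleep: fine here */
    }

    /* From the atomic cancel callback: */
    INIT_WORK(&io->cancel_work, demo_cancel_fn);
    queue_work(wq, &io->cancel_work);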
@@ -3253,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
        __ffs_event_add(ffs, FUNCTIONFS_SETUP);
        spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
-       return USB_GADGET_DELAYED_STATUS;
+       return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
 }
 
 static bool ffs_func_req_match(struct usb_function *f,
index d2dc1f00180b7869201afc1dbd82dcd4b126ab16..d582921f7257de4325f76102c7d86e6eda463645 100644 (file)
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
 };
 
 struct cntrl_cur_lay3 {
-       __u32   dCUR;
+       __le32  dCUR;
 };
 
 struct cntrl_range_lay3 {
-       __u16   wNumSubRanges;
-       __u32   dMIN;
-       __u32   dMAX;
-       __u32   dRES;
+       __le16  wNumSubRanges;
+       __le32  dMIN;
+       __le32  dMAX;
+       __le32  dRES;
 } __packed;
 
 static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
        if (!agdev->out_ep) {
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-               return ret;
+               return -ENODEV;
        }
 
        agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
        if (!agdev->in_ep) {
                dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-               return ret;
+               return -ENODEV;
        }
 
        agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
                memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
                if (entity_id == USB_IN_CLK_ID)
-                       c.dCUR = p_srate;
+                       c.dCUR = cpu_to_le32(p_srate);
                else if (entity_id == USB_OUT_CLK_ID)
-                       c.dCUR = c_srate;
+                       c.dCUR = cpu_to_le32(c_srate);
 
                value = min_t(unsigned, w_length, sizeof c);
                memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
        if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
                if (entity_id == USB_IN_CLK_ID)
-                       r.dMIN = p_srate;
+                       r.dMIN = cpu_to_le32(p_srate);
                else if (entity_id == USB_OUT_CLK_ID)
-                       r.dMIN = c_srate;
+                       r.dMIN = cpu_to_le32(c_srate);
                else
                        return -EOPNOTSUPP;
 
                r.dMAX = r.dMIN;
                r.dRES = 0;
-               r.wNumSubRanges = 1;
+               r.wNumSubRanges = cpu_to_le16(1);
 
                value = min_t(unsigned, w_length, sizeof r);
                memcpy(req->buf, &r, value);
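USB control responses are little-endian on the wire, so multi-byte fields are declared __le16/__le32 and filled through cpu_to_le16()/cpu_to_le32(). On little-endian CPUs the conversions are no-ops; on big-endian ones they byte-swap, which is what the unconverted code was missing. Example:

    struct cntrl_cur_lay3 c = {
            .dCUR = cpu_to_le32(48000),     /* 48 kHz sample rate */
    };
    /* Raw bytes on the wire are 80 bb 00 00 regardless of host CPU. */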
index a72295c953bba502468fb380c38601eaef6e4988..fb5ed97572e5fabe11609a3c01367fd26ba280c6 100644 (file)
@@ -32,9 +32,6 @@ struct uac_req {
 struct uac_rtd_params {
        struct snd_uac_chip *uac; /* parent chip */
        bool ep_enabled; /* if the ep is enabled */
-       /* Size of the ring buffer */
-       size_t dma_bytes;
-       unsigned char *dma_area;
 
        struct snd_pcm_substream *ss;
 
@@ -43,8 +40,6 @@ struct uac_rtd_params {
 
        void *rbuf;
 
-       size_t period_size;
-
        unsigned max_psize;     /* MaxPacketSize of endpoint */
        struct uac_req *ureq;
 
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 {
        unsigned pending;
-       unsigned long flags;
+       unsigned long flags, flags2;
        unsigned int hw_ptr;
-       bool update_alsa = false;
        int status = req->status;
        struct uac_req *ur = req->context;
        struct snd_pcm_substream *substream;
+       struct snd_pcm_runtime *runtime;
        struct uac_rtd_params *prm = ur->pp;
        struct snd_uac_chip *uac = prm->uac;
 
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
        if (!substream)
                goto exit;
 
+       snd_pcm_stream_lock_irqsave(substream, flags2);
+
+       runtime = substream->runtime;
+       if (!runtime || !snd_pcm_running(substream)) {
+               snd_pcm_stream_unlock_irqrestore(substream, flags2);
+               goto exit;
+       }
+
        spin_lock_irqsave(&prm->lock, flags);
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
                req->actual = req->length;
        }
 
-       pending = prm->hw_ptr % prm->period_size;
-       pending += req->actual;
-       if (pending >= prm->period_size)
-               update_alsa = true;
-
        hw_ptr = prm->hw_ptr;
-       prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
 
        spin_unlock_irqrestore(&prm->lock, flags);
 
        /* Pack USB load in ALSA ring buffer */
-       pending = prm->dma_bytes - hw_ptr;
+       pending = runtime->dma_bytes - hw_ptr;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                if (unlikely(pending < req->actual)) {
-                       memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-                       memcpy(req->buf + pending, prm->dma_area,
+                       memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+                       memcpy(req->buf + pending, runtime->dma_area,
                               req->actual - pending);
                } else {
-                       memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+                       memcpy(req->buf, runtime->dma_area + hw_ptr,
+                              req->actual);
                }
        } else {
                if (unlikely(pending < req->actual)) {
-                       memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-                       memcpy(prm->dma_area, req->buf + pending,
+                       memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+                       memcpy(runtime->dma_area, req->buf + pending,
                               req->actual - pending);
                } else {
-                       memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+                       memcpy(runtime->dma_area + hw_ptr, req->buf,
+                              req->actual);
                }
        }
 
+       spin_lock_irqsave(&prm->lock, flags);
+       /* update hw_ptr after data is copied to memory */
+       prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+       hw_ptr = prm->hw_ptr;
+       spin_unlock_irqrestore(&prm->lock, flags);
+       snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+       if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+               snd_pcm_period_elapsed(substream);
+
 exit:
        if (usb_ep_queue(ep, req, GFP_ATOMIC))
                dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
-       if (update_alsa)
-               snd_pcm_period_elapsed(substream);
 }
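With the ring buffer owned by the ALSA runtime, the hardware pointer is advanced only after the copy completes, and a period boundary is detected by comparing the new position against the bytes just added. The test in isolation:

    /* A period boundary was crossed iff the new position, reduced
     * modulo the period size, is smaller than the bytes just copied
     * (valid while req->actual <= period size). */
    if ((hw_ptr % period_bytes) < bytes_copied)
            snd_pcm_period_elapsed(substream);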
 
 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
                               struct snd_pcm_hw_params *hw_params)
 {
-       struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-       struct uac_rtd_params *prm;
-       int err;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prm = &uac->p_prm;
-       else
-               prm = &uac->c_prm;
-
-       err = snd_pcm_lib_malloc_pages(substream,
+       return snd_pcm_lib_malloc_pages(substream,
                                        params_buffer_bytes(hw_params));
-       if (err >= 0) {
-               prm->dma_bytes = substream->runtime->dma_bytes;
-               prm->dma_area = substream->runtime->dma_area;
-               prm->period_size = params_period_bytes(hw_params);
-       }
-
-       return err;
 }
 
 static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
 {
-       struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-       struct uac_rtd_params *prm;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               prm = &uac->p_prm;
-       else
-               prm = &uac->c_prm;
-
-       prm->dma_area = NULL;
-       prm->dma_bytes = 0;
-       prm->period_size = 0;
-
        return snd_pcm_lib_free_pages(substream);
 }
 
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
        if (err < 0)
                goto snd_fail;
 
-       strcpy(pcm->name, pcm_name);
+       strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
        pcm->private_data = uac;
        uac->pcm = pcm;
 
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
        snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
 
-       strcpy(card->driver, card_name);
-       strcpy(card->shortname, card_name);
+       strlcpy(card->driver, card_name, sizeof(card->driver));
+       strlcpy(card->shortname, card_name, sizeof(card->shortname));
        sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
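strcpy() could overflow the fixed-size name fields if a configfs-supplied name is long; strlcpy() bounds the copy to the destination size and always NUL-terminates, truncating if needed:

    /* Copies at most sizeof(dst) - 1 bytes, then terminates: */
    strlcpy(card->shortname, card_name, sizeof(card->shortname));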
index f0cdf89b850371e693db8fefc505efd90b72cd0d..83ba8a2eb6af9f95fe84a95e44904a20ada3c5ac 100644 (file)
@@ -2,6 +2,7 @@
 config USB_ASPEED_VHUB
        tristate "Aspeed vHub UDC driver"
        depends on ARCH_ASPEED || COMPILE_TEST
+       depends on USB_LIBCOMPOSITE
        help
          USB peripheral controller for the Aspeed AST2500 family
          SoCs supporting the "vHub" functionality and USB2.0
index 20ffb03ff6ac1823c366dcedc5ba0a2360ffeb34..e2927fb083cf14f3119fc71945a286176fa7990d 100644 (file)
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
        /* Check our state, cancel pending requests if needed */
        if (ep->ep0.state != ep0_state_token) {
                EPDBG(ep, "wrong state\n");
+               ast_vhub_nuke(ep, -EIO);
+
+               /*
+                * Accept the packet regardless; this seems to happen
+                * when stalling a SETUP packet that has an OUT data
+                * phase.
+                */
                ast_vhub_nuke(ep, 0);
                goto stall;
        }
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
        if (chunk && req->req.buf)
                memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
 
+       vhub_dma_workaround(ep->buf);
+
        /* Remember chunk size and trigger send */
        reg = VHUB_EP0_SET_TX_LEN(chunk);
        writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
        EPVDBG(ep, "rx prime\n");
 
        /* Prime endpoint for receiving data */
-       writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+       writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
 }
 
 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
index 80c9feac5147b5450cf0be37ce6ed29c8b44f0bb..5939eb1e97f209bc43538155ed666aaebcd28eaf 100644 (file)
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
        if (!req->req.dma) {
 
                /* For IN transfers, copy data over first */
-               if (ep->epn.is_in)
+               if (ep->epn.is_in) {
                        memcpy(ep->buf, req->req.buf + act, chunk);
+                       vhub_dma_workaround(ep->buf);
+               }
                writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
-       } else
+       } else {
+               if (ep->epn.is_in)
+                       vhub_dma_workaround(req->req.buf);
                writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+       }
 
        /* Start DMA */
        req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                                   struct ast_vhub_req *req)
 {
+       struct ast_vhub_desc *desc = NULL;
        unsigned int act = req->act_count;
        unsigned int len = req->req.length;
        unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 
        /* While we can create descriptors */
        while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
-               struct ast_vhub_desc *desc;
                unsigned int d_num;
 
                /* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                req->act_count = act = act + chunk;
        }
 
+       if (likely(desc))
+               vhub_dma_workaround(desc);
+
        /* Tell HW about new descriptors */
        writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
index 2b040257bc1f698f0097a4d673ed6cb9fffe3d6b..4ed03d33a5a92b53f836d9d6930722aa5c418cfe 100644 (file)
@@ -462,6 +462,39 @@ enum std_req_rc {
 #define DDBG(d, fmt, ...)      do { } while(0)
 #endif
 
+static inline void vhub_dma_workaround(void *addr)
+{
+       /*
+        * This works around a confirmed HW issue with the Aspeed chip.
+        *
+        * The core uses a different bus to memory than the AHB going to
+        * the USB device controller. Due to the latter having a higher
+        * priority than the core for arbitration on that bus, it's
+        * possible for an MMIO to the device, followed by a DMA by the
+        * device from memory to all be performed and serviced before
+        * a previous store to memory gets completed.
+        *
+        * Thus the following scenario can happen:
+        *
+        *    - Driver writes to a DMA descriptor (Mbus)
+        *    - Driver writes to the MMIO register to start the DMA (AHB)
+        *    - The gadget sees the second write and sends a read of the
+        *      descriptor to the memory controller (Mbus)
+        *    - The gadget hits memory before the descriptor write
+        *      causing it to read an obsolete value.
+        *
+        * Thankfully the problem is limited to the USB gadget device, other
+        * masters in the SoC all have a lower priority than the core, thus
+        * ensuring that the store by the core arrives first.
+        *
+        * The workaround consists of using a dummy read of the memory before
+        * doing the MMIO writes. This will ensure that the previous writes
+        * have been "pushed out".
+        */
+       mb();
+       (void)__raw_readl((void __iomem *)addr);
+}
+
 /* core.c */
 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
                   int status);
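
A note on vhub_dma_workaround() above: it is the generic "flush posted writes before ringing the doorbell" idiom, and the epn_kick and kick_desc hunks earlier in this series apply it to both the single-buffer and descriptor-list DMA paths. Below is a minimal sketch of the idiom under stated assumptions; my_desc, kick_dma, kick_regs and DOORBELL_GO are hypothetical names, not this driver's real register layout.

        #include <linux/io.h>

        struct my_desc { __le32 addr; };        /* hypothetical descriptor */

        static void kick_dma(struct my_desc *desc, dma_addr_t buf_dma,
                             void __iomem *kick_regs)
        {
                desc->addr = cpu_to_le32(buf_dma);       /* 1. descriptor store (Mbus)  */
                mb();                                    /* 2. order the store ...      */
                (void)__raw_readl((void __iomem *)desc); /* 3. ... and pull it through  */
                writel(DOORBELL_GO, kick_regs);          /* 4. only now start DMA (AHB) */
        }
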
index a3ecce62662ba6cdc1e0f7ee65e90b2445537b56..11e25a3f4f1fa86ea42677d7efbbdd1f0f83e6b5 100644 (file)
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
 
                r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-               msleep(3);
+               mdelay(3);
 
                r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-               msleep(1);
+               mdelay(1);
 
                r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
        r8a66597->ep0_req->length = 2;
        /* AV: what happens if we get called again before that gets through? */
        spin_unlock(&r8a66597->lock);
-       r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+       r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
        spin_lock(&r8a66597->lock);
 }
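
Both r8a66597 changes above are atomic-context fixes: init_controller() can run with the device spinlock held, where the scheduling msleep() is forbidden but the busy-waiting mdelay() is allowed, and an allocation made under the lock must use GFP_ATOMIC because GFP_KERNEL may sleep in reclaim. A hedged standalone sketch of that rule, not taken from the driver:

        #include <linux/delay.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(lock);

        static void atomic_section(void)
        {
                unsigned long flags;
                void *buf;

                spin_lock_irqsave(&lock, flags);
                mdelay(3);                      /* busy-waits; never schedules  */
                buf = kmalloc(16, GFP_ATOMIC);  /* fails rather than sleeping   */
                /* msleep(3);           sleep-in-atomic bug under a spinlock    */
                /* kmalloc(16, GFP_KERNEL);  may sleep in reclaim: same bug     */
                spin_unlock_irqrestore(&lock, flags);
                kfree(buf);
        }
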
 
index 1fbfd89d0a0f00945abec540a75c58582a3d1943..387f124a83340b5f27eb9cfd2e4bb0323fcbd954 100644 (file)
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
        return 0;
 }
 
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 {
        struct xhci_dbc         *dbc = xhci->dbc;
 
        if (dbc->state == DS_DISABLED)
-               return;
+               return -1;
 
        writel(0, &dbc->regs->control);
        xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
+
+       return 0;
 }
 
 static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
 
 static void xhci_dbc_stop(struct xhci_hcd *xhci)
 {
+       int ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
                xhci_dbc_tty_unregister_device(xhci);
 
        spin_lock_irqsave(&dbc->lock, flags);
-       xhci_do_dbc_stop(xhci);
+       ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);
 
-       pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+       if (!ret)
+               pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
 }
 
 static void
index acbd3d7b8828693f79a51f19ad70fb1aa4ade050..ef350c33dc4a8615a0188af0cea13ae29a876532 100644 (file)
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
        if (!ep->stream_info)
                return NULL;
 
-       if (stream_id > ep->stream_info->num_streams)
+       if (stream_id >= ep->stream_info->num_streams)
                return NULL;
        return ep->stream_info->stream_rings[stream_id];
 }
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 
        dev = xhci->devs[slot_id];
 
-       trace_xhci_free_virt_device(dev);
-
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;
 
+       trace_xhci_free_virt_device(dev);
+
        if (dev->tt_info)
                old_active_eps = dev->tt_info->active_eps;
 
index a8c1d073cba05e3b070e73722d02b32eaf112e69..4b463e5202a421705be74610a136239dc3a9c423 100644 (file)
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
        unsigned long mask;
        unsigned int port;
        bool idle, enable;
-       int err;
+       int err = 0;
 
        memset(&rsp, 0, sizeof(rsp));
 
@@ -1223,10 +1223,10 @@ disable_rpm:
        pm_runtime_disable(&pdev->dev);
        usb_put_hcd(tegra->hcd);
 disable_xusbc:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
 disable_xusba:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
 put_padctl:
        tegra_xusb_padctl_put(tegra->padctl);
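
The tegra fix deserves a second look: `&pdev->dev.pm_domain` is the address of a struct member and can never be NULL, so the old condition was always false and the powergates were never switched off on the error path. A standalone illustration of the difference (hypothetical struct and field names):

        #include <stdio.h>

        struct obj { void *pm_domain; };

        int main(void)
        {
                struct obj o = { .pm_domain = NULL };

                if (!&o.pm_domain)      /* always false: a member's address is never NULL */
                        puts("never printed");
                if (!o.pm_domain)       /* the intended test on the pointer value */
                        puts("no PM domain attached");
                return 0;
        }
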
index 410544ffe78f68fa73e7328e8e105028a9116008..88b427434bd82536c653a911bb393c4bdb87814b 100644 (file)
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
        TP_ARGS(ring, trb)
 );
 
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev),
+       TP_STRUCT__entry(
+               __field(void *, vdev)
+               __field(unsigned long long, out_ctx)
+               __field(unsigned long long, in_ctx)
+               __field(u8, fake_port)
+               __field(u8, real_port)
+               __field(u16, current_mel)
+
+       ),
+       TP_fast_assign(
+               __entry->vdev = vdev;
+               __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+               __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+               __entry->fake_port = (u8) vdev->fake_port;
+               __entry->real_port = (u8) vdev->real_port;
+               __entry->current_mel = (u16) vdev->current_mel;
+               ),
+       TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+               __entry->vdev, __entry->in_ctx, __entry->out_ctx,
+               __entry->fake_port, __entry->real_port, __entry->current_mel
+       )
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_virt_dev,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
        TP_ARGS(vdev)
 );
 
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
-       TP_PROTO(struct xhci_virt_device *vdev),
-       TP_ARGS(vdev)
-);
-
 DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev)
index 8c8da2d657fa1008c1e612e6f30d3e9716d534b6..68e6132aa8b2a3985f01ac12c6bdd75882c4748d 100644 (file)
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
        spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+       struct xhci_port        **ports;
+       int                     port_index;
+       u32                     status;
+       u32                     portsc;
+
+       status = readl(&xhci->op_regs->status);
+       if (status & STS_EINT)
+               return true;
+       /*
+        * Checking STS_EINT is not enough as there is a lag between a change
+        * bit being set and the Port Status Change Event that it generated
+        * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+        */
+
+       port_index = xhci->usb2_rhub.num_ports;
+       ports = xhci->usb2_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       port_index = xhci->usb3_rhub.num_ports;
+       ports = xhci->usb3_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       return false;
+}
+
 /*
  * Stop HC (not bus-specific)
  *
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-       u32                     command, temp = 0, status;
+       u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
+               /*
+                * Some controllers take up to 55+ ms to complete the controller
+                * restore, so set the timeout to 100 ms. The xHCI specification
+                * doesn't mention any timeout value.
+                */
                if (xhci_handshake(&xhci->op_regs->status,
-                             STS_RESTORE, 0, 10 * 1000)) {
+                             STS_RESTORE, 0, 100 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
  done:
        if (retval == 0) {
                /* Resume root hubs only when have pending events. */
-               status = readl(&xhci->op_regs->status);
-               if (status & STS_EINT) {
+               if (xhci_pending_portevent(xhci)) {
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                        usb_hcd_resume_root_hub(hcd);
                }
@@ -3012,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        if (!list_empty(&ep->ring->td_list)) {
                dev_err(&udev->dev, "EP not empty, refuse reset\n");
                spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
                goto cleanup;
        }
        xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
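
The timeout bump above (10 ms to 100 ms) sits inside xhci_handshake(), which is a bounded register poll. A hedged, simplified sketch of the same wait expressed with readl_poll_timeout_atomic() from <linux/iopoll.h>; this is an illustration of the semantics, not how xhci.c actually spells it:

        #include <linux/iopoll.h>

        static int wait_for_restore(void __iomem *status_reg)
        {
                u32 val;

                /* poll every 1 us; give up after 100 * 1000 us = 100 ms */
                return readl_poll_timeout_atomic(status_reg, val,
                                                 !(val & STS_RESTORE),
                                                 1, 100 * 1000);
        }
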
index 939e2f86b595eecbf1f1ecac7dcd7f39965d238d..841e89ffe2e9d88f6f81255340da58144916ca59 100644 (file)
@@ -382,6 +382,10 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
+#define PORT_CHANGE_MASK       (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+                                PORT_RC | PORT_PLC | PORT_CEC)
+
+
 /* Cold Attach Status - xHC can set this bit to report device attached during
  * Sx state. Warm port reset should be performed to clear this bit and move port
  * to connected state.
index 8abb6cbbd98a17d6b6ff95d0ad88268832780b87..3be40eaa1ac9b2caf493a8fd21e8980982b5d9a1 100644 (file)
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
                          loff_t *ppos)
 {
        struct usb_yurex *dev;
-       int retval = 0;
-       int bytes_read = 0;
+       int len = 0;
        char in_buffer[20];
        unsigned long flags;
 
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 
        mutex_lock(&dev->io_mutex);
        if (!dev->interface) {          /* already disconnected */
-               retval = -ENODEV;
-               goto exit;
+               mutex_unlock(&dev->io_mutex);
+               return -ENODEV;
        }
 
        spin_lock_irqsave(&dev->lock, flags);
-       bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+       len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
        spin_unlock_irqrestore(&dev->lock, flags);
-
-       if (*ppos < bytes_read) {
-               if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
-                       retval = -EFAULT;
-               else {
-                       retval = bytes_read - *ppos;
-                       *ppos += bytes_read;
-               }
-       }
-
-exit:
        mutex_unlock(&dev->io_mutex);
-       return retval;
+
+       return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
 static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
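
The yurex rewrite drops the hand-rolled *ppos bookkeeping in favour of simple_read_from_buffer(), the fs/libfs.c helper that bundles the bounds check, copy_to_user() and offset update into one call. A sketch of its behaviour, paraphrased from memory rather than quoted, so treat the details as approximate:

        ssize_t simple_read_from_buffer(void __user *to, size_t count,
                                        loff_t *ppos, const void *from,
                                        size_t available)
        {
                loff_t pos = *ppos;
                size_t ret;

                if (pos < 0)
                        return -EINVAL;
                if (pos >= available || !count)
                        return 0;               /* EOF or zero-length read */
                if (count > available - pos)
                        count = available - pos;
                ret = copy_to_user(to, from + pos, count);
                if (ret == count)
                        return -EFAULT;
                count -= ret;
                *ppos = pos + count;            /* advance the file position */
                return count;
        }
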
index 900875f326d7c2939f053f2227b4dec9942aa9ef..f7c96d209eda78a73b812685a931c59b53a1a6c8 100644 (file)
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
        if (pdata->init && pdata->init(pdev) != 0)
                return -EINVAL;
 
+#ifdef CONFIG_PPC32
        if (pdata->big_endian_mmio) {
                _fsl_readl = _fsl_readl_be;
                _fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
                _fsl_readl = _fsl_readl_le;
                _fsl_writel = _fsl_writel_le;
        }
+#endif
 
        /* request irq */
        p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
 /*
  * state file in sysfs
  */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct otg_fsm *fsm = &fsl_otg_dev->fsm;
index bdd7a5ad3bf1c0060bcef8a3a1dc640b4a45cbcc..3bb1fff02bedd0e076581baabeff440e4552e3ce 100644 (file)
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
        r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
                            USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                            value, index, buf, bufsize, DEFAULT_TIMEOUT);
-       if (r < bufsize) {
+       if (r < (int)bufsize) {
                if (r >= 0) {
                        dev_err(&dev->dev,
                                "short control message received (%d < %u)\n",
index eb6c26cbe5792b0e535c77b9e2e245b700071458..626a29d9aa58d7e13770f048ae8c705dcfab2fea 100644 (file)
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
        { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+       { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+       { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+       { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
        { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+       { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
        { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+       { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+       { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
        { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+       { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+       { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
        { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
        { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
        { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
index 5169624d8b11386ecb07d50234850ca99397150f..38d43c4b7ce547700e007f6f406d17f2c65d3ca4 100644 (file)
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
                             3, /* get pins */
                             USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
                             0, 0, data, 1, 2000);
-       if (rc >= 0)
+       if (rc == 1)
                *value = *data;
+       else if (rc >= 0)
+               rc = -EIO;
 
        kfree(data);
        return rc;
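
The two serial fixes above both harden usb_control_msg() return-value handling: keyspan_pda now treats anything other than the expected one-byte transfer as -EIO, and the ch341 hunk further up casts bufsize to int so that a negative errno is not wrecked by unsigned promotion. A standalone sketch of the signed/unsigned pitfall behind the ch341 cast:

        #include <stdio.h>

        int main(void)
        {
                int r = -32;                    /* e.g. -EPIPE from the USB core */
                unsigned int bufsize = 2;

                if (r < bufsize)                /* never true: r converts to ~4e9 */
                        puts("unreachable");
                if (r < (int)bufsize)           /* true: compared as signed */
                        puts("error path taken");
                return 0;
        }
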
index fdceb46d9fc61a0c5eea2f113abd494dc4cc693b..b580b4c7fa488bbf80f1b5628e4e46bb8e28807b 100644 (file)
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
        }
 
        dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+       if (urb->actual_length < 1)
+               goto out;
+
        dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
                mos7840_port->MsrLsr, mos7840_port->port_num);
        data = urb->transfer_buffer;
index 8a201dd53d36b352b3d7fb68cf6486c08ddb96ff..d1d20252bad86889bb87b7f755545020abd6b0ea 100644 (file)
@@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
        u64 ts_nsec = local_clock();
        unsigned long rem_nsec;
 
+       mutex_lock(&port->logbuffer_lock);
        if (!port->logbuffer[port->logbuffer_head]) {
                port->logbuffer[port->logbuffer_head] =
                                kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
-               if (!port->logbuffer[port->logbuffer_head])
+               if (!port->logbuffer[port->logbuffer_head]) {
+                       mutex_unlock(&port->logbuffer_lock);
                        return;
+               }
        }
 
        vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
 
-       mutex_lock(&port->logbuffer_lock);
-
        if (tcpm_log_full(port)) {
                port->logbuffer_head = max(port->logbuffer_head - 1, 0);
                strcpy(tmpbuffer, "overflow");
@@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
 
        tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
 
+       port->supply_voltage = mv;
+       port->current_limit = max_ma;
+
        if (port->tcpc->set_current_limit)
                ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
 
@@ -2136,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
                         * PPS APDO. Again skip the first sink PDO as this will
                         * always be 5V 3A.
                         */
-                       for (j = i; j < port->nr_snk_pdo; j++) {
+                       for (j = 1; j < port->nr_snk_pdo; j++) {
                                pdo = port->snk_pdo[j];
 
                                switch (pdo_type(pdo)) {
@@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
        tcpm_set_attached_state(port, false);
        port->try_src_count = 0;
        port->try_snk_count = 0;
-       port->supply_voltage = 0;
-       port->current_limit = 0;
        port->usb_type = POWER_SUPPLY_USB_TYPE_C;
 
        power_supply_changed(port->psy);
@@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port)
                    tcpm_port_is_sink(port) &&
                    time_is_after_jiffies(port->delayed_runtime)) {
                        tcpm_set_state(port, SNK_DISCOVERY,
-                                      port->delayed_runtime - jiffies);
+                                      jiffies_to_msecs(port->delayed_runtime -
+                                                       jiffies));
                        break;
                }
                tcpm_set_state(port, unattached_state(port), 0);
index bd5cca5632b395def6384ec233d8ba5926e81c93..8d0a6fe748bdc50ca99800c3d6ba5680a4e9f0bd 100644 (file)
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
        }
 
        if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+               typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+               switch (con->status.partner_type) {
+               case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+                       typec_set_data_role(con->port, TYPEC_HOST);
+                       break;
+               case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+                       typec_set_data_role(con->port, TYPEC_DEVICE);
+                       break;
+               default:
+                       break;
+               }
+
                if (con->status.connected)
                        ucsi_register_partner(con);
                else
index 44eb4e1ea817b2e38eab36cee60021368508a342..a18112a83faed2df09e49c0a5a93d2fce0823c5f 100644 (file)
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       /* This will make sure we can use ioremap_nocache() */
+       status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+
        /*
         * NOTE: The memory region for the data structures is used also in an
         * operation region, which means ACPI has already reserved it. Therefore
index 24ee2605b9f043c9c1128d73bd44a4aa47322a37..42dc1d3d71cf05a7c91c5316ee832b62b15bf75e 100644 (file)
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
        def_bool y if !S390
 
 config VFIO_PCI_IGD
-       depends on VFIO_PCI
-       def_bool y if X86
+       bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+       depends on VFIO_PCI && X86
+       default y
+       help
+         Support for Intel IGD specific extensions to enable direct
+         assignment to virtual machines.  This includes exposing an IGD
+         specific firmware table and read-only copies of the host bridge
+         and LPC bridge config space.
+
+         To enable Intel IGD assignment through vfio-pci, say Y.
index b423a309a6e0d08930599cb1c0bebf5c5e08ab9a..125b58eff9369618e9a40398e089d8a0bee8deae 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/vgaarb.h>
+#include <linux/nospec.h>
 
 #include "vfio_pci_private.h"
 
@@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
                        if (info.index >=
                            VFIO_PCI_NUM_REGIONS + vdev->num_regions)
                                return -EINVAL;
+                       info.index = array_index_nospec(info.index,
+                                                       VFIO_PCI_NUM_REGIONS +
+                                                       vdev->num_regions);
 
                        i = info.index - VFIO_PCI_NUM_REGIONS;
 
index 759a5bdd40e1b37305f4fd850b60a87f1d3aafae..7cd63b0c1a4623edd236458cbf047cd472fa4b02 100644 (file)
@@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
-               unsigned long tce, unsigned long size,
+               unsigned long tce, unsigned long shift,
                unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem;
 
-       mem = mm_iommu_lookup(container->mm, tce, size);
+       mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
        if (!mem)
                return -EINVAL;
 
-       ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+       ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
        if (ret)
                return -EINVAL;
 
@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
        if (!pua)
                return;
 
-       ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
+       ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
                        &hpa, &mem);
        if (ret)
                pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
                                entry + i);
 
                ret = tce_iommu_prereg_ua_to_hpa(container,
-                               tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+                               tce, tbl->it_page_shift, &hpa, &mem);
                if (ret)
                        break;
 
index 2c75b33db4ac19768ea77415685b4fac700dc4d3..3e5b17710a4f1fa47eb4f1333c17c96e1eae2cdd 100644 (file)
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
        struct page *page[1];
        struct vm_area_struct *vma;
        struct vm_area_struct *vmas[1];
+       unsigned int flags = 0;
        int ret;
 
+       if (prot & IOMMU_WRITE)
+               flags |= FOLL_WRITE;
+
+       down_read(&mm->mmap_sem);
        if (mm == current->mm) {
-               ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
-                                             page, vmas);
+               ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
        } else {
-               unsigned int flags = 0;
-
-               if (prot & IOMMU_WRITE)
-                       flags |= FOLL_WRITE;
-
-               down_read(&mm->mmap_sem);
                ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
                                            vmas, NULL);
                /*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                        ret = -EOPNOTSUPP;
                        put_page(page[0]);
                }
-               up_read(&mm->mmap_sem);
        }
+       up_read(&mm->mmap_sem);
 
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
index 686dc670fd294b3077cf363241338ab871b26244..29756d88799b630f2c73ca097b56b092a14a7d5a 100644 (file)
@@ -1226,7 +1226,8 @@ err_used:
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-       sockfd_put(sock);
+       if (sock)
+               sockfd_put(sock);
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
index 4110ba7d7ca9070f783c8e23aa55adca9f1247dc..e91edef98633e9d624ca50f171c13d0978510eca 100644 (file)
@@ -150,6 +150,17 @@ config FRAMEBUFFER_CONSOLE_ROTATION
          such that other users of the framebuffer will remain normally
          oriented.
 
+config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+       bool "Framebuffer Console Deferred Takeover"
+       depends on FRAMEBUFFER_CONSOLE=y && DUMMY_CONSOLE=y
+       help
+         If enabled, this defers the framebuffer console taking over the
+         console from the dummy console until the first text is displayed on
+         the console. This is useful in combination with the "quiet" kernel
+         commandline option to keep the framebuffer contents initially put up
+         by the firmware in place, rather than replacing the contents with a
+         black screen as soon as fbcon loads.
+
 config STI_CONSOLE
         bool "STI text console"
        depends on PARISC && HAS_IOMEM
index f2eafe2ed98066866d6006f05f5019ead3fdae34..0254251fdd79a745e77a46793915e55425b4770f 100644 (file)
 #define DUMMY_ROWS     CONFIG_DUMMY_CONSOLE_ROWS
 #endif
 
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+/* These are both protected by the console_lock */
+static RAW_NOTIFIER_HEAD(dummycon_output_nh);
+static bool dummycon_putc_called;
+
+void dummycon_register_output_notifier(struct notifier_block *nb)
+{
+       raw_notifier_chain_register(&dummycon_output_nh, nb);
+
+       if (dummycon_putc_called)
+               nb->notifier_call(nb, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(dummycon_register_output_notifier);
+
+void dummycon_unregister_output_notifier(struct notifier_block *nb)
+{
+       raw_notifier_chain_unregister(&dummycon_output_nh, nb);
+}
+EXPORT_SYMBOL_GPL(dummycon_unregister_output_notifier);
+
+static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+{
+       dummycon_putc_called = true;
+       raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
+}
+
+static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
+                          int count, int ypos, int xpos)
+{
+       int i;
+
+       if (!dummycon_putc_called) {
+               /* Ignore erases */
+               for (i = 0 ; i < count; i++) {
+                       if (s[i] != vc->vc_video_erase_char)
+                               break;
+               }
+               if (i == count)
+                       return;
+
+               dummycon_putc_called = true;
+       }
+
+       raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
+}
+
+static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+{
+       /* Redraw, so that we get putc(s) for output done while blanked */
+       return 1;
+}
+#else
+static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
+static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
+                          int count, int ypos, int xpos) { }
+static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+{
+       return 0;
+}
+#endif
+
 static const char *dummycon_startup(void)
 {
     return "dummy device";
@@ -44,9 +105,6 @@ static void dummycon_init(struct vc_data *vc, int init)
 static void dummycon_deinit(struct vc_data *vc) { }
 static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height,
                           int width) { }
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
-                          int count, int ypos, int xpos) { }
 static void dummycon_cursor(struct vc_data *vc, int mode) { }
 
 static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
@@ -61,11 +119,6 @@ static int dummycon_switch(struct vc_data *vc)
        return 0;
 }
 
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
-{
-       return 0;
-}
-
 static int dummycon_font_set(struct vc_data *vc, struct console_font *font,
                             unsigned int flags)
 {
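
The new notifier gives a console driver a hook that fires on the first real output to the dummy console; fbcon's consumer appears in the fbcon.c hunks below. Note the registration-time replay: if output already happened, the callback is invoked immediately, so a late registrant cannot miss the event. A hedged sketch of a consumer, assuming the declarations are exposed via <linux/console.h> as fbcon uses them; my_takeover() is hypothetical:

        #include <linux/console.h>
        #include <linux/notifier.h>

        static int my_output_cb(struct notifier_block *nb, unsigned long action,
                                void *data)
        {
                my_takeover();          /* hypothetical; runs under console_lock */
                return NOTIFY_OK;
        }

        static struct notifier_block my_nb = { .notifier_call = my_output_cb };

        /* with console_lock held: */
        dummycon_register_output_notifier(&my_nb);
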
index c910e74d46ffff43b56c36aae1dedf04494c95af..5fb156bdcf4e5450f0d8e0348010dadee09bd05f 100644 (file)
@@ -129,6 +129,12 @@ static inline void fbcon_map_override(void)
 }
 #endif /* CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY */
 
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+static bool deferred_takeover = true;
+#else
+#define deferred_takeover false
+#endif
+
 /* font data */
 static char fontname[40];
 
@@ -499,6 +505,12 @@ static int __init fb_console_setup(char *this_opt)
                                margin_color = simple_strtoul(options, &options, 0);
                        continue;
                }
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+               if (!strcmp(options, "nodefer")) {
+                       deferred_takeover = false;
+                       continue;
+               }
+#endif
        }
        return 1;
 }
@@ -828,6 +840,8 @@ static int set_con2fb_map(int unit, int newidx, int user)
        struct fb_info *oldinfo = NULL;
        int found, err = 0;
 
+       WARN_CONSOLE_UNLOCKED();
+
        if (oldidx == newidx)
                return 0;
 
@@ -3044,6 +3058,8 @@ static int fbcon_fb_unbind(int idx)
 {
        int i, new_idx = -1, ret = 0;
 
+       WARN_CONSOLE_UNLOCKED();
+
        if (!fbcon_has_console_bind)
                return 0;
 
@@ -3094,6 +3110,11 @@ static int fbcon_fb_unregistered(struct fb_info *info)
 {
        int i, idx;
 
+       WARN_CONSOLE_UNLOCKED();
+
+       if (deferred_takeover)
+               return 0;
+
        idx = info->node;
        for (i = first_fb_vc; i <= last_fb_vc; i++) {
                if (con2fb_map[i] == idx)
@@ -3131,6 +3152,16 @@ static int fbcon_fb_unregistered(struct fb_info *info)
 static void fbcon_remap_all(int idx)
 {
        int i;
+
+       WARN_CONSOLE_UNLOCKED();
+
+       if (deferred_takeover) {
+               for (i = first_fb_vc; i <= last_fb_vc; i++)
+                       con2fb_map_boot[i] = idx;
+               fbcon_map_override();
+               return;
+       }
+
        for (i = first_fb_vc; i <= last_fb_vc; i++)
                set_con2fb_map(i, idx, 0);
 
@@ -3177,9 +3208,16 @@ static int fbcon_fb_registered(struct fb_info *info)
 {
        int ret = 0, i, idx;
 
+       WARN_CONSOLE_UNLOCKED();
+
        idx = info->node;
        fbcon_select_primary(info);
 
+       if (deferred_takeover) {
+               pr_info("fbcon: Deferring console take-over\n");
+               return 0;
+       }
+
        if (info_idx == -1) {
                for (i = first_fb_vc; i <= last_fb_vc; i++) {
                        if (con2fb_map_boot[i] == idx) {
@@ -3555,8 +3593,46 @@ static int fbcon_init_device(void)
        return 0;
 }
 
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+static struct notifier_block fbcon_output_nb;
+
+static int fbcon_output_notifier(struct notifier_block *nb,
+                                unsigned long action, void *data)
+{
+       int i;
+
+       WARN_CONSOLE_UNLOCKED();
+
+       pr_info("fbcon: Taking over console\n");
+
+       dummycon_unregister_output_notifier(&fbcon_output_nb);
+       deferred_takeover = false;
+       logo_shown = FBCON_LOGO_DONTSHOW;
+
+       for (i = 0; i < FB_MAX; i++) {
+               if (registered_fb[i])
+                       fbcon_fb_registered(registered_fb[i]);
+       }
+
+       return NOTIFY_OK;
+}
+
+static void fbcon_register_output_notifier(void)
+{
+       fbcon_output_nb.notifier_call = fbcon_output_notifier;
+       dummycon_register_output_notifier(&fbcon_output_nb);
+}
+#else
+static inline void fbcon_register_output_notifier(void) {}
+#endif
+
 static void fbcon_start(void)
 {
+       if (deferred_takeover) {
+               fbcon_register_output_notifier();
+               return;
+       }
+
        if (num_registered_fb) {
                int i;
 
@@ -3583,6 +3659,13 @@ static void fbcon_exit(void)
        if (fbcon_has_exited)
                return;
 
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+       if (deferred_takeover) {
+               dummycon_unregister_output_notifier(&fbcon_output_nb);
+               deferred_takeover = false;
+       }
+#endif
+
        kfree((void *)softback_buf);
        softback_buf = 0UL;
 
index 451e833f593175886fd4fa6ae066860dad4cf95e..48b154276179f0269c7444e38d6717ac493a06d5 100644 (file)
@@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND)    += pvcalls-front.o
 xen-evtchn-y                           := evtchn.o
 xen-gntdev-y                           := gntdev.o
 xen-gntalloc-y                         := gntalloc.o
-xen-privcmd-y                          := privcmd.o
+xen-privcmd-y                          := privcmd.o privcmd-buf.o
index 762378f1811cc9069dc6171edb55aaa3610b82fa..08e4af04d6f2c32850a049a83721933a82883b8c 100644 (file)
@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
                xen_irq_info_cleanup(info);
        }
 
-       BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
        xen_free_irq(irq);
 }
 
index 2473b0a9e6e41d5d51b47e318d7e3b26d81ec5f6..ba9f3eec2bd00f6f39eb952ed5815e7b45c9735e 100644 (file)
@@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 
        return 0;
 }
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 
 /**
  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
        }
        free_xenballooned_pages(nr_pages, pages);
 }
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
index 8835065029d34a150a91662bb4562b4a41be50ca..c93d8ef8df3483bbc393b2101c189120f844b634 100644 (file)
@@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                return;
        }
 
-       if (sysrq_key != '\0')
-               xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+       if (sysrq_key != '\0') {
+               err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+               if (err) {
+                       pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+                              __func__, err);
+                       xenbus_transaction_end(xbt, 1);
+                       return;
+               }
+       }
 
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
@@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void)
                        continue;
                snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
                         shutdown_handlers[idx].command);
-               xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               if (err) {
+                       pr_err("%s: Error %d writing %s\n", __func__,
+                               err, node);
+                       return err;
+               }
        }
 
        return 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644 (file)
index 0000000..df1ed37
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+                       "the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+       struct mutex lock;
+       struct list_head list;
+       unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+       struct privcmd_buf_private *file_priv;
+       struct list_head list;
+       unsigned int users;
+       unsigned int n_pages;
+       struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv;
+
+       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       mutex_init(&file_priv->lock);
+       INIT_LIST_HEAD(&file_priv->list);
+
+       file->private_data = file_priv;
+
+       return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+       unsigned int i;
+
+       vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+       list_del(&vma_priv->list);
+
+       for (i = 0; i < vma_priv->n_pages; i++)
+               if (vma_priv->pages[i])
+                       __free_page(vma_priv->pages[i]);
+
+       kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       while (!list_empty(&file_priv->list)) {
+               vma_priv = list_first_entry(&file_priv->list,
+                                           struct privcmd_buf_vma_private,
+                                           list);
+               privcmd_buf_vmapriv_free(vma_priv);
+       }
+
+       mutex_unlock(&file_priv->lock);
+
+       kfree(file_priv);
+
+       return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+       if (!vma_priv)
+               return;
+
+       mutex_lock(&vma_priv->file_priv->lock);
+       vma_priv->users++;
+       mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+       struct privcmd_buf_private *file_priv;
+
+       if (!vma_priv)
+               return;
+
+       file_priv = vma_priv->file_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       vma_priv->users--;
+       if (!vma_priv->users)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+       pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+                vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+                vmf->pgoff, (void *)vmf->address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+       .open = privcmd_buf_vma_open,
+       .close = privcmd_buf_vma_close,
+       .fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+       unsigned long count = vma_pages(vma);
+       unsigned int i;
+       int ret = 0;
+
+       if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+           file_priv->allocated + count > limit)
+               return -EINVAL;
+
+       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+                          GFP_KERNEL);
+       if (!vma_priv)
+               return -ENOMEM;
+
+       vma_priv->n_pages = count;
+       count = 0;
+       for (i = 0; i < vma_priv->n_pages; i++) {
+               vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!vma_priv->pages[i])
+                       break;
+               count++;
+       }
+
+       mutex_lock(&file_priv->lock);
+
+       file_priv->allocated += count;
+
+       vma_priv->file_priv = file_priv;
+       vma_priv->users = 1;
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+       vma->vm_ops = &privcmd_buf_vm_ops;
+       vma->vm_private_data = vma_priv;
+
+       list_add(&vma_priv->list, &file_priv->list);
+
+       if (vma_priv->n_pages != count)
+               ret = -ENOMEM;
+       else
+               for (i = 0; i < vma_priv->n_pages; i++) {
+                       ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+                                            vma_priv->pages[i]);
+                       if (ret)
+                               break;
+               }
+
+       if (ret)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+
+       return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+       .owner = THIS_MODULE,
+       .open = privcmd_buf_open,
+       .release = privcmd_buf_release,
+       .mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/hypercall",
+       .fops = &xen_privcmdbuf_fops,
+};
index 8ae0349d9f0ae47036ed2b6b8e968230c1fdfb41..7e6e682104dc4e9a77d8149e2f4500ded81b41b8 100644 (file)
@@ -1007,12 +1007,21 @@ static int __init privcmd_init(void)
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
+
+       err = misc_register(&xen_privcmdbuf_dev);
+       if (err != 0) {
+               pr_err("Could not register Xen hypercall-buf device\n");
+               misc_deregister(&privcmd_dev);
+               return err;
+       }
+
        return 0;
 }
 
 static void __exit privcmd_exit(void)
 {
        misc_deregister(&privcmd_dev);
+       misc_deregister(&xen_privcmdbuf_dev);
 }
 
 module_init(privcmd_init);
index 14facaeed36fda1a1492aaa2a88a72bc8855c450..0dd9f8f67ee30efc849a7bdf2085036c0c0e84ab 100644 (file)
@@ -1,3 +1,6 @@
 #include <linux/fs.h>
 
 extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
index 7bc88fd43cfc84d05873893ef4ddec8307e76c2a..e2f3e8b0fba9ff160a7c82a37e64cf5fe0b3c8f0 100644 (file)
@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 {
        struct v2p_entry *entry;
        unsigned long flags;
+       int err;
 
        if (try) {
                spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                        scsiback_del_translation_entry(info, vir);
                }
        } else if (!try) {
-               xenbus_printf(XBT_NIL, info->dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
        }
 }
 
@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
        val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
        if (IS_ERR(val)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
        strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
                           &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
        if (XENBUS_EXIST_ERR(err)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
 
index e1d20124ec0e8698a1e8a5940537ff45f2e57d2c..27454594e37a1e1ca6a7ad25524090954d50a1de 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,7 +5,6 @@
  *     Implements an efficient asynchronous io interface.
  *
  *     Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
- *     Copyright 2018 Christoph Hellwig.
  *
  *     See ../COPYING for licensing terms.
  */
@@ -165,22 +164,10 @@ struct fsync_iocb {
        bool                    datasync;
 };
 
-struct poll_iocb {
-       struct file             *file;
-       __poll_t                events;
-       struct wait_queue_head  *head;
-
-       union {
-               struct wait_queue_entry wait;
-               struct work_struct      work;
-       };
-};
-
 struct aio_kiocb {
        union {
                struct kiocb            rw;
                struct fsync_iocb       fsync;
-               struct poll_iocb        poll;
        };
 
        struct kioctx           *ki_ctx;
@@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
                        iocb->aio_rw_flags))
                return -EINVAL;
+
        req->file = fget(iocb->aio_fildes);
        if (unlikely(!req->file))
                return -EBADF;
@@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        return 0;
 }
 
-/* need to use list_del_init so we can check if item was present */
-static inline bool __aio_poll_remove(struct poll_iocb *req)
-{
-       if (list_empty(&req->wait.entry))
-               return false;
-       list_del_init(&req->wait.entry);
-       return true;
-}
-
-static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       fput(iocb->poll.file);
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
-static void aio_poll_work(struct work_struct *work)
-{
-       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
-
-       if (!list_empty_careful(&iocb->ki_list))
-               aio_remove_iocb(iocb);
-       __aio_poll_complete(iocb, iocb->poll.events);
-}
-
-static int aio_poll_cancel(struct kiocb *iocb)
-{
-       struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
-       struct poll_iocb *req = &aiocb->poll;
-       struct wait_queue_head *head = req->head;
-       bool found = false;
-
-       spin_lock(&head->lock);
-       found = __aio_poll_remove(req);
-       spin_unlock(&head->lock);
-
-       if (found) {
-               req->events = 0;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-       return 0;
-}
-
-static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-               void *key)
-{
-       struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
-       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
-       struct file *file = req->file;
-       __poll_t mask = key_to_poll(key);
-
-       assert_spin_locked(&req->head->lock);
-
-       /* for instances that support it check for an event match first: */
-       if (mask && !(mask & req->events))
-               return 0;
-
-       mask = file->f_op->poll_mask(file, req->events) & req->events;
-       if (!mask)
-               return 0;
-
-       __aio_poll_remove(req);
-
-       /*
-        * Try completing without a context switch if we can acquire ctx_lock
-        * without spinning.  Otherwise we need to defer to a workqueue to
-        * avoid a deadlock due to the lock order.
-        */
-       if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
-               list_del_init(&iocb->ki_list);
-               spin_unlock(&iocb->ki_ctx->ctx_lock);
-
-               __aio_poll_complete(iocb, mask);
-       } else {
-               req->events = mask;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-
-       return 1;
-}
-
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
-{
-       struct kioctx *ctx = aiocb->ki_ctx;
-       struct poll_iocb *req = &aiocb->poll;
-       __poll_t mask;
-
-       /* reject any unknown events outside the normal event mask. */
-       if ((u16)iocb->aio_buf != iocb->aio_buf)
-               return -EINVAL;
-       /* reject fields that are not defined for poll */
-       if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
-               return -EINVAL;
-
-       req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
-       req->file = fget(iocb->aio_fildes);
-       if (unlikely(!req->file))
-               return -EBADF;
-       if (!file_has_poll_mask(req->file))
-               goto out_fail;
-
-       req->head = req->file->f_op->get_poll_head(req->file, req->events);
-       if (!req->head)
-               goto out_fail;
-       if (IS_ERR(req->head)) {
-               mask = EPOLLERR;
-               goto done;
-       }
-
-       init_waitqueue_func_entry(&req->wait, aio_poll_wake);
-       aiocb->ki_cancel = aio_poll_cancel;
-
-       spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
-       if (!mask) {
-               __add_wait_queue(req->head, &req->wait);
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-       }
-       spin_unlock(&req->head->lock);
-       spin_unlock_irq(&ctx->ctx_lock);
-done:
-       if (mask)
-               __aio_poll_complete(aiocb, mask);
-       return 0;
-out_fail:
-       fput(req->file);
-       return -EINVAL; /* same as no support for IOCB_CMD_POLL */
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
@@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        case IOCB_CMD_FDSYNC:
                ret = aio_fsync(&req->fsync, &iocb, true);
                break;
-       case IOCB_CMD_POLL:
-               ret = aio_poll(req, &iocb);
-               break;
        default:
                pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
                ret = -EINVAL;
@@ -2042,6 +1896,11 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
        return ret;
 }
 
+struct __aio_sigset {
+       const sigset_t __user   *sigmask;
+       size_t          sigsetsize;
+};
+
 SYSCALL_DEFINE6(io_pgetevents,
                aio_context_t, ctx_id,
                long, min_nr,
index 43fedde15c26203548c08c866aa7fbf5cfc27dd0..1f85d35ec8b7b7f6a3866962bce468e3ae7d56f6 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for the linux autofs-filesystem routines.
 #
 
-obj-$(CONFIG_AUTOFS_FS) += autofs.o
+obj-$(CONFIG_AUTOFS_FS) += autofs4.o
 
-autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
+autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
index ea4ca1445ab78808644408de99430d8bbd6e1fd9..86eafda4a65226ef292f8713c2a86dee48e831ff 100644 (file)
@@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
                                cmd);
                        goto out;
                }
+       } else {
+               unsigned int inr = _IOC_NR(cmd);
+
+               if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
+                   inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
+                   inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
+                       err = -EINVAL;
+                       goto out;
+               }
        }
 
        err = 0;
@@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp,
        dev_t devid;
        int err, fd;
 
-       /* param->path has already been checked */
+       /* param->path has been checked in validate_dev_ioctl() */
+
        if (!param->openmount.devid)
                return -EINVAL;
 
@@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp,
        dev_t devid;
        int err = -ENOENT;
 
-       if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* param->path has been checked in validate_dev_ioctl() */
 
        devid = sbi->sb->s_dev;
 
@@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
        unsigned int devid, magic;
        int err = -ENOENT;
 
-       if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* param->path has been checked in validate_dev_ioctl() */
 
        name = param->path;
        type = param->ismountpoint.in.type;
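
The new else branch in validate_dev_ioctl() rejects, up front, the three ioctls whose handlers need param->path when none was supplied, which is why the per-handler size checks are deleted in the hunks above. _IOC_NR() reduces an ioctl command word to just its command number for that comparison. A compile-only sketch with made-up constants (not the real autofs values):

#include <linux/ioctl.h>

#define DEMO_TYPE          0x93      /* illustrative ioctl type */
#define DEMO_OPENMOUNT_NR  0x74      /* illustrative command number */
#define DEMO_OPENMOUNT     _IOWR(DEMO_TYPE, DEMO_OPENMOUNT_NR, int)

static int demo_requires_path(unsigned int cmd)
{
        unsigned int inr = _IOC_NR(cmd);  /* strip type/size/dir bits */

        return inr == DEMO_OPENMOUNT_NR;  /* would be rejected w/o a path */
}
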
index cc9447e1903f7a16d023067c0098c4123e764351..79ae07d9592f55cc06a10086cf45453250637d30 100644 (file)
@@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = {
        .kill_sb        = autofs_kill_sb,
 };
 MODULE_ALIAS_FS("autofs");
-MODULE_ALIAS("autofs4");
+MODULE_ALIAS("autofs");
 
 static int __init init_autofs_fs(void)
 {
index 0ac456b52bddb62e9c817d61bccc886c7c8cde85..816cc921cf36f766ca4521145b1b911a33fc7a13 100644 (file)
@@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file)
                goto out_free_ph;
        }
 
-       len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
-                           ELF_MIN_ALIGN - 1);
-       bss = eppnt->p_memsz + eppnt->p_vaddr;
+       len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
+       bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
        if (bss > len) {
                error = vm_brk(len, bss - len);
                if (error)
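
The two-line change swaps a round-down (ELF_PAGESTART) for a round-up (ELF_PAGEALIGN) on len and page-aligns bss as well, so vm_brk() zero-fills the whole BSS even when the file data ends mid-page. A standalone sketch of the arithmetic, using the usual binfmt_elf-style definitions (ELF_MIN_ALIGN is a power of two; the segment values are made up):

#include <stdio.h>

#define ELF_MIN_ALIGN      4096UL
#define ELF_PAGEALIGN(x)   (((x) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

int main(void)
{
        unsigned long p_vaddr = 0x1000, p_filesz = 0x1234, p_memsz = 0x5678;

        unsigned long len = ELF_PAGEALIGN(p_filesz + p_vaddr); /* 0x3000 */
        unsigned long bss = ELF_PAGEALIGN(p_memsz + p_vaddr);  /* 0x7000 */

        if (bss > len)  /* zero-fill the remaining 0x4000 bytes */
                printf("vm_brk(%#lx, %#lx)\n", len, bss - len);
        return 0;
}
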
index 0dd87aaeb39a7d05bbec28ce01536b106c4f76c2..aba25414231a83af85df892bed8795bb3af1c2e5 100644 (file)
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 
        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
-               return ret;
+               goto out;
        ret = bio.bi_iter.bi_size;
 
        if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                put_page(bvec->bv_page);
        }
 
-       if (vecs != inline_vecs)
-               kfree(vecs);
-
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);
 
+out:
+       if (vecs != inline_vecs)
+               kfree(vecs);
+
        bio_uninit(&bio);
 
        return ret;
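
The fix reroutes the early bio_iov_iter_get_pages() failure through a single exit label so a heap-allocated vector is always freed. The shape of the pattern, reduced to a standalone sketch:

#include <stdlib.h>

#define INLINE_VECS 4

static int demo_io(int nvecs, int fail_early)
{
        int inline_vecs[INLINE_VECS];
        int *vecs = inline_vecs;
        int ret = 0;

        if (nvecs > INLINE_VECS) {
                vecs = malloc(nvecs * sizeof(*vecs));
                if (!vecs)
                        return -1;       /* nothing to clean up yet */
        }

        if (fail_early) {                /* was: "return ret;" */
                ret = -1;
                goto out;                /* now reaches the free below */
        }

        /* ... do the I/O ... */

out:
        if (vecs != inline_vecs)         /* free only the heap vector */
                free(vecs);
        return ret;
}
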
index cce6087d6880fa4c1673dbc8aab0026fc62391f4..b3e45714d28f0507f40e590ff14076fcdb7728a5 100644 (file)
@@ -4238,8 +4238,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
        struct extent_map *em;
        u64 start = page_offset(page);
        u64 end = start + PAGE_SIZE - 1;
-       struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
-       struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+       struct extent_io_tree *tree = &btrfs_inode->io_tree;
+       struct extent_map_tree *map = &btrfs_inode->extent_tree;
 
        if (gfpflags_allow_blocking(mask) &&
            page->mapping->host->i_size > SZ_16M) {
@@ -4262,6 +4263,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
                                            extent_map_end(em) - 1,
                                            EXTENT_LOCKED | EXTENT_WRITEBACK,
                                            0, NULL)) {
+                               set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                                       &btrfs_inode->runtime_flags);
                                remove_extent_mapping(map, em);
                                /* once for the rb tree */
                                free_extent_map(em);
@@ -4542,8 +4545,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        offset_in_extent = em_start - em->start;
                em_end = extent_map_end(em);
                em_len = em_end - em_start;
-               disko = em->block_start + offset_in_extent;
                flags = 0;
+               if (em->block_start < EXTENT_MAP_LAST_BYTE)
+                       disko = em->block_start + offset_in_extent;
+               else
+                       disko = 0;
 
                /*
                 * bump off for our next call to get_extent
index e9482f0db9d08ffd79a117f0d6f08b6eb94cae99..eba61bcb9bb3cdd9759837b539c257aaaed1edef 100644 (file)
@@ -9005,13 +9005,14 @@ again:
 
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
-out_unlock:
        if (!ret2) {
                btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
        }
+
+out_unlock:
        unlock_page(page);
 out:
        btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
@@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
+       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
 
@@ -9639,7 +9641,8 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret = btrfs_end_transaction(trans);
+       ret2 = btrfs_end_transaction(trans);
+       ret = ret ? ret : ret2;
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
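
The ret/ret2 pair added above is the usual "first error wins" idiom: the status of btrfs_end_transaction() must not clobber an earlier failure, but should still be reported when the main work succeeded. In isolation:

static int end_transaction_demo(int work_err, int cleanup_err)
{
        int ret = work_err;              /* error from the rename itself */
        int ret2 = cleanup_err;          /* e.g. btrfs_end_transaction() */

        ret = ret ? ret : ret2;          /* first error wins */
        return ret;
}
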
index c2837a32d689de9a7d5d3bfc96d7d861cd221dfb..b077544b523245c05c6ec53710d4f9d45d1eb641 100644 (file)
@@ -3327,11 +3327,13 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
                if (pg) {
                        unlock_page(pg);
                        put_page(pg);
+                       cmp->src_pages[i] = NULL;
                }
                pg = cmp->dst_pages[i];
                if (pg) {
                        unlock_page(pg);
                        put_page(pg);
+                       cmp->dst_pages[i] = NULL;
                }
        }
 }
@@ -3577,7 +3579,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
                                              dst, dst_loff, &cmp);
                if (ret)
-                       goto out_unlock;
+                       goto out_free;
 
                loff += BTRFS_MAX_DEDUPE_LEN;
                dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3587,16 +3589,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, tail_len, dst,
                                              dst_loff, &cmp);
 
+out_free:
+       kvfree(cmp.src_pages);
+       kvfree(cmp.dst_pages);
+
 out_unlock:
        if (same_inode)
                inode_unlock(src);
        else
                btrfs_double_inode_unlock(src, dst);
 
-out_free:
-       kvfree(cmp.src_pages);
-       kvfree(cmp.dst_pages);
-
        return ret;
 }
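
Clearing each slot after put_page() makes btrfs_cmp_data_free() idempotent, which matters when the reordered exit path lets the free routine run again over the same page arrays. The same guard as a standalone sketch, with plain malloc/free stand-ins:

#include <stdlib.h>

/* Idempotent cleanup: once a slot is released it is cleared, so running
 * the free routine a second time over the same array is harmless. */
static void free_slots(void **slots, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (slots[i]) {
                        free(slots[i]);
                        slots[i] = NULL;   /* guard against a second pass */
                }
        }
}
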
 
index 1874a6d2e6f5422c809759d0ca29e9bb973826bb..c25dc47210a397560e929f55fc3feea8f26798dd 100644 (file)
@@ -2680,8 +2680,10 @@ out:
                free_extent_buffer(scratch_leaf);
        }
 
-       if (done && !ret)
+       if (done && !ret) {
                ret = 1;
+               fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+       }
        return ret;
 }
 
@@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 
        if (!init_flags) {
                /* we're resuming qgroup rescan at mount time */
-               if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN))
+               if (!(fs_info->qgroup_flags &
+                     BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup is not enabled");
-               else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+                       ret = -EINVAL;
+               } else if (!(fs_info->qgroup_flags &
+                            BTRFS_QGROUP_STATUS_FLAG_ON)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup rescan is not queued");
-               return -EINVAL;
+                       ret = -EINVAL;
+               }
+
+               if (ret)
+                       return ret;
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
index 5723060364776d1fd3e3e1e09bdfc09d4bb7eadb..6702896cdb8f7bcdb93a393f5ee3482498376445 100644 (file)
@@ -1151,11 +1151,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                return ret;
        }
 
-       if (sctx->is_dev_replace && !is_metadata && !have_csum) {
-               sblocks_for_recheck = NULL;
-               goto nodatasum_case;
-       }
-
        /*
         * read all mirrors one after the other. This includes to
         * re-read the extent or metadata block that failed (that was
@@ -1268,13 +1263,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                goto out;
        }
 
-       if (!is_metadata && !have_csum) {
+       /*
+        * NOTE: Even for the nodatasum case, it's still possible that this is
+        * a compressed data extent, so scrub_fixup_nodatasum(), which writes
+        * the inode page cache onto disk, could cause serious data corruption.
+        *
+        * So here we can only read from disk, and hope our recovery reaches
+        * disk before the newer write.
+        */
+       if (0 && !is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;
 
                WARN_ON(sctx->is_dev_replace);
 
-nodatasum_case:
-
                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COWed, that it might be modified
index e034ad9e23b48b42826de6bed1a8f59d6e926a20..1da162928d1a9b305ab36c2d99386afb2f060326 100644 (file)
@@ -1146,6 +1146,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 {
        int ret;
 
+       mutex_lock(&uuid_mutex);
        mutex_lock(&fs_devices->device_list_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
@@ -1155,6 +1156,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                ret = open_fs_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&fs_devices->device_list_mutex);
+       mutex_unlock(&uuid_mutex);
 
        return ret;
 }
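
btrfs_open_devices() now nests fs_devices->device_list_mutex inside the global uuid_mutex, taken and released in strict order so concurrent openers observe a consistent ->opened count. A pthread rendering of the nesting, purely illustrative:

#include <pthread.h>

static pthread_mutex_t uuid_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t device_list_mutex = PTHREAD_MUTEX_INITIALIZER;
static int opened;

static void open_devices(void)
{
        pthread_mutex_lock(&uuid_mutex);          /* outer lock first */
        pthread_mutex_lock(&device_list_mutex);
        opened++;
        pthread_mutex_unlock(&device_list_mutex); /* inner lock first */
        pthread_mutex_unlock(&uuid_mutex);
}
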
index d9f001078e08f677591495935c29c3aabad87d56..4a717d40080754378921f06404767659e983abf9 100644 (file)
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
                           "%s",
                           fsdef->dentry->d_sb->s_id);
 
-       fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+       fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+                           &cache->cache);
 
        ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
        if (ret < 0)
index ab0bbe93b398ce68dd0dc04652a626635d2c7c23..af2b17b21b94ba0c97b1085dc7154a3ee4df5c62 100644 (file)
@@ -186,12 +186,12 @@ try_again:
         * need to wait for it to be destroyed */
 wait_for_old_object:
        trace_cachefiles_wait_active(object, dentry, xobject);
+       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 
        if (fscache_object_is_live(&xobject->fscache)) {
                pr_err("\n");
                pr_err("Error: Unexpected object collision\n");
                cachefiles_printk_object(object, xobject);
-               BUG();
        }
        atomic_inc(&xobject->usage);
        write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
        goto try_again;
 
 requeue:
-       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
        _leave(" = -ETIMEDOUT");
        return -ETIMEDOUT;
index 5082c8a496866dcab1740c63088f7d8f21fb1c5a..40f7595aad10f20666df7741b8ce8dce3db37b6e 100644 (file)
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
+       struct fscache_retrieval *op = monitor->op;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;
 
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        list_del(&wait->entry);
 
        /* move onto the action list and queue for FS-Cache thread pool */
-       ASSERT(monitor->op);
+       ASSERT(op);
 
-       object = container_of(monitor->op->op.object,
-                             struct cachefiles_object, fscache);
+       /* We need to temporarily bump the usage count as we don't own a ref
+        * here; otherwise cachefiles_read_copier() may free the op between the
+        * monitor being enqueued on the op->to_do list and the op getting
+        * enqueued on the work queue.
+        */
+       fscache_get_retrieval(op);
 
+       object = container_of(op->op.object, struct cachefiles_object, fscache);
        spin_lock(&object->work_lock);
-       list_add_tail(&monitor->op_link, &monitor->op->to_do);
+       list_add_tail(&monitor->op_link, &op->to_do);
        spin_unlock(&object->work_lock);
 
-       fscache_enqueue_retrieval(monitor->op);
+       fscache_enqueue_retrieval(op);
+       fscache_put_retrieval(op);
        return 0;
 }
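
The get/put pair added above pins the retrieval op across the two enqueue steps so the consumer thread cannot free it in between. The same temporary-reference pattern as a standalone sketch, with C11 atomics standing in for the kernel refcount:

#include <stdatomic.h>
#include <stdlib.h>

struct op {
        atomic_int usage;          /* stand-in for the fscache refcount */
};

static void op_get(struct op *op)
{
        atomic_fetch_add(&op->usage, 1);
}

static void op_put(struct op *op)
{
        if (atomic_fetch_sub(&op->usage, 1) == 1)
                free(op);          /* dropping the last reference frees */
}

static void waiter(struct op *op)
{
        op_get(op);                /* pin op across both enqueue steps */
        /* ... add the monitor to op->to_do, then enqueue op's work ... */
        op_put(op);                /* the consumer may now free it */
}
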
 
index ee764ac352ab7b855165b797c1daf579fbaa45e1..a866be999216a81bcfa90dfcb17cc11177442731 100644 (file)
@@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
+               dput(dn);
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
index 116146022aa1fa82d334790f7e2d7ff46b052bf3..bfe99950581527bcc494acb6419436e6373aa923 100644 (file)
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
        seq_putc(m, '\n');
 }
 
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+       struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+       struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+       seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+       seq_puts(m, "\t\tCapabilities: ");
+       if (iface->rdma_capable)
+               seq_puts(m, "rdma ");
+       if (iface->rss_capable)
+               seq_puts(m, "rss ");
+       seq_putc(m, '\n');
+       if (iface->sockaddr.ss_family == AF_INET)
+               seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+       else if (iface->sockaddr.ss_family == AF_INET6)
+               seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ skip_rdma:
                                              mid_entry->mid);
                        }
                        spin_unlock(&GlobalMid_Lock);
+
+                       spin_lock(&ses->iface_lock);
+                       if (ses->iface_count)
+                               seq_printf(m, "\n\tServer interfaces: %zu\n",
+                                          ses->iface_count);
+                       for (j = 0; j < ses->iface_count; j++) {
+                               seq_printf(m, "\t%d)\n", j);
+                               cifs_dump_iface(m, &ses->iface_list[j]);
+                       }
+                       spin_unlock(&ses->iface_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
index 937251cc61c046916228f150916c8c0a82a442a5..ee2a8ec70056f7451695cb75bfe1e00a95280ff0 100644 (file)
@@ -37,7 +37,6 @@
 #include <crypto/aead.h>
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
-                       int start,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash)
 {
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
+       int is_smb2 = server->vals->header_preamble_size == 0;
 
-       for (i = start; i < n_vec; i++) {
+       /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+       if (is_smb2) {
+               if (iov[0].iov_len <= 4)
+                       return -EIO;
+               i = 0;
+       } else {
+               if (n_vec < 2 || iov[0].iov_len != 4)
+                       return -EIO;
+               i = 1; /* skip rfc1002 length */
+       }
+
+       for (; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
                        cifs_dbg(VFS, "null iovec entry\n");
                        return -EIO;
                }
-               if (i == 1 && iov[1].iov_len <= 4)
-                       break; /* nothing to sign or corrupt header */
+
                rc = crypto_shash_update(shash,
                                         iov[i].iov_base, iov[i].iov_len);
                if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
                return rc;
        }
 
-       return __cifs_calc_signature(rqst, 1, server, signature,
+       return __cifs_calc_signature(rqst, server, signature,
                                     &server->secmech.sdescmd5->shash);
 }
 
index 1efa2e65bc1a8971f01811ac699a82cb7c1f1727..c923c785402757c36d25528c5e77e53909b227dc 100644 (file)
@@ -33,6 +33,9 @@
 
 #define CIFS_MAGIC_NUMBER 0xFF534D42      /* the first four bytes of SMB PDUs */
 
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -312,6 +315,10 @@ struct smb_version_operations {
        /* send echo request */
        int (*echo)(struct TCP_Server_Info *);
        /* create directory */
+       int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+                       umode_t mode, struct cifs_tcon *tcon,
+                       const char *full_path,
+                       struct cifs_sb_info *cifs_sb);
        int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
                     struct cifs_sb_info *);
        /* set info on created directory */
@@ -416,7 +423,7 @@ struct smb_version_operations {
        void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
                                 bool *);
        /* create lease context buffer for CREATE request */
-       char * (*create_lease_buf)(u8 *, u8);
+       char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
        /* parse lease context buffer and return oplock/epoch info */
        __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
        ssize_t (*copychunk_range)(const unsigned int,
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 
 #endif
 
+struct cifs_server_iface {
+       size_t speed;
+       unsigned int rdma_capable : 1;
+       unsigned int rss_capable : 1;
+       struct sockaddr_storage sockaddr;
+};
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -875,6 +889,20 @@ struct cifs_ses {
 #ifdef CONFIG_CIFS_SMB311
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 #endif /* 3.1.1 */
+
+       /*
+        * Network interfaces available on the server this session is
+        * connected to.
+        *
+        * Other channels can be opened by connecting and binding this
+        * session to interfaces from this list.
+        *
+        * iface_lock should be taken when accessing any of these fields
+        */
+       spinlock_t iface_lock;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       unsigned long iface_last_update; /* jiffies */
 };
 
 static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
        return ses->server->vals->cap_unix & ses->capabilities;
 }
 
+struct cached_fid {
+       bool is_valid:1;        /* Do we have a usable root fid */
+       struct cifs_fid *fid;
+       struct mutex fid_mutex;
+       struct cifs_tcon *tcon;
+       struct work_struct lease_break;
+};
+
 /*
  * there is one of these for each connection to a resource on a particular
  * session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
        struct fscache_cookie *fscache; /* cookie for share */
 #endif
        struct list_head pending_opens; /* list of incomplete opens */
-       bool valid_root_fid:1;  /* Do we have a useable root fid */
-       struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
-       struct cifs_fid *prfid; /* handle to the directory at top of share */
+       struct cached_fid crfid; /* Cached root fid */
        /* BB add field for back pointer to sb struct(s)? */
 };
 
@@ -1382,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server,
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
        struct list_head qhead; /* mids waiting on reply from this server */
+       struct kref refcount;
        struct TCP_Server_Info *server; /* server corresponding to this mid */
        __u64 mid;              /* multiplex id */
        __u32 pid;              /* process id */
index 4e0d183c3d1016918d9934420af6e626d128d077..1890f534c88b168b8476a64fd165cce64f905887 100644 (file)
@@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                        struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
 extern void cifs_delete_mid(struct mid_q_entry *mid);
+extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                struct mid_q_entry *mid);
@@ -112,10 +113,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */, const int flags,
                        struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
-                         struct kvec *pkvec, int nvec_to_send,
-                         int *pbuftype, const int flags,
-                         struct kvec *presp);
 extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct cifs_tcon *ptcon,
                        struct smb_hdr *in_buf ,
@@ -544,7 +541,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
                           struct cifs_sb_info *cifs_sb,
                           const unsigned char *path, char *pbuf,
                           unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash);
 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +549,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
 void cifs_aio_ctx_release(struct kref *refcount);
 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
 
 int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
                    struct sdesc **sdesc);
index 42329b25877db2b3de349b0ce5723f70bebad92b..93408eab92e78988bcf79b715ac77049db643e7f 100644 (file)
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        }
        spin_unlock(&tcon->open_file_lock);
 
-       mutex_lock(&tcon->prfid_mutex);
-       tcon->valid_root_fid = false;
-       memset(tcon->prfid, 0, sizeof(struct cifs_fid));
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_lock(&tcon->crfid.fid_mutex);
+       tcon->crfid.is_valid = false;
+       memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+       mutex_unlock(&tcon->crfid.fid_mutex);
 
        /*
         * BB Add call to invalidate_inodes(sb) for all superblocks mounted
@@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
         * greater than cifs socket timeout which is 7 seconds
         */
        while (server->tcpStatus == CifsNeedReconnect) {
-               wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+               rc = wait_event_interruptible_timeout(server->response_q,
+                                                     (server->tcpStatus != CifsNeedReconnect),
+                                                     10 * HZ);
+               if (rc < 0) {
+                       cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+                                " received by the process\n", __func__);
+                       return -ERESTARTSYS;
+               }
 
                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
index 96645a7d8f27144a885863578d33e2b757afeec6..5df2c0698cda7a5ae093db0e3886b275bc0565cb 100644 (file)
@@ -57,9 +57,6 @@
 #include "smb2proto.h"
 #include "smbdirect.h"
 
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
 extern mempool_t *cifs_req_poolp;
 extern bool disable_legacy_dialects;
 
@@ -927,6 +924,7 @@ next_pdu:
                                server->pdu_size = next_offset;
                }
 
+               mid_entry = NULL;
                if (server->ops->is_transform_hdr &&
                    server->ops->receive_transform &&
                    server->ops->is_transform_hdr(buf)) {
@@ -941,8 +939,11 @@ next_pdu:
                                length = mid_entry->receive(server, mid_entry);
                }
 
-               if (length < 0)
+               if (length < 0) {
+                       if (mid_entry)
+                               cifs_mid_q_entry_release(mid_entry);
                        continue;
+               }
 
                if (server->large_buf)
                        buf = server->bigbuf;
@@ -959,6 +960,8 @@ next_pdu:
 
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
+
+                       cifs_mid_q_entry_release(mid_entry);
                } else if (server->ops->is_oplock_break &&
                           server->ops->is_oplock_break(buf, server)) {
                        cifs_dbg(FYI, "Received oplock break\n");
@@ -3029,8 +3032,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 
 #ifdef CONFIG_CIFS_SMB311
        if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
-               if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+               if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
                        tcon->posix_extensions = true;
+                       printk_once(KERN_WARNING
+                               "SMB3.11 POSIX Extensions are experimental\n");
+               }
        }
 #endif /* 311 */
 
index f4697f548a394dbf5c42f731bf13bd529c9aaea0..a2cfb33e85c1f8cb25a2d32a52bb5d60c93b79f1 100644 (file)
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                goto mkdir_out;
        }
 
+       server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+       if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+               rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+                                             cifs_sb);
+               d_drop(direntry); /* for the time being, always refresh inode info */
+               goto mkdir_out;
+       }
+#endif /* SMB311 */
+
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                        goto mkdir_out;
        }
 
-       server = tcon->ses->server;
-
        if (!server->ops->mkdir) {
                rc = -ENOSYS;
                goto mkdir_out;
index af29ade195c002c0323d855edb391a155f1620f7..53e8362cbc4a953218d3fbd50f1c7133e5435cc9 100644 (file)
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
+               spin_lock_init(&ret_buf->iface_lock);
        }
        return ret_buf;
 }
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kzfree(buf_to_free->auth_key.response);
+       kfree(buf_to_free->iface_list);
        kzfree(buf_to_free);
 }
 
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                spin_lock_init(&ret_buf->open_file_lock);
-               mutex_init(&ret_buf->prfid_mutex);
-               ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+               mutex_init(&ret_buf->crfid.fid_mutex);
+               ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+                                            GFP_KERNEL);
 #ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
 #endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        kzfree(buf_to_free->password);
-       kfree(buf_to_free->prfid);
+       kfree(buf_to_free->crfid.fid);
        kfree(buf_to_free);
 }
 
index aff8ce8ba34d55485d1d15aa8b7ea498cf6726f3..646dcd149de1e368baebac10a940a70a095ef479 100644 (file)
@@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
                if (compare_mid(mid->mid, buf) &&
                    mid->mid_state == MID_REQUEST_SUBMITTED &&
                    le16_to_cpu(mid->command) == buf->Command) {
+                       kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
index 788412675723e85589f78cc6056f2d67edbd5ff1..4ed10dd086e6f31f2816462c8a082ec8939175ae 100644 (file)
@@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        int rc;
        __le16 *smb2_path;
        struct smb2_file_all_info *smb2_data = NULL;
-       __u8 smb2_oplock[17];
+       __u8 smb2_oplock;
        struct cifs_fid *fid = oparms->fid;
        struct network_resiliency_req nr_ioctl_req;
 
@@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        }
 
        oparms->desired_access |= FILE_READ_ATTRIBUTES;
-       *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+       smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
 
-       if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
-               memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE);
-
-       rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL,
+       rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL,
                       NULL);
        if (rc)
                goto out;
@@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                move_smb2_info_to_cifs(buf, smb2_data);
        }
 
-       *oplock = *smb2_oplock;
+       *oplock = smb2_oplock;
 out:
        kfree(smb2_data);
        kfree(smb2_path);
index e2bec47c684580089a70e7914ec71d2f523da3e3..3ff7cec2da81141f67482c57ab03de52aed855ba 100644 (file)
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
 #ifdef CONFIG_CIFS_SMB311
        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
-                cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+                cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+                (from[0] == '/')) {
                start_of_path = from + 1;
        }
 #endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
 {
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
-       int rc;
+       int rc = 0;
 
        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);
+
        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 
                open->oplock = lease_state;
        }
+
        return found;
 }
 
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
                                        return true;
                                }
                                spin_unlock(&tcon->open_file_lock);
+
+                               if (tcon->crfid.is_valid &&
+                                   !memcmp(rsp->LeaseKey,
+                                           tcon->crfid.fid->lease_key,
+                                           SMB2_LEASE_KEY_SIZE)) {
+                                       INIT_WORK(&tcon->crfid.lease_break,
+                                                 smb2_cached_lease_break);
+                                       queue_work(cifsiod_wq,
+                                                  &tcon->crfid.lease_break);
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       return true;
+                               }
                        }
                }
        }
index b15f5957d64591f0af611670088dd4dd8439fb43..ea92a38b2f08c34f2afd942d5fa933098f04cc07 100644 (file)
@@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
                if ((mid->mid == wire_mid) &&
                    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                    (mid->command == shdr->Command)) {
+                       kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
@@ -294,34 +295,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+                       size_t buf_len,
+                       struct cifs_server_iface **iface_list,
+                       size_t *iface_count)
+{
+       struct network_interface_info_ioctl_rsp *p;
+       struct sockaddr_in *addr4;
+       struct sockaddr_in6 *addr6;
+       struct iface_info_ipv4 *p4;
+       struct iface_info_ipv6 *p6;
+       struct cifs_server_iface *info;
+       ssize_t bytes_left;
+       size_t next = 0;
+       int nb_iface = 0;
+       int rc = 0;
+
+       *iface_list = NULL;
+       *iface_count = 0;
+
+       /*
+        * First pass: count and sanity check
+        */
+
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               nb_iface++;
+               next = le32_to_cpu(p->Next);
+               if (!next) {
+                       bytes_left -= sizeof(*p);
+                       break;
+               }
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!nb_iface) {
+               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (bytes_left || p->Next)
+               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+
+       /*
+        * Second pass: extract info to internal structure
+        */
+
+       *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+       if (!*iface_list) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       info = *iface_list;
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               info->speed = le64_to_cpu(p->LinkSpeed);
+               info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+               info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+
+               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+                        le32_to_cpu(p->Capability));
+
+               switch (p->Family) {
+               /*
+                * The kernel and wire socket structures have the same
+                * layout and use network byte order but make the
+                * conversion explicit in case either one changes.
+                */
+               case INTERNETWORK:
+                       addr4 = (struct sockaddr_in *)&info->sockaddr;
+                       p4 = (struct iface_info_ipv4 *)p->Buffer;
+                       addr4->sin_family = AF_INET;
+                       memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+                       /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+                       addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+                                &addr4->sin_addr);
+                       break;
+               case INTERNETWORKV6:
+                       addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+                       p6 = (struct iface_info_ipv6 *)p->Buffer;
+                       addr6->sin6_family = AF_INET6;
+                       memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+                       /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+                       addr6->sin6_flowinfo = 0;
+                       addr6->sin6_scope_id = 0;
+                       addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+                                &addr6->sin6_addr);
+                       break;
+               default:
+                       cifs_dbg(VFS,
+                                "%s: skipping unsupported socket family\n",
+                                __func__);
+                       goto next_iface;
+               }
+
+               (*iface_count)++;
+               info++;
+next_iface:
+               next = le32_to_cpu(p->Next);
+               if (!next)
+                       break;
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!*iface_count) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (rc) {
+               kfree(*iface_list);
+               *iface_count = 0;
+               *iface_list = NULL;
+       }
+       return rc;
+}
+
+
 static int
 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc;
        unsigned int ret_data_len = 0;
-       struct network_interface_info_ioctl_rsp *out_buf;
+       struct network_interface_info_ioctl_rsp *out_buf = NULL;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
                        (char **)&out_buf, &ret_data_len);
-       if (rc != 0)
+       if (rc != 0) {
                cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
-       else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
-               cifs_dbg(VFS, "server returned bad net interface info buf\n");
-               rc = -EINVAL;
-       } else {
-               /* Dump info on first interface */
-               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
-                       le32_to_cpu(out_buf->Capability));
-               cifs_dbg(FYI, "Link Speed %lld\n",
-                       le64_to_cpu(out_buf->LinkSpeed));
+               goto out;
        }
+
+       rc = parse_server_interfaces(out_buf, ret_data_len,
+                                    &iface_list, &iface_count);
+       if (rc)
+               goto out;
+
+       spin_lock(&ses->iface_lock);
+       kfree(ses->iface_list);
+       ses->iface_list = iface_list;
+       ses->iface_count = iface_count;
+       ses->iface_last_update = jiffies;
+       spin_unlock(&ses->iface_lock);
+
+out:
        kfree(out_buf);
        return rc;
 }
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+       struct cached_fid *cfid = container_of(work,
+                               struct cached_fid, lease_break);
+       mutex_lock(&cfid->fid_mutex);
+       if (cfid->is_valid) {
+               cifs_dbg(FYI, "clear cached root file handle\n");
+               SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+                          cfid->fid->volatile_fid);
+               cfid->is_valid = false;
+       }
+       mutex_unlock(&cfid->fid_mutex);
+}
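
parse_server_interfaces() above is a classic two-pass walk of a buffer of records chained by a Next byte offset: pass one counts and bounds-checks, pass two copies into a flat array sized from the count. A standalone sketch of the same shape, with an illustrative record layout rather than the SMB2 wire format:

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>

struct rec {
        uint32_t next;       /* byte offset of the next record, 0 = last */
        uint32_t value;
};

static int parse(const uint8_t *buf, size_t len, uint32_t **out, size_t *n)
{
        const struct rec *p = (const struct rec *)buf;
        ssize_t left = (ssize_t)len;
        size_t count = 0;

        /* pass 1: count records and bounds-check the chain */
        while (left >= (ssize_t)sizeof(*p)) {
                count++;
                if (!p->next)
                        break;
                left -= p->next;
                p = (const struct rec *)((const uint8_t *)p + p->next);
        }
        if (!count)
                return -1;           /* malformed: no complete record */

        *out = calloc(count, sizeof(**out));
        if (!*out)
                return -1;

        /* pass 2: walk the same chain, extracting into the flat array */
        p = (const struct rec *)buf;
        for (size_t i = 0; i < count; i++) {
                (*out)[i] = p->value;
                p = (const struct rec *)((const uint8_t *)p + p->next);
        }
        *n = count;
        return 0;
}

Counting first avoids growing the array as records arrive, at the cost of traversing the chain twice.
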
 
 /*
  * Open the directory at the root of a share
@@ -331,13 +489,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        struct cifs_open_parms oparams;
        int rc;
        __le16 srch_path = 0; /* Null - since an open of top of share */
-       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       u8 oplock = SMB2_OPLOCK_LEVEL_II;
 
-       mutex_lock(&tcon->prfid_mutex);
-       if (tcon->valid_root_fid) {
+       mutex_lock(&tcon->crfid.fid_mutex);
+       if (tcon->crfid.is_valid) {
                cifs_dbg(FYI, "found a cached root file handle\n");
-               memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
-               mutex_unlock(&tcon->prfid_mutex);
+               memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+               mutex_unlock(&tcon->crfid.fid_mutex);
                return 0;
        }
 
@@ -350,10 +508,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 
        rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
        if (rc == 0) {
-               memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
-               tcon->valid_root_fid = true;
+               memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+               tcon->crfid.tcon = tcon;
+               tcon->crfid.is_valid = true;
        }
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_unlock(&tcon->crfid.fid_mutex);
        return rc;
 }
 
@@ -383,9 +542,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
        if (rc)
                return;
 
-#ifdef CONFIG_CIFS_STATS2
        SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
 
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +593,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
 
-       if ((*full_path == 0) && tcon->valid_root_fid)
+       if ((*full_path == 0) && tcon->crfid.is_valid)
                return 0;
 
        utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -699,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea,
                         len);
+       kfree(ea);
+
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 
        return rc;
@@ -2063,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock)
        if (!buf)
                return NULL;
 
-       buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
-       buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+       memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
        buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
 
        buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2090,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
        if (!buf)
                return NULL;
 
-       buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
-       buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+       memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
        buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
 
        buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2128,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        if (lease_key)
-               memcpy(lease_key, &lc->lcontext.LeaseKeyLow,
-                      SMB2_LEASE_KEY_SIZE);
+               memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
        return le32_to_cpu(lc->lcontext.LeaseState);
 }
 
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
                   struct smb_rqst *old_rq)
 {
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
 
        memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 }
 
 /* Assumes:
- * rqst->rq_iov[0]  is rfc1002 length
- * rqst->rq_iov[1]  is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0]  is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
  */
 static struct scatterlist *
 init_sg(struct smb_rqst *rqst, u8 *sign)
 {
-       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        struct scatterlist *sg;
        unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
                return NULL;
 
        sg_init_table(sg, sg_len);
-       smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
-       for (i = 1; i < rqst->rq_nvec - 1; i++)
-               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
-                                               rqst->rq_iov[i+1].iov_len);
+       smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+       for (i = 1; i < rqst->rq_nvec; i++)
+               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+                                               rqst->rq_iov[i].iov_len);
        for (j = 0; i < sg_len - 1; i++, j++) {
                unsigned int len, offset;
 
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
        return 1;
 }
 /*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+ * iov[0]   - transform header (associate data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
  * untouched.
  */
 static int
 crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 {
        struct smb2_transform_hdr *tr_hdr =
-                       (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        int rc = 0;
        struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ free_req:
        return rc;
 }
 
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
 static int
 smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                       struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        struct page **pages;
        struct smb2_transform_hdr *tr_hdr;
        unsigned int npages = old_rq->rq_npages;
-       unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+       unsigned int orig_len;
        int i;
        int rc = -ENOMEM;
 
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                        goto err_free_pages;
        }
 
-       /* Make space for one extra iov to hold the transform header */
        iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
                            GFP_KERNEL);
        if (!iov)
                goto err_free_pages;
 
-       /* copy all iovs from the old except the 1st one (rfc1002 length) */
-       memcpy(&iov[2], &old_rq->rq_iov[1],
-                               sizeof(struct kvec) * (old_rq->rq_nvec - 1));
-       /* copy the rfc1002 iov */
-       iov[0].iov_base = old_rq->rq_iov[0].iov_base;
-       iov[0].iov_len  = old_rq->rq_iov[0].iov_len;
+       /* copy all iovs from the old */
+       memcpy(&iov[1], &old_rq->rq_iov[0],
+                               sizeof(struct kvec) * old_rq->rq_nvec);
 
        new_rq->rq_iov = iov;
        new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        if (!tr_hdr)
                goto err_free_iov;
 
+       orig_len = smb_rqst_len(server, old_rq);
+
        /* fill the 2nd iov with a transform header */
        fill_transform_hdr(tr_hdr, orig_len, old_rq);
-       new_rq->rq_iov[1].iov_base = tr_hdr;
-       new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
-       /* Update rfc1002 header */
-       inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
-                       sizeof(struct smb2_transform_hdr));
+       new_rq->rq_iov[0].iov_base = tr_hdr;
+       new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
 
        /* copy pages from the old */
        for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
                put_page(rqst->rq_pages[i]);
        kfree(rqst->rq_pages);
        /* free transform header */
-       kfree(rqst->rq_iov[1].iov_base);
+       kfree(rqst->rq_iov[0].iov_base);
        kfree(rqst->rq_iov);
 }
 
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
                 unsigned int buf_data_size, struct page **pages,
                 unsigned int npages, unsigned int page_data_size)
 {
-       struct kvec iov[3];
+       struct kvec iov[2];
        struct smb_rqst rqst = {NULL};
        int rc;
 
-       iov[0].iov_base = NULL;
-       iov[0].iov_len = 0;
-       iov[1].iov_base = buf;
-       iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-       iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
-       iov[2].iov_len = buf_data_size;
+       iov[0].iov_base = buf;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+       iov[1].iov_len = buf_data_size;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 3;
+       rqst.rq_nvec = 2;
        rqst.rq_pages = pages;
        rqst.rq_npages = npages;
        rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
        if (rc)
                return rc;
 
-       memmove(buf, iov[2].iov_base, buf_data_size);
+       memmove(buf, iov[1].iov_base, buf_data_size);
 
        server->total_read = buf_data_size + page_data_size;
 
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
        .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
+       .posix_mkdir = smb311_posix_mkdir,
        .rmdir = smb2_rmdir,
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
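
The encryption hunks above all follow from one layout change: with the RFC1002 vector gone, the transform header is iov[0] (the AEAD associated data) and the SMB2 PDU starts at iov[1], which is why init_sg() now sizes its table as rq_nvec + rq_npages + 1, the extra slot holding the signature buffer. A reduced sketch with stand-in types, not the real cifs structs:

#include <stddef.h>

struct kvec {                    /* simplified stand-in */
        void *iov_base;
        size_t iov_len;
};

/* iov[0] = transform header (the AEAD associated data),
 * iov[1] = SMB2 header + body, the payload that gets encrypted */
static void build_transform_rqst(struct kvec iov[2],
                                 void *tr_hdr, size_t tr_len,
                                 void *pdu, size_t pdu_len)
{
        iov[0].iov_base = tr_hdr;
        iov[0].iov_len  = tr_len;
        iov[1].iov_base = pdu;
        iov[1].iov_len  = pdu_len;
}
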
index af032e1a3eac7adaf0570f5923e0ba6164e8ed6b..3c92678cb45bc8fab4ce27cfcbadaef43586a3e9 100644 (file)
@@ -155,7 +155,7 @@ out:
 static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
 {
-       int rc = 0;
+       int rc;
        struct nls_table *nls_codepage;
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;
@@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
         * for those three - in the calling routine.
         */
        if (tcon == NULL)
-               return rc;
+               return 0;
 
        if (smb2_command == SMB2_TREE_CONNECT)
-               return rc;
+               return 0;
 
        if (tcon->tidStatus == CifsExiting) {
                /*
@@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
                        return -EAGAIN;
                }
 
-               wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+               rc = wait_event_interruptible_timeout(server->response_q,
+                                                     (server->tcpStatus != CifsNeedReconnect),
+                                                     10 * HZ);
+               if (rc < 0) {
+                       cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+                                " received by the process\n", __func__);
+                       return -ERESTARTSYS;
+               }
 
                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
@@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
        }
 
        if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
-               return rc;
+               return 0;
 
        nls_codepage = load_nls_default();
 
@@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
                return rc;
 
        /* BB eventually switch this to SMB2 specific small buf size */
-       *request_buf = cifs_small_buf_get();
+       if (smb2_command == SMB2_SET_INFO)
+               *request_buf = cifs_buf_get();
+       else
+               *request_buf = cifs_small_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
                return -ENOMEM;
@@ -602,6 +611,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
 int
 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_negotiate_req *req;
        struct smb2_negotiate_rsp *rsp;
        struct kvec iov[1];
@@ -673,7 +683,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
        /*
@@ -990,8 +1004,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        req->PreviousSessionId = sess_data->previous_session;
 
        req->Flags = 0; /* MBZ */
-       /* to enable echos and oplocks */
-       req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+       /* enough to enable echoes and oplocks and one max size write */
+       req->sync_hdr.CreditRequest = cpu_to_le16(130);
 
        /* only one of SMB2 signing flags may be set in SMB2 request */
        if (server->sign)
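
The jump from 3 to 130 credits is simple arithmetic, assuming the usual SMB2
crediting model where one credit covers 64 KiB of payload and the client wants
room for one maximal 8 MiB write:

    130 = 8 MiB / 64 KiB per credit + 2 = 128 + 2

leaving two spare credits for echo and oplock-break traffic. The 64 KiB and
8 MiB figures are protocol conventions, not spelled out in this hunk.
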
@@ -1027,6 +1042,7 @@ static int
 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
 {
        int rc;
+       struct smb_rqst rqst;
        struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
        struct kvec rsp_iov = { NULL, 0 };
 
@@ -1035,10 +1051,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
                cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
        req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
 
-       /* BB add code to build os and lm fields */
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = sess_data->iov;
+       rqst.rq_nvec = 2;
 
-       rc = smb2_send_recv(sess_data->xid, sess_data->ses,
-                           sess_data->iov, 2,
+       /* BB add code to build os and lm fields */
+       rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+                           &rqst,
                            &sess_data->buf0_type,
                            CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
        cifs_small_buf_release(sess_data->iov[0].iov_base);
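
Most hunks in this file repeat the same mechanical conversion: the removed
smb2_send_recv() helper used to prepend an RFC1002 length kvec internally,
while the new calling convention wraps the caller's kvec array in an explicit
struct smb_rqst and hands it to cifs_send_recv(). The recurring shape, as seen
in the hunks above:

    struct smb_rqst rqst;

    memset(&rqst, 0, sizeof(struct smb_rqst));
    rqst.rq_iov = iov;      /* iov[0] starts at the SMB2 header itself */
    rqst.rq_nvec = n_iov;

    rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);

The RFC1002 marker no longer travels in the iov array at all; the transport
generates it at send time (see the __smb_send_rqst() rewrite further down).
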
@@ -1376,6 +1395,7 @@ out:
 int
 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_logoff_req *req; /* response is also trivial struct */
        int rc = 0;
        struct TCP_Server_Info *server;
@@ -1413,7 +1433,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        /*
         * No tcon so can't do
@@ -1443,6 +1467,7 @@ int
 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
          struct cifs_tcon *tcon, const struct nls_table *cp)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -1499,7 +1524,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
            !smb3_encryption_required(tcon))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
 
@@ -1563,6 +1592,7 @@ tcon_error_exit:
 int
 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1623,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        if (rc)
                cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1682,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
 
 static int
 add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
-                 unsigned int *num_iovec, __u8 *oplock)
+                 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
 {
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
+       iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = server->vals->create_lease_size;
@@ -1886,11 +1920,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
        return 0;
 }
 
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb)
+{
+       struct smb_rqst rqst;
+       struct smb2_create_req *req;
+       struct smb2_create_rsp *rsp;
+       struct TCP_Server_Info *server;
+       struct cifs_ses *ses = tcon->ses;
+       struct kvec iov[3]; /* make sure at least one for each open context */
+       struct kvec rsp_iov = {NULL, 0};
+       int resp_buftype;
+       int uni_path_len;
+       __le16 *copy_path = NULL;
+       int copy_size;
+       int rc = 0;
+       unsigned int n_iov = 2;
+       __u32 file_attributes = 0;
+       char *pc_buf = NULL;
+       int flags = 0;
+       unsigned int total_len;
+       __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+       if (!path)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "mkdir\n");
+
+       if (ses && (ses->server)) {
+               server = ses->server;
+       } else {
+               kfree(path);
+               return -EIO;
+       }
+
+       rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+       if (rc) {
+               kfree(path);
+               return rc;
+       }
+
+       if (smb3_encryption_required(tcon))
+               flags |= CIFS_TRANSFORM_REQ;
+
+       req->ImpersonationLevel = IL_IMPERSONATION;
+       req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+       /* File attributes ignored on open (used in create though) */
+       req->FileAttributes = cpu_to_le32(file_attributes);
+       req->ShareAccess = FILE_SHARE_ALL_LE;
+       req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+       req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+       iov[0].iov_base = (char *)req;
+       /* -1 since last byte is buf[0] which is sent below (path) */
+       iov[0].iov_len = total_len - 1;
+
+       req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+       /* [MS-SMB2] 2.2.13 NameOffset:
+        * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+        * the SMB2 header, the file name includes a prefix that will
+        * be processed during DFS name normalization as specified in
+        * section 3.3.5.9. Otherwise, the file name is relative to
+        * the share that is identified by the TreeId in the SMB2
+        * header.
+        */
+       if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+               int name_len;
+
+               req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+               rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+                                                &name_len,
+                                                tcon->treeName, path);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       return rc;
+               }
+               req->NameLength = cpu_to_le16(name_len * 2);
+               uni_path_len = copy_size;
+               path = copy_path;
+       } else {
+               uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+               /* MUST set path len (NameLength) to 0 opening root of share */
+               req->NameLength = cpu_to_le16(uni_path_len - 2);
+               if (uni_path_len % 8 != 0) {
+                       copy_size = roundup(uni_path_len, 8);
+                       copy_path = kzalloc(copy_size, GFP_KERNEL);
+                       if (!copy_path) {
+                               cifs_small_buf_release(req);
+                               return -ENOMEM;
+                       }
+                       memcpy((char *)copy_path, (const char *)path,
+                              uni_path_len);
+                       uni_path_len = copy_size;
+                       path = copy_path;
+               }
+       }
+
+       iov[1].iov_len = uni_path_len;
+       iov[1].iov_base = path;
+       req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (tcon->posix_extensions) {
+               if (n_iov > 2) {
+                       struct create_context *ccontext =
+                           (struct create_context *)iov[n_iov-1].iov_base;
+                       ccontext->Next =
+                               cpu_to_le32(iov[n_iov-1].iov_len);
+               }
+
+               rc = add_posix_context(iov, &n_iov, mode);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       kfree(copy_path);
+                       return rc;
+               }
+               pc_buf = iov[n_iov-1].iov_base;
+       }
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+                           &rsp_iov);
+
+       cifs_small_buf_release(req);
+       rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+       if (rc != 0) {
+               cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+               trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+               goto smb311_mkdir_exit;
+       }
+
+       trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+                                   ses->Suid, CREATE_NOT_FILE,
+                                   FILE_WRITE_ATTRIBUTES);
+
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+       /* Eventually save off POSIX-specific response info and timestamps */
+
+smb311_mkdir_exit:
+       kfree(copy_path);
+       kfree(pc_buf);
+       free_rsp_buf(resp_buftype, rsp);
+       return rc;
+}
+#endif /* SMB311 */
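
smb311_posix_mkdir() chains its create contexts through each context's Next
field, as the posix_extensions branch above shows. The linking rule, distilled
into an illustrative helper (not part of the patch; iov layout as in the
function above, with iov[0]/iov[1] holding the request and path):

    /* Each context's Next holds the byte offset to the following one;
     * the final context leaves Next = 0, terminating the chain.
     */
    static void chain_create_contexts(struct kvec *iov, unsigned int n_iov)
    {
            unsigned int i;

            for (i = 2; i < n_iov - 1; i++) {
                    struct create_context *cc = iov[i].iov_base;

                    cc->Next = cpu_to_le32(iov[i].iov_len);
            }
    }
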
+
 int
 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
          __u8 *oplock, struct smb2_file_all_info *buf,
          struct kvec *err_iov, int *buftype)
 {
+       struct smb_rqst rqst;
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
        struct TCP_Server_Info *server;
@@ -1993,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
        else {
-               rc = add_lease_context(server, iov, &n_iov, oplock);
+               rc = add_lease_context(server, iov, &n_iov,
+                                      oparms->fid->lease_key, oplock);
                if (rc) {
                        cifs_small_buf_release(req);
                        kfree(copy_path);
@@ -2043,7 +2232,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        }
 #endif /* SMB311 */
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2292,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           char *in_data, u32 indatalen,
           char **out_data, u32 *plen /* returned data len */)
 {
+       struct smb_rqst rqst;
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct cifs_ses *ses;
@@ -2189,7 +2383,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2472,7 @@ int
 SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
                 u64 persistent_fid, u64 volatile_fid, int flags)
 {
+       struct smb_rqst rqst;
        struct smb2_close_req *req;
        struct smb2_close_rsp *rsp;
        struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2500,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
 
@@ -2387,6 +2590,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
           u32 additional_info, size_t output_len, size_t min_len, void **data,
                u32 *dlen)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_req *req;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -2427,7 +2631,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
 
@@ -2594,11 +2802,10 @@ SMB2_echo(struct TCP_Server_Info *server)
 {
        struct smb2_echo_req *req;
        int rc = 0;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { .rq_iov = iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        cifs_dbg(FYI, "In echo request\n");
 
@@ -2614,11 +2821,8 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        req->sync_hdr.CreditRequest = cpu_to_le16(1);
 
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len;
+       iov[0].iov_base = (char *)req;
 
        rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
                             server, CIFS_ECHO_OP);
@@ -2633,6 +2837,7 @@ int
 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid)
 {
+       struct smb_rqst rqst;
        struct smb2_flush_req *req;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
@@ -2660,7 +2865,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc != 0) {
@@ -2848,10 +3057,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
        struct smb2_sync_hdr *shdr;
        struct cifs_io_parms io_parms;
        struct smb_rqst rqst = { .rq_iov = rdata->iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        struct TCP_Server_Info *server;
        unsigned int total_len;
-       __be32 req_len;
 
        cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
                 __func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3090,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
        if (smb3_encryption_required(io_parms.tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       req_len = cpu_to_be32(total_len);
-
-       rdata->iov[0].iov_base = &req_len;
-       rdata->iov[0].iov_len = sizeof(__be32);
-       rdata->iov[1].iov_base = buf;
-       rdata->iov[1].iov_len = total_len;
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = total_len;
 
        shdr = (struct smb2_sync_hdr *)buf;
 
@@ -2926,6 +3130,7 @@ int
 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
          unsigned int *nbytes, char **buf, int *buf_type)
 {
+       struct smb_rqst rqst;
        int resp_buftype, rc = -EACCES;
        struct smb2_read_plain_req *req = NULL;
        struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3151,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3271,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct smb2_sync_hdr *shdr;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
        if (rc) {
@@ -3137,15 +3345,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
                v1->length = cpu_to_le32(wdata->mr->mr->length);
        }
 #endif
-       /* 4 for rfc1002 length field and 1 for Buffer */
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len - 1;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len - 1;
+       iov[0].iov_base = (char *)req;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 2;
+       rqst.rq_nvec = 1;
        rqst.rq_pages = wdata->pages;
        rqst.rq_offset = wdata->page_offset;
        rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3357,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_tailsz = wdata->tailsz;
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr) {
-               iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+               iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
                rqst.rq_npages = 0;
        }
 #endif
@@ -3210,6 +3414,7 @@ int
 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
           unsigned int *nbytes, struct kvec *iov, int n_vec)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_write_req *req = NULL;
        struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3456,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_vec + 1;
+
+       rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3532,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, int index,
                     struct cifs_search_info *srch_inf)
 {
+       struct smb_rqst rqst;
        struct smb2_query_directory_req *req;
        struct smb2_query_directory_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -3395,7 +3605,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_base = (char *)(req->Buffer);
        iov[1].iov_len = len;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
 
@@ -3454,6 +3668,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
               u8 info_type, u32 additional_info, unsigned int num,
                void **data, unsigned int *size)
 {
+       struct smb_rqst rqst;
        struct smb2_set_info_req *req;
        struct smb2_set_info_rsp *rsp = NULL;
        struct kvec *iov;
@@ -3509,9 +3724,13 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                iov[i].iov_len = size[i];
        }
 
-       rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = num;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
-       cifs_small_buf_release(req);
+       cifs_buf_release(req);
        rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
 
        if (rc != 0) {
@@ -3664,6 +3883,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                  const u64 persistent_fid, const u64 volatile_fid,
                  __u8 oplock_level)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3912,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
@@ -3755,6 +3979,7 @@ int
 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3773,7 +3998,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4027,7 @@ int
 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, int level)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3829,7 +4059,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4102,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
           const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
           const __u32 num_lock, struct smb2_lock_element *buf)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_lock_req *req = NULL;
        struct kvec iov[2];
@@ -3900,7 +4135,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_len = count;
 
        cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
-       rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        if (rc) {
@@ -3934,6 +4174,7 @@ int
 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
                 __u8 *lease_key, const __le32 lease_state)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_lease_ack *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4205,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
index a345560001ced354c550d6ab2f507a18d72ff9d2..a671adcc44a6c8c6d460585c9b2c8d6b546fc015 100644 (file)
@@ -678,16 +678,14 @@ struct create_context {
 #define SMB2_LEASE_KEY_SIZE 16
 
 struct lease_context {
-       __le64 LeaseKeyLow;
-       __le64 LeaseKeyHigh;
+       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
        __le32 LeaseState;
        __le32 LeaseFlags;
        __le64 LeaseDuration;
 } __packed;
 
 struct lease_context_v2 {
-       __le64 LeaseKeyLow;
-       __le64 LeaseKeyHigh;
+       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
        __le32 LeaseState;
        __le32 LeaseFlags;
        __le64 LeaseDuration;
@@ -851,8 +849,11 @@ struct validate_negotiate_info_rsp {
        __le16 Dialect; /* Dialect in use for the connection */
 } __packed;
 
-#define RSS_CAPABLE    0x00000001
-#define RDMA_CAPABLE   0x00000002
+#define RSS_CAPABLE    cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE   cpu_to_le32(0x00000002)
+
+#define INTERNETWORK   cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
 
 struct network_interface_info_ioctl_rsp {
        __le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +861,21 @@ struct network_interface_info_ioctl_rsp {
        __le32 Capability; /* RSS or RDMA Capable */
        __le32 Reserved;
        __le64 LinkSpeed;
-       char    SockAddr_Storage[128];
+       __le16 Family;
+       __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+       __be16 Port;
+       __be32 IPv4Address;
+       __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+       __be16 Port;
+       __be32 FlowInfo;
+       __u8   IPv6Address[16];
+       __be32 ScopeId;
 } __packed;
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
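
With SockAddr_Storage split into an explicit Family plus a raw Buffer, a
consumer can dispatch on the on-wire family. A hedged sketch (parse_iface()
is hypothetical; the defines are the little-endian constants added above):

    /* Hypothetical dispatcher on the new Family field */
    static int parse_iface(struct network_interface_info_ioctl_rsp *p)
    {
            switch (p->Family) {
            case INTERNETWORK:
                    /* p->Buffer holds a struct iface_info_ipv4 */
                    return 4;
            case INTERNETWORKV6:
                    /* p->Buffer holds a struct iface_info_ipv6 */
                    return 6;
            default:
                    return -EINVAL;
            }
    }
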
index c84020057bd816c31a69fd173746374c5c224c8a..6e6a4f2ec890dc0f0ae02b53c9326ae379b02bf7 100644 (file)
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
                              struct cifs_sb_info *cifs_sb, bool set_alloc);
 extern int smb2_set_file_info(struct inode *inode, const char *full_path,
                              FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb);
 extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *name, struct cifs_sb_info *cifs_sb);
 extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
 extern void smb2_reconnect_server(struct work_struct *work);
 extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
+                                 struct smb_rqst *rqst);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
index 349d5ccf854c26999ed8554f6d19cf64a89a33a3..719d55e63d88fe9efc307d16813ffe3f7b9d6762 100644 (file)
@@ -171,10 +171,10 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
        unsigned char *sigptr = smb2_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
+       struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
+       struct smb_rqst drqst;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
        if (!ses) {
@@ -192,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        }
 
        rc = crypto_shash_setkey(server->secmech.hmacsha256,
-               ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
+                                ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
                return rc;
        }
 
-       rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash);
+       rc = crypto_shash_init(shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init sha256", __func__);
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index,  server, sigptr,
-               &server->secmech.sdeschmacsha256->shash);
+       /*
+        * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+        * data, that is, iov[0] should not contain an RFC1002 length.
+        *
+        * Sign the RFC1002 length prior to passing the data (iov[1-N]) down to
+        * __cifs_calc_signature().
+        */
+       drqst = *rqst;
+       if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+               rc = crypto_shash_update(shash, iov[0].iov_base,
+                                        iov[0].iov_len);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: Could not update with payload\n",
+                                __func__);
+                       return rc;
+               }
+               drqst.rq_iov++;
+               drqst.rq_nvec--;
+       }
 
+       rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
        if (!rc)
                memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
@@ -410,14 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses)
 int
 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
-       int rc = 0;
+       int rc;
        unsigned char smb3_signature[SMB2_CMACAES_SIZE];
        unsigned char *sigptr = smb3_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
+       struct shash_desc *shash = &server->secmech.sdesccmacaes->shash;
+       struct smb_rqst drqst;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
        if (!ses) {
@@ -429,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
 
        rc = crypto_shash_setkey(server->secmech.cmacaes,
-               ses->smb3signingkey, SMB2_CMACAES_SIZE);
-
+                                ses->smb3signingkey, SMB2_CMACAES_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
                return rc;
@@ -441,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
         * so unlike smb2 case we do not have to check here if secmech are
         * initialized
         */
-       rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash);
+       rc = crypto_shash_init(shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
-                                  &server->secmech.sdesccmacaes->shash);
+       /*
+        * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+        * data, that is, iov[0] should not contain an RFC1002 length.
+        *
+        * Sign the RFC1002 length prior to passing the data (iov[1-N]) down to
+        * __cifs_calc_signature().
+        */
+       drqst = *rqst;
+       if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+               rc = crypto_shash_update(shash, iov[0].iov_base,
+                                        iov[0].iov_len);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: Could not update with payload\n",
+                                __func__);
+                       return rc;
+               }
+               drqst.rq_iov++;
+               drqst.rq_nvec--;
+       }
 
+       rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
        if (!rc)
                memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
@@ -462,7 +497,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
        int rc = 0;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
 
        if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
            server->tcpStatus == CifsNeedNegotiate)
@@ -552,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
 
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
+       kref_init(&temp->refcount);
        temp->mid = le64_to_cpu(shdr->MessageId);
        temp->pid = current->pid;
        temp->command = shdr->Command; /* Always LE */
@@ -635,7 +671,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +692,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(server, shdr);
index e459c97151b34e684dc3f3cbbc36772fee5aaee5..c55ea4e6201bbf08041968e483ae26d0183a5f3a 100644 (file)
@@ -18,6 +18,7 @@
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
+#include "smb2proto.h"
 
 static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info);
@@ -2082,12 +2083,13 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
  * rqst: the data to write
  * return value: 0 if successfully write, otherwise error code
  */
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
+       struct smbd_connection *info = server->smbd_conn;
        struct kvec vec;
        int nvecs;
        int size;
-       unsigned int buflen = 0, remaining_data_length;
+       unsigned int buflen, remaining_data_length;
        int start, i, j;
        int max_iov_size =
                info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2113,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
                return -EINVAL;
        }
-       iov = &rqst->rq_iov[1];
-
-       /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec-1; i++) {
-               buflen += iov[i].iov_len;
-       }
 
        /*
         * Add in the page array if there is one. The caller needs to set
         * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
         * ends at page boundary
         */
-       if (rqst->rq_npages) {
-               if (rqst->rq_npages == 1)
-                       buflen += rqst->rq_tailsz;
-               else
-                       buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
-                                       rqst->rq_offset + rqst->rq_tailsz;
-       }
+       buflen = smb_rqst_len(server, rqst);
 
        if (buflen + sizeof(struct smbd_data_transfer) >
                info->max_fragmented_send_size) {
@@ -2139,6 +2129,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                goto done;
        }
 
+       iov = &rqst->rq_iov[1];
+
        cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
        for (i = 0; i < rqst->rq_nvec-1; i++)
                dump_smb(iov[i].iov_base, iov[i].iov_len);
index 1e419c21dc60527c753747bee44625cafdc7ca3d..a11096254f2965d02478132af55e9ccf6613c578 100644 (file)
@@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
 
 enum mr_state {
        MR_READY,
@@ -332,7 +332,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct smbd_connection *info) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
index 61e74d455d90625339591a3b47560f5bdb50c343..67e413f6ee4d8fd1dbd1eede0a7b0a9e6442a9e3 100644 (file)
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
        TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
 
 DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
 
 DECLARE_EVENT_CLASS(smb3_open_done_class,
        TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
        TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
 
 DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
 
 #endif /* _CIFS_TRACE_H */
 
index 1f1a68f8911001bae86976171e44a09402982d92..a341ec839c83de8ba9b9a10bb31f3b7ce8d45e8f 100644 (file)
@@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
+       kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
@@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
        return temp;
 }
 
+static void _cifs_mid_q_entry_release(struct kref *refcount)
+{
+       struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
+                                              refcount);
+
+       mempool_free(mid, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+       spin_lock(&GlobalMid_Lock);
+       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+       spin_unlock(&GlobalMid_Lock);
+}
+
 void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
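
Both allocators (smb2_mid_entry_alloc() above and AllocMidQEntry() here) now
take an initial kref, and DeleteMidQEntry() drops it through the new
cifs_mid_q_entry_release(). A hypothetical user that must keep a mid alive
across a lock drop would pair an explicit get with that put; this usage is a
sketch, not part of the patch:

    kref_get(&mid->refcount);               /* pin while unlocked */
    spin_unlock(&GlobalMid_Lock);
    /* ... inspect mid without holding the lock ... */
    cifs_mid_q_entry_release(mid);          /* kref_put under the lock */
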
@@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                }
        }
 #endif
-       mempool_free(midEntry, cifs_mid_poolp);
+       cifs_mid_q_entry_release(midEntry);
 }
 
 void
@@ -201,15 +217,25 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
        return 0;
 }
 
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        unsigned int i;
-       struct kvec *iov = rqst->rq_iov;
+       struct kvec *iov;
+       int nvec;
        unsigned long buflen = 0;
 
+       /*
+        * If an SMB2+ caller still carries the 4-byte RFC1002 marker in
+        * iov[0], skip it here so only the actual payload is counted.
+        */
+       if (server->vals->header_preamble_size == 0 &&
+           rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
+               iov = &rqst->rq_iov[1];
+               nvec = rqst->rq_nvec - 1;
+       } else {
+               iov = rqst->rq_iov;
+               nvec = rqst->rq_nvec;
+       }
+
        /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec; i++)
+       for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;
 
        /*
@@ -236,70 +262,88 @@ rqst_len(struct smb_rqst *rqst)
 }
 
 static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+               struct smb_rqst *rqst)
 {
-       int rc;
-       struct kvec *iov = rqst->rq_iov;
-       int n_vec = rqst->rq_nvec;
-       unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
-       unsigned long send_length;
-       unsigned int i;
+       int rc = 0;
+       struct kvec *iov;
+       int n_vec;
+       unsigned int send_length = 0;
+       unsigned int i, j;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
+       __be32 rfc1002_marker;
+
        if (cifs_rdma_enabled(server) && server->smbd_conn) {
-               rc = smbd_send(server->smbd_conn, rqst);
+               rc = smbd_send(server, rqst);
                goto smbd_done;
        }
        if (ssocket == NULL)
                return -ENOTSOCK;
 
-       /* sanity check send length */
-       send_length = rqst_len(rqst);
-       if (send_length != smb_buf_length + 4) {
-               WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
-                       send_length, smb_buf_length);
-               return -EIO;
-       }
-
-       if (n_vec < 2)
-               return -EIO;
-
-       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
-       dump_smb(iov[0].iov_base, iov[0].iov_len);
-       dump_smb(iov[1].iov_base, iov[1].iov_len);
-
        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       size = 0;
-       for (i = 0; i < n_vec; i++)
-               size += iov[i].iov_len;
+       for (j = 0; j < num_rqst; j++)
+               send_length += smb_rqst_len(server, &rqst[j]);
+       rfc1002_marker = cpu_to_be32(send_length);
 
-       iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+       /* Generate a rfc1002 marker for SMB2+ */
+       if (server->vals->header_preamble_size == 0) {
+               struct kvec hiov = {
+                       .iov_base = &rfc1002_marker,
+                       .iov_len  = 4
+               };
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+                             1, 4);
+               rc = smb_send_kvec(server, &smb_msg, &sent);
+               if (rc < 0)
+                       goto uncork;
 
-       rc = smb_send_kvec(server, &smb_msg, &sent);
-       if (rc < 0)
-               goto uncork;
+               total_len += sent;
+               send_length += 4;
+       }
 
-       total_len += sent;
+       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 
-       /* now walk the page array and send each page in it */
-       for (i = 0; i < rqst->rq_npages; i++) {
-               struct bio_vec bvec;
+       for (j = 0; j < num_rqst; j++) {
+               iov = rqst[j].rq_iov;
+               n_vec = rqst[j].rq_nvec;
+
+               size = 0;
+               for (i = 0; i < n_vec; i++) {
+                       dump_smb(iov[i].iov_base, iov[i].iov_len);
+                       size += iov[i].iov_len;
+               }
 
-               bvec.bv_page = rqst->rq_pages[i];
-               rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+                             iov, n_vec, size);
 
-               iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
-                             &bvec, 1, bvec.bv_len);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
-                       break;
+                       goto uncork;
 
                total_len += sent;
+
+               /* now walk the page array and send each page in it */
+               for (i = 0; i < rqst[j].rq_npages; i++) {
+                       struct bio_vec bvec;
+
+                       bvec.bv_page = rqst[j].rq_pages[i];
+                       rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+                                            &bvec.bv_offset);
+
+                       iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+                                     &bvec, 1, bvec.bv_len);
+                       rc = smb_send_kvec(server, &smb_msg, &sent);
+                       if (rc < 0)
+                               break;
+
+                       total_len += sent;
+               }
        }
 
 uncork:
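
The rewritten sender emits a single RFC1002 marker for the whole (possibly
compounded) request: four bytes of big-endian total length, sent before any
of the rqst iovs. A standalone illustration of just the marker encoding:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t send_length = 196;     /* example compound length */
            uint32_t marker = htonl(send_length);
            unsigned char *b = (unsigned char *)&marker;

            /* prints 00 00 00 c4: the length in network byte order */
            printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
            return 0;
    }
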
@@ -308,9 +352,9 @@ uncork:
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+       if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
-                        smb_buf_length + 4, total_len);
+                        send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
@@ -335,7 +379,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        int rc;
 
        if (!(flags & CIFS_TRANSFORM_REQ))
-               return __smb_send_rqst(server, rqst);
+               return __smb_send_rqst(server, 1, rqst);
 
        if (!server->ops->init_transform_rq ||
            !server->ops->free_transform_rq) {
@@ -347,7 +391,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        if (rc)
                return rc;
 
-       rc = __smb_send_rqst(server, &cur_rqst);
+       rc = __smb_send_rqst(server, 1, &cur_rqst);
        server->ops->free_transform_rq(&cur_rqst);
        return rc;
 }
@@ -365,7 +409,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;
 
-       return __smb_send_rqst(server, &rqst);
+       return __smb_send_rqst(server, 1, &rqst);
 }
 
 static int
@@ -730,7 +774,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
-
        rc = wait_for_free_request(ses->server, timeout, optype);
        if (rc)
                return rc;
@@ -766,8 +809,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
-               smb311_update_preauth_hash(ses, rqst->rq_iov+1,
-                                          rqst->rq_nvec-1);
+               smb311_update_preauth_hash(ses, rqst->rq_iov,
+                                          rqst->rq_nvec);
 #endif
 
        if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +855,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
-                       .iov_base = buf,
-                       .iov_len = midQ->resp_buf_size
+                       .iov_base = resp_iov->iov_base,
+                       .iov_len = resp_iov->iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }
@@ -872,49 +915,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
-              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
-              const int flags, struct kvec *resp_iov)
-{
-       struct smb_rqst rqst;
-       struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
-       int rc;
-       int i;
-       __u32 count;
-       __be32 rfc1002_marker;
-
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
-               new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
-                                       GFP_KERNEL);
-               if (!new_iov)
-                       return -ENOMEM;
-       } else
-               new_iov = s_iov;
-
-       /* 1st iov is an RFC1002 Session Message length */
-       memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
-       count = 0;
-       for (i = 1; i < n_vec + 1; i++)
-               count += new_iov[i].iov_len;
-
-       rfc1002_marker = cpu_to_be32(count);
-
-       new_iov[0].iov_base = &rfc1002_marker;
-       new_iov[0].iov_len = 4;
-
-       memset(&rqst, 0, sizeof(struct smb_rqst));
-       rqst.rq_iov = new_iov;
-       rqst.rq_nvec = n_vec + 1;
-
-       rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
-               kfree(new_iov);
-       return rc;
-}
-
 int
 SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
index ceb1031f1cac948e74a970f02058cfeb52d7a351..08d3bd602f73d8f219ee1f259c0cbaa839245c56 100644 (file)
@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct wait_queue_head *
-eventfd_get_poll_head(struct file *file, __poll_t events)
-{
-       struct eventfd_ctx *ctx = file->private_data;
-
-       return &ctx->wqh;
-}
-
-static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
 {
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;
 
+       poll_wait(file, &ctx->wqh, wait);
+
        /*
         * All writes to ctx->count occur within ctx->wqh.lock.  This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
        count = READ_ONCE(ctx->count);
 
        if (count > 0)
-               events |= (EPOLLIN & eventmask);
+               events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
-               events |= (EPOLLOUT & eventmask);
+               events |= EPOLLOUT;
 
        return events;
 }
@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
        .show_fdinfo    = eventfd_show_fdinfo,
 #endif
        .release        = eventfd_release,
-       .get_poll_head  = eventfd_get_poll_head,
-       .poll_mask      = eventfd_poll_mask,
+       .poll           = eventfd_poll,
        .read           = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
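
eventfd returns from the short-lived get_poll_head()/poll_mask() pair to a
single classic ->poll method (eventpoll below gets the same conversion). The
canonical shape of such a method, as a kernel-style sketch with placeholder
names:

    static __poll_t my_poll(struct file *file, poll_table *wait)
    {
            struct my_ctx *ctx = file->private_data;
            __poll_t events = 0;

            /* register on the wait queue; poll_wait() never blocks */
            poll_wait(file, &ctx->wqh, wait);

            if (my_ready(ctx))
                    events |= EPOLLIN;
            return events;
    }
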
index ea4436f409fb005a16edeca3f49f29f955db0171..67db22fe99c5ce8bf0ba606c0a45f221cbf69b38 100644 (file)
@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
        return 0;
 }
 
-static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
-               __poll_t eventmask)
-{
-       struct eventpoll *ep = file->private_data;
-       return &ep->poll_wait;
-}
-
-static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
        struct eventpoll *ep = file->private_data;
        int depth = 0;
 
+       /* Insert inside our poll wait queue */
+       poll_wait(file, &ep->poll_wait, wait);
+
        /*
         * Proceed to find out if wanted events are really available inside
         * the ready list.
@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
        .show_fdinfo    = ep_show_fdinfo,
 #endif
        .release        = ep_eventpoll_release,
-       .get_poll_head  = ep_eventpoll_get_poll_head,
-       .poll_mask      = ep_eventpoll_poll_mask,
+       .poll           = ep_eventpoll_poll,
        .llseek         = noop_llseek,
 };
 
index 2d4e0075bd2457c83f9109d5a29365de61658840..bdd0eacefdf575b1351b5d00bf5d7d1bb05b50a1 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -290,15 +290,15 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;
 
-       bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
+       vma_set_anonymous(vma);
 
        if (down_write_killable(&mm->mmap_sem)) {
                err = -EINTR;
                goto err_free;
        }
-       vma->vm_mm = mm;
 
        /*
         * Place the stack at the largest stack address the architecture
@@ -311,7 +311,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        err = insert_vm_struct(mm, vma);
        if (err)
@@ -326,7 +325,7 @@ err:
        up_write(&mm->mmap_sem);
 err_free:
        bprm->vma = NULL;
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return err;
 }
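
The exec.c hunk switches stack-VMA setup to the vm_area_alloc()/vm_area_free()
helpers, which absorb the zeroing, vm_mm assignment and anon_vma_chain init
the caller previously open-coded. A plausible shape for the allocator,
consistent with what this hunk removes (not necessarily the exact upstream
implementation):

    struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
            if (vma) {
                    vma->vm_mm = mm;
                    INIT_LIST_HEAD(&vma->anon_vma_chain);
            }
            return vma;
    }
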
 
index cc40802ddfa856d14aefc8ef75ec9e61b89864b0..00e759f051619cfd37a58108265bc9f798554a21 100644 (file)
@@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long,
                              unsigned long);
 extern unsigned long ext2_count_free_blocks (struct super_block *);
 extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
 extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
                                                    unsigned int block_group,
                                                    struct buffer_head ** bh);
@@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page
 extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
 extern void ext2_free_inode (struct inode *);
 extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
 extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
 
 /* inode.c */
index 25ab1274090f8532254e783def084bccd24a21c4..8ff53f8da3bcc414fdad44ac3bb76a88258e4d51 100644 (file)
@@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb,
                        set_opt (opts->s_mount_opt, NO_UID32);
                        break;
                case Opt_nocheck:
+                       ext2_msg(sb, KERN_WARNING,
+                               "Option nocheck/check=none is deprecated and"
+                               " will be removed in June 2020.");
                        clear_opt (opts->s_mount_opt, CHECK);
                        break;
                case Opt_debug:
@@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
        new_opts.s_resgid = sbi->s_resgid;
        spin_unlock(&sbi->s_lock);
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, &new_opts))
                return -EINVAL;
 
index b00481c475cb1ea63195ef970bde30af613364c6..aa52d87985aaf30901a52ac6a605357ac6cbea12 100644 (file)
@@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
-       int flex_bg = 0;
 
        J_ASSERT_BH(bh, buffer_locked(bh));
 
@@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 
        start = ext4_group_first_block_no(sb, block_group);
 
-       if (ext4_has_feature_flex_bg(sb))
-               flex_bg = 1;
-
        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
-       if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+       if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 
        tmp = ext4_inode_bitmap(sb, gdp);
-       if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+       if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 
        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
-               if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+               if (ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }
 
@@ -372,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
 
        ext4_lock_group(sb, block_group);
+       if (buffer_verified(bh))
+               goto verified;
        if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
                        desc, bh))) {
                ext4_unlock_group(sb, block_group);
@@ -390,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
        }
        set_buffer_verified(bh);
+verified:
        ext4_unlock_group(sb, block_group);
        return 0;
 }
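The buffer_verified() recheck added under the group lock here (and mirrored in the inode-bitmap path below) reads like classic double-checked validation: the function's earlier unlocked buffer_verified() test is the fast path, and a racing task may complete the expensive checksum verification between that test and our ext4_lock_group(). Schematically (a sketch of my reading, not the full function):

        if (buffer_verified(bh))                /* unlocked fast path */
                return 0;

        ext4_lock_group(sb, block_group);
        if (buffer_verified(bh))                /* lost the race: already verified */
                goto verified;
        /* ... checksum and block-range verification ... */
        set_buffer_verified(bh);
verified:
        ext4_unlock_group(sb, block_group);
        return 0;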
@@ -442,7 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
                goto verify;
        }
        ext4_lock_group(sb, block_group);
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+               if (block_group == 0) {
+                       ext4_unlock_group(sb, block_group);
+                       unlock_buffer(bh);
+                       ext4_error(sb, "Block bitmap for bg 0 marked "
+                                  "uninitialized");
+                       err = -EFSCORRUPTED;
+                       goto out;
+               }
                err = ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
index 0b127853c5845aef5bcfeaa9ec2485f47d7939fb..7c7123f265c25ae9a586877dc30d7b80ede5b62c 100644 (file)
@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DIOREAD_NOLOCK      0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM    0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT        0x1000000 /* Journal Async Commit */
+#define EXT4_MOUNT_WARN_ON_ERROR       0x2000000 /* Trigger WARN_ON on error */
 #define EXT4_MOUNT_DELALLOC            0x8000000 /* Delalloc support */
 #define EXT4_MOUNT_DATA_ERR_ABORT      0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY      0x20000000 /* Block validity checking */
@@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
        return ino == EXT4_ROOT_INO ||
-               ino == EXT4_USR_QUOTA_INO ||
-               ino == EXT4_GRP_QUOTA_INO ||
-               ino == EXT4_BOOT_LOADER_INO ||
-               ino == EXT4_JOURNAL_INO ||
-               ino == EXT4_RESIZE_INO ||
                (ino >= EXT4_FIRST_INO(sb) &&
                 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
@@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode,
 struct iomap;
 extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
 
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
-                                        struct inode *inode,
-                                        int needed);
 extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 
 extern int ext4_convert_inline_data(struct inode *inode);
index 98fb0c119c6827dd50b86ac3521f216bba41170c..adf6668b596f9e20aab3878b791009bfa5f051a0 100644 (file)
@@ -91,6 +91,7 @@ struct ext4_extent_header {
 };
 
 #define EXT4_EXT_MAGIC         cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
 
 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \
        (sizeof(struct ext4_extent_header) + \
index 0057fe3f248d195736ee58ec40131dadd98d59bb..8ce6fd5b10dd331a9cd86fb41e15ba84095c75e7 100644 (file)
@@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 
        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
+       if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+               EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+                                depth);
+               ret = -EFSCORRUPTED;
+               goto err;
+       }
 
        if (path) {
                ext4_ext_drop_refs(path);
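EXT4_MAX_EXTENT_DEPTH, defined in the extent-header hunk above, bounds a value read straight off the disk, so ext4_find_extent() now rejects a crafted image instead of walking an absurdly deep tree. Five levels is a generous ceiling; as a back-of-envelope check (my arithmetic, not text from the patch), even at the minimum 1 KiB block size:

        (1024 - 12) / 12 = 84 ext4_extent_idx entries per index block
        4 * 84^4 ~= 2.0e8 leaf blocks reachable from the 4-entry in-inode root
        * 84 extents * 32768 blocks each ~= 5.5e14 mappable blocks

which dwarfs the 2^32 logical blocks a single file can span, so any on-disk depth above 5 can only be corruption.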
index f525f909b559c8c12e361f0750b56717a972ffb1..f336cbc6e932ee03113ebfa957e9b74ccc42352a 100644 (file)
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
                return -EFSCORRUPTED;
 
        ext4_lock_group(sb, block_group);
+       if (buffer_verified(bh))
+               goto verified;
        blk = ext4_inode_bitmap(sb, desc);
        if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
                                           EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
                return -EFSBADCRC;
        }
        set_buffer_verified(bh);
+verified:
        ext4_unlock_group(sb, block_group);
        return 0;
 }
@@ -150,7 +153,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        }
 
        ext4_lock_group(sb, block_group);
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+               if (block_group == 0) {
+                       ext4_unlock_group(sb, block_group);
+                       unlock_buffer(bh);
+                       ext4_error(sb, "Inode bitmap for bg 0 marked "
+                                  "uninitialized");
+                       err = -EFSCORRUPTED;
+                       goto out;
+               }
                memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
                ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
                                     sb->s_blocksize * 8, bh->b_data);
@@ -994,7 +1006,8 @@ got:
 
                /* recheck and clear flag under lock if we still need to */
                ext4_lock_group(sb, group);
-               if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               if (ext4_has_group_desc_csum(sb) &&
+                   (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_group_clusters_set(sb, gdp,
                                ext4_free_clusters_after_init(sb, group, gdp));
@@ -1375,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
                            ext4_itable_unused_count(sb, gdp)),
                            sbi->s_inodes_per_block);
 
-       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+                              ext4_itable_unused_count(sb, gdp)) <
+                             EXT4_FIRST_INO(sb)))) {
                ext4_error(sb, "Something is wrong with group %u: "
                           "used itable blocks: %d; "
                           "itable unused count: %u",
index 285ed1588730c34c892566c0639b048e9c9a017e..3543fe80a3c442364d752fcbb74a7edd4df97dc9 100644 (file)
@@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
 
        memset((void *)ext4_raw_inode(&is.iloc)->i_block,
                0, EXT4_MIN_INLINE_DATA_SIZE);
+       memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
 
        if (ext4_has_feature_extents(inode->i_sb)) {
                if (S_ISDIR(inode->i_mode) ||
@@ -681,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
                goto convert;
        }
 
+       ret = ext4_journal_get_write_access(handle, iloc.bh);
+       if (ret)
+               goto out;
+
        flags |= AOP_FLAG_NOFS;
 
        page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -709,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 out_up_read:
        up_read(&EXT4_I(inode)->xattr_sem);
 out:
-       if (handle)
+       if (handle && (ret != 1))
                ext4_journal_stop(handle);
        brelse(iloc.bh);
        return ret;
@@ -751,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 
        ext4_write_unlock_xattr(inode, &no_expand);
        brelse(iloc.bh);
+       mark_inode_dirty(inode);
 out:
        return copied;
 }
@@ -886,18 +892,17 @@ retry_journal:
        flags |= AOP_FLAG_NOFS;
 
        if (ret == -ENOSPC) {
+               ext4_journal_stop(handle);
                ret = ext4_da_convert_inline_data_to_extent(mapping,
                                                            inode,
                                                            flags,
                                                            fsdata);
-               ext4_journal_stop(handle);
                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
                goto out;
        }
 
-
        page = grab_cache_page_write_begin(mapping, 0, flags);
        if (!page) {
                ret = -ENOMEM;
@@ -915,6 +920,9 @@ retry_journal:
                if (ret < 0)
                        goto out_release_page;
        }
+       ret = ext4_journal_get_write_access(handle, iloc.bh);
+       if (ret)
+               goto out_release_page;
 
        up_read(&EXT4_I(inode)->xattr_sem);
        *pagep = page;
@@ -935,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
                                  unsigned len, unsigned copied,
                                  struct page *page)
 {
-       int i_size_changed = 0;
        int ret;
 
        ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -953,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
         * But it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
-       if (pos+copied > inode->i_size) {
+       if (pos+copied > inode->i_size)
                i_size_write(inode, pos+copied);
-               i_size_changed = 1;
-       }
        unlock_page(page);
        put_page(page);
 
@@ -966,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed)
-               mark_inode_dirty(inode);
+       mark_inode_dirty(inode);
 
        return copied;
 }
@@ -1890,42 +1894,6 @@ out:
        return (error < 0 ? error : 0);
 }
 
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
-                                 struct inode *inode,
-                                 int needed)
-{
-       int error;
-       struct ext4_xattr_entry *entry;
-       struct ext4_inode *raw_inode;
-       struct ext4_iloc iloc;
-
-       error = ext4_get_inode_loc(inode, &iloc);
-       if (error)
-               return error;
-
-       raw_inode = ext4_raw_inode(&iloc);
-       entry = (struct ext4_xattr_entry *)((void *)raw_inode +
-                                           EXT4_I(inode)->i_inline_off);
-       if (EXT4_XATTR_LEN(entry->e_name_len) +
-           EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
-               error = -ENOSPC;
-               goto out;
-       }
-
-       error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
-       brelse(iloc.bh);
-       return error;
-}
-
 int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 {
        handle_t *handle;
index 2ea07efbe0165d0d5bbff1cd4a570cb8bc337ae6..4efe77286ecd55a6a1c79e45e61a93eb3554b901 100644 (file)
@@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                                   map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
-                                "lblock %lu mapped to illegal pblock "
+                                "lblock %lu mapped to illegal pblock %llu "
                                 "(length %d)", (unsigned long) map->m_lblk,
-                                map->m_len);
+                                map->m_pblk, map->m_len);
                return -EFSCORRUPTED;
        }
        return 0;
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
        loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
+       int inline_data = ext4_has_inline_data(inode);
 
        trace_ext4_write_end(inode, pos, len, copied);
-       if (ext4_has_inline_data(inode)) {
+       if (inline_data) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
-       if (i_size_changed)
+       if (i_size_changed || inline_data)
                ext4_mark_inode_dirty(handle, inode);
 
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
        int partial = 0;
        unsigned from, to;
        int size_changed = 0;
+       int inline_data = ext4_has_inline_data(inode);
 
        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (ext4_has_inline_data(inode)) {
+       if (inline_data) {
                ret = ext4_write_inline_data_end(inode, pos, len,
                                                 copied, page);
                if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
        if (old_size < pos)
                pagecache_isize_extended(inode, old_size, pos);
 
-       if (size_changed) {
+       if (size_changed || inline_data) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
        }
 
        if (inline_data) {
-               BUFFER_TRACE(inode_bh, "get write access");
-               ret = ext4_journal_get_write_access(handle, inode_bh);
-
-               err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+               ret = ext4_mark_inode_dirty(handle, inode);
        } else {
                ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
                                             do_journal_get_write_access);
@@ -4506,7 +4504,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
        int                     inodes_per_block, inode_offset;
 
        iloc->bh = NULL;
-       if (!ext4_valid_inum(sb, inode->i_ino))
+       if (inode->i_ino < EXT4_ROOT_INO ||
+           inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
                return -EFSCORRUPTED;
 
        iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
index 6eae2b91aafa20b21fd19c61bb15fa7add625935..f7ab340881626be5f28407334c0f25f18717eb75 100644 (file)
@@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
         * initialize bb_free to be able to skip
         * empty groups without initialization
         */
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                meta_group_info[i]->bb_free =
                        ext4_free_clusters_after_init(sb, group, desc);
        } else {
@@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 #endif
        ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
                      ac->ac_b_ex.fe_len);
-       if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                ext4_free_group_clusters_set(sb, gdp,
                                             ext4_free_clusters_after_init(sb,
index 27b9a76a0dfabeff3ee9ec65a15d8d5e711d851a..638ad47434771af1d2c2a90a17003271fe26e5a9 100644 (file)
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
                        goto exit_thread;
                }
 
-               if (sb_rdonly(sb)) {
-                       ext4_warning(sb, "kmmpd being stopped since filesystem "
-                                    "has been remounted as readonly.");
-                       goto exit_thread;
-               }
+               if (sb_rdonly(sb))
+                       break;
 
                diff = jiffies - last_update_time;
                if (diff < mmp_update_interval * HZ)
index 0c4c2201b3aa2ee9680478f8fd11685e66634f50..b7f7922061be89928542176043b4cc78695edb17 100644 (file)
@@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 
 static void ext4_handle_error(struct super_block *sb)
 {
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
        if (sb_rdonly(sb))
                return;
 
@@ -740,6 +743,9 @@ __acquires(bitlock)
                va_end(args);
        }
 
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
@@ -1371,7 +1377,8 @@ enum {
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
        Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
        Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
-       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
+       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+       Opt_nowarn_on_error, Opt_mblk_io_submit,
        Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1438,6 +1445,8 @@ static const match_table_t tokens = {
        {Opt_dax, "dax"},
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
+       {Opt_warn_on_error, "warn_on_error"},
+       {Opt_nowarn_on_error, "nowarn_on_error"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
@@ -1602,6 +1611,8 @@ static const struct mount_opts {
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
+       {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
+       {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
        {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
@@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        ext4_fsblk_t last_block;
+       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
        ext4_fsblk_t block_bitmap;
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
@@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (block_bitmap >= sb_block + 1 &&
+                   block_bitmap <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Block bitmap for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Block bitmap for group %u not in group "
@@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (inode_bitmap >= sb_block + 1 &&
+                   inode_bitmap <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode bitmap for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (inode_bitmap < first_block || inode_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode bitmap for group %u not in group "
@@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (inode_table >= sb_block + 1 &&
+                   inode_table <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode table for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3097,6 +3133,9 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
        ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
        struct ext4_group_desc *gdp = NULL;
 
+       if (!ext4_has_group_desc_csum(sb))
+               return ngroups;
+
        for (group = 0; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp)
@@ -3742,6 +3781,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         le32_to_cpu(es->s_log_block_size));
                goto failed_mount;
        }
+       if (le32_to_cpu(es->s_log_cluster_size) >
+           (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+               ext4_msg(sb, KERN_ERR,
+                        "Invalid log cluster size: %u",
+                        le32_to_cpu(es->s_log_cluster_size));
+               goto failed_mount;
+       }
 
        if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
                ext4_msg(sb, KERN_ERR,
@@ -3806,6 +3852,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        } else {
                sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
                sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+               if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+                       ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+                                sbi->s_first_ino);
+                       goto failed_mount;
+               }
                if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
                    (!is_power_of_2(sbi->s_inode_size)) ||
                    (sbi->s_inode_size > blocksize)) {
@@ -3882,13 +3933,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                 "block size (%d)", clustersize, blocksize);
                        goto failed_mount;
                }
-               if (le32_to_cpu(es->s_log_cluster_size) >
-                   (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-                       ext4_msg(sb, KERN_ERR,
-                                "Invalid log cluster size: %u",
-                                le32_to_cpu(es->s_log_cluster_size));
-                       goto failed_mount;
-               }
                sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
                        le32_to_cpu(es->s_log_block_size);
                sbi->s_clusters_per_group =
@@ -3909,10 +3953,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                }
        } else {
                if (clustersize != blocksize) {
-                       ext4_warning(sb, "fragment/cluster size (%d) != "
-                                    "block size (%d)", clustersize,
-                                    blocksize);
-                       clustersize = blocksize;
+                       ext4_msg(sb, KERN_ERR,
+                                "fragment/cluster size (%d) != "
+                                "block size (%d)", clustersize, blocksize);
+                       goto failed_mount;
                }
                if (sbi->s_blocks_per_group > blocksize * 8) {
                        ext4_msg(sb, KERN_ERR,
@@ -3966,6 +4010,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         ext4_blocks_count(es));
                goto failed_mount;
        }
+       if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
+           (sbi->s_cluster_ratio == 1)) {
+               ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
+                        "block is 0 with a 1k block and cluster size");
+               goto failed_mount;
+       }
+
        blocks_count = (ext4_blocks_count(es) -
                        le32_to_cpu(es->s_first_data_block) +
                        EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -4001,6 +4052,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ret = -ENOMEM;
                goto failed_mount;
        }
+       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+               ret = -EINVAL;
+               goto failed_mount;
+       }
 
        bgl_lock_init(sbi->s_blockgroup_lock);
 
@@ -4020,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
+       sbi->s_gdb_count = db_count;
        if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
                ret = -EFSCORRUPTED;
                goto failed_mount2;
        }
 
-       sbi->s_gdb_count = db_count;
-
        timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
 
        /* Register extent status tree shrinker */
@@ -4736,6 +4794,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 
        if (!sbh || block_device_ejected(sb))
                return error;
+
+       /*
+        * The superblock bh should be mapped, but it might not be if the
+        * device was hot-removed. Not much we can do but fail the I/O.
+        */
+       if (!buffer_mapped(sbh))
+               return error;
+
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
@@ -5140,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 
                        if (sbi->s_journal)
                                ext4_mark_recovery_complete(sb, es);
+                       if (sbi->s_mmp_tsk)
+                               kthread_stop(sbi->s_mmp_tsk);
                } else {
                        /* Make sure we can mount this feature set readwrite */
                        if (ext4_has_feature_readonly(sb) ||
index fc4ced59c565b7b8ad2d36af9b8e1894c7fd3029..723df14f408408607c123dbbb7b7f7fe1fe9b396 100644 (file)
@@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
 {
        int error = -EFSCORRUPTED;
 
-       if (buffer_verified(bh))
-               return 0;
-
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                goto errout;
+       if (buffer_verified(bh))
+               return 0;
+
        error = -EFSBADCRC;
        if (!ext4_xattr_block_csum_verify(inode, bh))
                goto errout;
@@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
                                handle_t *handle, struct inode *inode,
                                bool is_block)
 {
-       struct ext4_xattr_entry *last;
+       struct ext4_xattr_entry *last, *next;
        struct ext4_xattr_entry *here = s->here;
        size_t min_offs = s->end - s->base, name_len = strlen(i->name);
        int in_inode = i->in_inode;
@@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 
        /* Compute min_offs and last. */
        last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+       for (; !IS_LAST_ENTRY(last); last = next) {
+               next = EXT4_XATTR_NEXT(last);
+               if ((void *)next >= s->end) {
+                       EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+                       ret = -EFSCORRUPTED;
+                       goto out;
+               }
                if (!last->e_value_inum && last->e_value_size) {
                        size_t offs = le16_to_cpu(last->e_value_offs);
                        if (offs < min_offs)
                                min_offs = offs;
@@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
        error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
-       if (error) {
-               if (error == -ENOSPC &&
-                   ext4_has_inline_data(inode)) {
-                       error = ext4_try_to_evict_inline_data(handle, inode,
-                                       EXT4_XATTR_LEN(strlen(i->name) +
-                                       EXT4_XATTR_SIZE(i->value_len)));
-                       if (error)
-                               return error;
-                       error = ext4_xattr_ibody_find(inode, i, is);
-                       if (error)
-                               return error;
-                       error = ext4_xattr_set_entry(i, s, handle, inode,
-                                                    false /* is_block */);
-               }
-               if (error)
-                       return error;
-       }
+       if (error)
+               return error;
        header = IHDR(inode, ext4_raw_inode(&is->iloc));
        if (!IS_LAST_ENTRY(s->first)) {
                header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
                last = IFIRST(header);
                /* Find the entry best suited to be pushed into EA block */
                for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+                       /* never move system.data out of the inode */
+                       if ((last->e_name_len == 4) &&
+                           (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+                           !memcmp(last->e_name, "data", 4))
+                               continue;
                        total_size = EXT4_XATTR_LEN(last->e_name_len);
                        if (!last->e_value_inum)
                                total_size += EXT4_XATTR_SIZE(
index 065dc919a0ce15963b21265f4872b007bcfc3310..bfd589ea74c01ebf74e4866d920759143b01be16 100644 (file)
@@ -707,13 +707,21 @@ static void fat_set_state(struct super_block *sb,
        brelse(bh);
 }
 
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+       if (opts->iocharset != fat_default_iocharset) {
+               /* Note: opts->iocharset can be NULL here */
+               kfree(opts->iocharset);
+               opts->iocharset = fat_default_iocharset;
+       }
+}
+
 static void delayed_free(struct rcu_head *p)
 {
        struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
        unload_nls(sbi->nls_disk);
        unload_nls(sbi->nls_io);
-       if (sbi->options.iocharset != fat_default_iocharset)
-               kfree(sbi->options.iocharset);
+       fat_reset_iocharset(&sbi->options);
        kfree(sbi);
 }
 
@@ -1132,7 +1140,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
        opts->fs_fmask = opts->fs_dmask = current_umask();
        opts->allow_utime = -1;
        opts->codepage = fat_default_codepage;
-       opts->iocharset = fat_default_iocharset;
+       fat_reset_iocharset(opts);
        if (is_vfat) {
                opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
                opts->rodir = 0;
@@ -1289,8 +1297,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
 
                /* vfat specific */
                case Opt_charset:
-                       if (opts->iocharset != fat_default_iocharset)
-                               kfree(opts->iocharset);
+                       fat_reset_iocharset(opts);
                        iocharset = match_strdup(&args[0]);
                        if (!iocharset)
                                return -ENOMEM;
@@ -1881,8 +1888,7 @@ out_fail:
                iput(fat_inode);
        unload_nls(sbi->nls_io);
        unload_nls(sbi->nls_disk);
-       if (sbi->options.iocharset != fat_default_iocharset)
-               kfree(sbi->options.iocharset);
+       fat_reset_iocharset(&sbi->options);
        sb->s_fs_info = NULL;
        kfree(sbi);
        return error;
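fat_reset_iocharset() folds three open-coded conditional kfree()s into one helper; the point is that every path now leaves opts->iocharset pointing at the static default rather than at freed memory, so a repeated reset (a second parse_options() pass, or the error path after one) cannot double-free. The idiom in a runnable userspace analogue (all names invented for illustration):

#include <stdlib.h>
#include <string.h>

static char default_charset[] = "ascii";  /* stands in for fat_default_iocharset */

struct opts {
        char *iocharset;
};

/* Idempotent reset: free only a heap-allocated override, then restore
 * the static default so calling this twice in a row is harmless. */
static void reset_iocharset(struct opts *o)
{
        if (o->iocharset != default_charset) {
                free(o->iocharset);     /* free(NULL) is a no-op, like kfree() */
                o->iocharset = default_charset;
        }
}

int main(void)
{
        struct opts o = { .iocharset = default_charset };

        reset_iocharset(&o);            /* no-op */
        o.iocharset = strdup("utf8");   /* user-supplied override */
        reset_iocharset(&o);            /* frees override, restores default */
        reset_iocharset(&o);            /* safe second call */
        return 0;
}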
index c184c5a356ff2b9f6d85534c67d01aeb75fda808..cdcb376ef8df4d3d48ee26d6d7bef2a1faa06377 100644 (file)
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
        struct fscache_cache_tag *tag;
 
+       ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
        BUG_ON(!cache->ops);
        BUG_ON(!ifsdef);
 
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
        if (!cache->kobj)
                goto error;
 
-       ifsdef->cookie = &fscache_fsdef_index;
        ifsdef->cache = cache;
        cache->fsdef = ifsdef;
 
index 97137d7ec5ee8bfe21796abde94144d726785d29..83bfe04456b6a99a196c485830b2ba2bc7419dec 100644 (file)
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
                goto error;
        }
 
+       ASSERTCMP(object->cookie, ==, cookie);
        fscache_stat(&fscache_n_object_alloc);
 
        object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
 
+       ASSERTCMP(object->cookie, ==, cookie);
+
        spin_lock(&cookie->lock);
 
        /* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
                spin_unlock(&cache->object_list_lock);
        }
 
-       /* attach to the cookie */
-       object->cookie = cookie;
-       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+       /* Attach to the cookie.  The object already has a ref on it. */
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);
 
        fscache_objlist_add(object);
index 20e0d0a4dc8cba917ef354e93f1aafe8bfee46a8..9edc920f651f3929f9ad4f27e061df728c424794 100644 (file)
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
        object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
+       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
        object->parent = NULL;
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
        RB_CLEAR_NODE(&object->objlist_link);
index e30c5975ea585e73dd70dcba892c295e3d78e68a..8d265790374cdac651ff3c6c9920d8c9467b7441 100644 (file)
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
        ASSERT(op->processor != NULL);
        ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
-       ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+       ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+                   op->state, ==,  FSCACHE_OP_ST_CANCELLED);
 
        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
        struct fscache_cache *cache;
 
        _enter("{OBJ%x OP%x,%d}",
-              op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+              op->object ? op->object->debug_id : 0,
+              op->debug_id, atomic_read(&op->usage));
 
        ASSERTCMP(atomic_read(&op->usage), >, 0);
 
index d508c7844681fb4427ed8f66e2886cd9f39acbad..40d4c66c7751dd895302f42e4ec0d4bc60c1f87b 100644 (file)
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
        bool truncate_op = (lend == LLONG_MAX);
 
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+       vma_init(&pseudo_vma, current->mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pagevec_init(&pvec);
        next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
         * as input to create an allocation policy.
         */
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+       vma_init(&pseudo_vma, mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;
 
index 2c300e98179607ea0062a2c1dbcee17e9bc926c4..8c86c809ca17b30e003913e169626aa42df2e908 100644 (file)
@@ -1999,8 +1999,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
+
+               /* Directories are special, and always inherit S_ISGID */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
+               else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+                        !in_group_p(inode->i_gid) &&
+                        !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+                       mode &= ~S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
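The inode_init_owner() hunk tightens SGID-directory inheritance: a new subdirectory still inherits S_ISGID, but a new regular file only keeps a setgid-plus-group-execute combination if the creator is a member of the inherited group or has CAP_FSETID. Just the mode arithmetic, as a runnable userspace rendering (inherit_mode, in_group and has_fsetid are illustrative stand-ins for the kernel's in_group_p()/capable_wrt_inode_uidgid()):

#include <stdio.h>
#include <sys/stat.h>

static mode_t inherit_mode(mode_t mode, int is_dir, int in_group, int has_fsetid)
{
        if (is_dir)
                return mode | S_ISGID;          /* directories always inherit */
        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
            !in_group && !has_fsetid)
                mode &= ~S_ISGID;               /* strip would-be setgid executable */
        return mode;
}

int main(void)
{
        printf("%04o\n", (unsigned)inherit_mode(02755, 0, 0, 0)); /* prints 0755 */
        printf("%04o\n", (unsigned)inherit_mode(02755, 0, 1, 0)); /* prints 2755 */
        return 0;
}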
index 980d005b21b4111084ab7a56b17fda7e5b1320f5..5645b4ebf494c6c54be75455045d2d183134f799 100644 (file)
@@ -127,7 +127,6 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
 
 extern int open_check_o_direct(struct file *f);
 extern int vfs_open(const struct path *, struct file *, const struct cred *);
-extern struct file *filp_clone_open(struct file *);
 
 /*
  * inode.c
index 51dd68e67b0f3abfcd115196724079e226467d09..c0b66a7a795b1cd22de3061930e454ff24394925 100644 (file)
@@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                if (jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata) {
                        jbd_lock_bh_state(bh);
+                       if (jh->b_transaction == transaction &&
+                           jh->b_jlist != BJ_Metadata)
+                               pr_err("JBD2: assertion failure: h_type=%u "
+                                      "h_line_no=%u block_no=%llu jlist=%u\n",
+                                      handle->h_type, handle->h_line_no,
+                                      (unsigned long long) bh->b_blocknr,
+                                      jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
                        jbd_unlock_bh_state(bh);
@@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                 * of the transaction. This needs to be done
                 * once a transaction -bzzz
                 */
-               jh->b_modified = 1;
                if (handle->h_buffer_credits <= 0) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
+               jh->b_modified = 1;
                handle->h_buffer_credits--;
        }
 
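Moving jh->b_modified = 1 below the credit check matters because b_modified is the per-transaction "already accounted" flag: setting it and then failing with -ENOSPC would let a retry of the same buffer skip the h_buffer_credits accounting entirely (my reading of the hunk). In outline:

        if (jh->b_modified == 0) {
                /* Decide whether we can afford the block *before* marking
                 * it accounted; otherwise an -ENOSPC exit here would leave
                 * b_modified set and a retry would never charge a credit. */
                if (handle->h_buffer_credits <= 0) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
                jh->b_modified = 1;
                handle->h_buffer_credits--;
        }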
index c60f3d32ee911192c0cd8dae3b7cb11c0f416411..a6797986b625a34d19e097050c58f582c177c30c 100644 (file)
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
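Computing ea_buf->max_size before the kmalloc() means ea_get() now allocates the rounded-up size rather than the raw size, so later fills up to max_size stay inside the buffer (my reading of the reorder). The mask expression rounds up to a whole number of blocks, which is valid because s_blocksize is always a power of two. A runnable check of the arithmetic, assuming a 4096-byte block:

#include <stdio.h>

int main(void)
{
        unsigned long blocksize = 4096;  /* power of two, like sb->s_blocksize */
        unsigned long sizes[] = { 1, 4096, 4097, 9000 };

        for (int i = 0; i < 4; i++) {
                unsigned long rounded =
                        (sizes[i] + blocksize - 1) & ~(blocksize - 1);
                printf("%5lu -> %5lu\n", sizes[i], rounded);
        }
        return 0;       /* prints 4096, 4096, 8192 and 12288 */
}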
index bbd0465535ebd9e433a812d60ab345161ef736b3..f033f3a69a3bcf7259192a9e062d7af295f90639 100644 (file)
@@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
-               if (res != ERR_PTR(-ENOENT))
+               if (res != ERR_PTR(-ENOENT)) {
+                       rcu_read_unlock();
                        return res;
+               }
        }
        rcu_read_unlock();
        return ERR_PTR(-ENOENT);
index d4a07acad5989e1374f879f2cc46c284f9aa8c4f..8f003792ccde1c24c3bcd444a609b888a629340f 100644 (file)
@@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
                                        &hdr->pgio_mirror_idx))
                        goto out_eagain;
-               ff_layout_read_record_layoutstats_done(task, hdr);
-               pnfs_read_resend_pnfs(hdr);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_read(hdr);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                goto out_eagain;
@@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               pnfs_read_resend_pnfs(hdr);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_read(hdr);
        pnfs_generic_rw_release(data);
 }
 
@@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
-               ff_layout_reset_write(hdr, true);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_write(hdr, false);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                return -EAGAIN;
@@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               ff_layout_reset_write(hdr, true);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_write(hdr, false);
        pnfs_generic_rw_release(data);
 }
 
index ed45090e4df6471902f5968b908429fe28976280..6dd146885da99304c8183f5fae21741f4aa3625f 100644 (file)
@@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        struct nfs4_closedata *calldata = data;
        struct nfs4_state *state = calldata->state;
        struct inode *inode = calldata->inode;
+       struct pnfs_layout_hdr *lo;
        bool is_rdonly, is_wronly, is_rdwr;
        int call_close = 0;
 
@@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
        }
 
+       lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               calldata->arg.lr_args = NULL;
+               calldata->res.lr_res = NULL;
+       }
+
        if (calldata->arg.fmode == 0)
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 
@@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata)
 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 {
        struct nfs4_delegreturndata *d_data;
+       struct pnfs_layout_hdr *lo;
 
        d_data = (struct nfs4_delegreturndata *)data;
 
        if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
                return;
 
+       lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               d_data->args.lr_args = NULL;
+               d_data->res.lr_res = NULL;
+       }
+
        nfs4_setup_sequence(d_data->res.server->nfs_client,
                        &d_data->args.seq_args,
                        &d_data->res.seq_res,
@@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 
        dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
 
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
+
        switch (nfs4err) {
        case 0:
                goto out;
@@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                goto out;
        }
 
-       nfs4_sequence_free_slot(&lgp->res.seq_res);
        err = nfs4_handle_exception(server, nfs4err, exception);
        if (!status) {
                if (exception->retry)
@@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        if (IS_ERR(task))
                return ERR_CAST(task);
        status = rpc_wait_for_completion_task(task);
-       if (status == 0) {
+       if (status != 0)
+               goto out;
+
+       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+       if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
                status = nfs4_layoutget_handle_exception(task, lgp, &exception);
                *timeout = exception.timeout;
-       }
-
+       } else
+               lseg = pnfs_layout_process(lgp);
+out:
        trace_nfs4_layoutget(lgp->args.ctx,
                        &lgp->args.range,
                        &lgp->res.range,
                        &lgp->res.stateid,
                        status);
 
-       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
-       if (status == 0 && lgp->res.layoutp->len)
-               lseg = pnfs_layout_process(lgp);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
                        &lrp->args.seq_args,
                        &lrp->res.seq_res,
                        task);
+       if (!pnfs_layout_is_valid(lrp->args.layout))
+               rpc_exit(task, 0);
 }
 
 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
index a8f5e6b167491e3746a921f1f611fbb06b7d5f45..3fe81424337d07b5b19ab77d08825fb27bf523b0 100644 (file)
@@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
 {
 }
 
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+       return false;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #if IS_ENABLED(CONFIG_NFS_V4_2)
index bb0840e234f3bc176d2af120d6ed94ee3720aad0..39d6f431da83f4227fbfbee1b6931230aa82e95c 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        }
 }
 
-static struct wait_queue_head *
-pipe_get_poll_head(struct file *filp, __poll_t events)
-{
-       struct pipe_inode_info *pipe = filp->private_data;
-
-       return &pipe->wait;
-}
-
 /* No kernel lock held - fine */
-static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
+static __poll_t
+pipe_poll(struct file *filp, poll_table *wait)
 {
+       __poll_t mask;
        struct pipe_inode_info *pipe = filp->private_data;
-       int nrbufs = pipe->nrbufs;
-       __poll_t mask = 0;
+       int nrbufs;
+
+       poll_wait(filp, &pipe->wait, wait);
 
        /* Reading only -- no need for acquiring the semaphore.  */
+       nrbufs = pipe->nrbufs;
+       mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
@@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = {
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
-       .get_poll_head  = pipe_get_poll_head,
-       .poll_mask      = pipe_poll_mask,
+       .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
index b6572944efc340d89f136c5a9c17ac409c8bef00..aaffc0c302162db0fc9d682c071469f55326dc1d 100644 (file)
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        if (env_start != arg_end || env_start >= env_end)
                env_start = env_end = arg_end;
 
+       /* .. and limit it to a maximum of one page of slop */
+       if (env_end >= arg_end + PAGE_SIZE)
+               env_end = arg_end + PAGE_SIZE - 1;
+
        /* We're not going to care if "*ppos" has high bits set */
        pos = arg_start + *ppos;
 
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        while (count) {
                int got;
                size_t size = min_t(size_t, PAGE_SIZE, count);
+               long offset;
 
-               got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
-               if (got <= 0)
+               /*
+                * Are we already starting past the official end?
+                * We always include the last byte that is *supposed*
+                * to be NUL
+                */
+               offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+               got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+               if (got <= offset)
                        break;
+               got -= offset;
 
                /* Don't walk past a NUL character once you hit arg_end */
                if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                                n = arg_end - pos - 1;
 
                        /* Cut off at first NUL after 'n' */
-                       got = n + strnlen(page+n, got-n);
-                       if (!got)
+                       got = n + strnlen(page+n, offset+got-n);
+                       if (got < offset)
                                break;
+                       got -= offset;
+
+                       /* Include the NUL if it existed */
+                       if (got < size)
+                               got++;
                }
 
-               got -= copy_to_user(buf, page, got);
+               got -= copy_to_user(buf, page+offset, got);
                if (unlikely(!got)) {
                        if (!len)
                                len = -EFAULT;
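
To make the offset arithmetic in the hunks above concrete, a worked example with illustrative values (not from the patch): suppose arg_end = 0x1000 and the caller seeks to pos = 0x1004.

        offset = pos - arg_end + 1;     /* 0x1004 - 0x1000 + 1 = 5 */
        /* The read then starts at pos - offset = 0x0fff, i.e. arg_end - 1,
         * the byte that is supposed to hold the terminating NUL.  If
         * access_remote_vm() returns got <= offset, nothing beyond that
         * NUL byte was readable and the loop breaks. */
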
index 6ac1c92997ea2a20c3af8959c6920218f16a846d..bb1c1625b158d03f5c8685f55e370267e1cc76fb 100644 (file)
@@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, de->seq_ops);
 }
 
+static int proc_seq_release(struct inode *inode, struct file *file)
+{
+       struct proc_dir_entry *de = PDE(inode);
+
+       if (de->state_size)
+               return seq_release_private(inode, file);
+       return seq_release(inode, file);
+}
+
 static const struct file_operations proc_seq_fops = {
        .open           = proc_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = proc_seq_release,
 };
 
 struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
index e9679016271fba923290c24e13f5368f5f0e0199..dfd73a4616ce565bfccb996a50f5eb549fe41fe8 100644 (file)
@@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
                SEQ_PUT_DEC(" kB\nSwapPss:        ",
                                                mss->swap_pss >> PSS_SHIFT);
-               SEQ_PUT_DEC(" kB\nLocked:         ", mss->pss >> PSS_SHIFT);
+               SEQ_PUT_DEC(" kB\nLocked:         ",
+                                               mss->pss_locked >> PSS_SHIFT);
                seq_puts(m, " kB\n");
        }
        if (!rollup_mode) {
index d88231e3b2be3ec1bc1f85c3c2fd92973e312c15..fc20e06c56ba55bf229db78cb5b5077c21935931 100644 (file)
@@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync);
 static unsigned long
 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct list_head *head;
        struct dquot *dquot;
        unsigned long freed = 0;
 
        spin_lock(&dq_list_lock);
-       head = free_dquots.prev;
-       while (head != &free_dquots && sc->nr_to_scan) {
-               dquot = list_entry(head, struct dquot, dq_free);
+       while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+               dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
                remove_dquot_hash(dquot);
                remove_free_dquot(dquot);
                remove_inuse(dquot);
                do_destroy_dquot(dquot);
                sc->nr_to_scan--;
                freed++;
-               head = free_dquots.prev;
        }
        spin_unlock(&dq_list_lock);
        return freed;
index 7e288d97adcbb7504f2c3c2953ca24debd770b01..9fed1c05f1f4df6f750c599da1670abf4b066445 100644 (file)
@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
 }
 
 /* %k */
-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
+static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
 {
        if (key)
-               sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
-                       le32_to_cpu(key->k_objectid), le_offset(key),
-                       le_type(key));
+               return scnprintf(buf, size, "[%d %d %s %s]",
+                                le32_to_cpu(key->k_dir_id),
+                                le32_to_cpu(key->k_objectid), le_offset(key),
+                                le_type(key));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
 /* %K */
-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
+static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
 {
        if (key)
-               sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
-                       key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
-                       cpu_type(key));
+               return scnprintf(buf, size, "[%d %d %s %s]",
+                                key->on_disk_key.k_dir_id,
+                                key->on_disk_key.k_objectid,
+                                reiserfs_cpu_offset(key), cpu_type(key));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
+static int scnprintf_de_head(char *buf, size_t size,
+                            struct reiserfs_de_head *deh)
 {
        if (deh)
-               sprintf(buf,
-                       "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
-                       deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
-                       deh_location(deh), deh_state(deh));
+               return scnprintf(buf, size,
+                                "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+                                deh_offset(deh), deh_dir_id(deh),
+                                deh_objectid(deh), deh_location(deh),
+                                deh_state(deh));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 
 }
 
-static void sprintf_item_head(char *buf, struct item_head *ih)
+static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
 {
        if (ih) {
-               strcpy(buf,
-                      (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
-               sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
-               sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
-                       "free_space(entry_count) %d",
-                       ih_item_len(ih), ih_location(ih), ih_free_space(ih));
+               char *p = buf;
+               char * const end = buf + size;
+
+               p += scnprintf(p, end - p, "%s",
+                              (ih_version(ih) == KEY_FORMAT_3_6) ?
+                              "*3.6* " : "*3.5*");
+
+               p += scnprintf_le_key(p, end - p, &ih->ih_key);
+
+               p += scnprintf(p, end - p,
+                              ", item_len %d, item_location %d, free_space(entry_count) %d",
+                              ih_item_len(ih), ih_location(ih),
+                              ih_free_space(ih));
+               return p - buf;
        } else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
+static int scnprintf_direntry(char *buf, size_t size,
+                             struct reiserfs_dir_entry *de)
 {
        char name[20];
 
        memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
        name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
-       sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+       return scnprintf(buf, size, "\"%s\"==>[%d %d]",
+                        name, de->de_dir_id, de->de_objectid);
 }
 
-static void sprintf_block_head(char *buf, struct buffer_head *bh)
+static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
 {
-       sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
-               B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+       return scnprintf(buf, size,
+                        "level=%d, nr_items=%d, free_space=%d rdkey ",
+                        B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
 }
 
-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
+static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
 {
-       sprintf(buf,
-               "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
-               bh->b_bdev, bh->b_size,
-               (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
-               bh->b_state, bh->b_page,
-               buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
-               buffer_dirty(bh) ? "DIRTY" : "CLEAN",
-               buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+       return scnprintf(buf, size,
+                        "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+                        bh->b_bdev, bh->b_size,
+                        (unsigned long long)bh->b_blocknr,
+                        atomic_read(&(bh->b_count)),
+                        bh->b_state, bh->b_page,
+                        buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+                        buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+                        buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
 }
 
-static void sprintf_disk_child(char *buf, struct disk_child *dc)
+static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
 {
-       sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
-               dc_size(dc));
+       return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
+                        dc_block_number(dc), dc_size(dc));
 }
 
 static char *is_there_reiserfs_struct(char *fmt, int *what)
@@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
        char *fmt1 = fmt_buf;
        char *k;
        char *p = error_buf;
+       char * const end = &error_buf[sizeof(error_buf)];
        int what;
 
        spin_lock(&error_lock);
 
-       strcpy(fmt1, fmt);
+       if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
+               strscpy(error_buf, "format string too long", end - error_buf);
+               goto out_unlock;
+       }
 
        while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
                *k = 0;
 
-               p += vsprintf(p, fmt1, args);
+               p += vscnprintf(p, end - p, fmt1, args);
 
                switch (what) {
                case 'k':
-                       sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
+                       p += scnprintf_le_key(p, end - p,
+                                             va_arg(args, struct reiserfs_key *));
                        break;
                case 'K':
-                       sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
+                       p += scnprintf_cpu_key(p, end - p,
+                                              va_arg(args, struct cpu_key *));
                        break;
                case 'h':
-                       sprintf_item_head(p, va_arg(args, struct item_head *));
+                       p += scnprintf_item_head(p, end - p,
+                                                va_arg(args, struct item_head *));
                        break;
                case 't':
-                       sprintf_direntry(p,
-                                        va_arg(args,
-                                               struct reiserfs_dir_entry *));
+                       p += scnprintf_direntry(p, end - p,
+                                               va_arg(args, struct reiserfs_dir_entry *));
                        break;
                case 'y':
-                       sprintf_disk_child(p,
-                                          va_arg(args, struct disk_child *));
+                       p += scnprintf_disk_child(p, end - p,
+                                                 va_arg(args, struct disk_child *));
                        break;
                case 'z':
-                       sprintf_block_head(p,
-                                          va_arg(args, struct buffer_head *));
+                       p += scnprintf_block_head(p, end - p,
+                                                 va_arg(args, struct buffer_head *));
                        break;
                case 'b':
-                       sprintf_buffer_head(p,
-                                           va_arg(args, struct buffer_head *));
+                       p += scnprintf_buffer_head(p, end - p,
+                                                  va_arg(args, struct buffer_head *));
                        break;
                case 'a':
-                       sprintf_de_head(p,
-                                       va_arg(args,
-                                              struct reiserfs_de_head *));
+                       p += scnprintf_de_head(p, end - p,
+                                              va_arg(args, struct reiserfs_de_head *));
                        break;
                }
 
-               p += strlen(p);
                fmt1 = k + 2;
        }
-       vsprintf(p, fmt1, args);
+       p += vscnprintf(p, end - p, fmt1, args);
+out_unlock:
        spin_unlock(&error_lock);
 
 }
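
The conversion above relies on scnprintf() returning the number of characters actually written (excluding the trailing NUL) and never writing past the given size, so the accumulating pointer can never step past the end of the buffer. A minimal sketch of the pattern — fill_report() and its format strings are illustrative, not from this patch:

static void fill_report(char *buf, size_t size)
{
        char *p = buf;
        char * const end = buf + size;

        p += scnprintf(p, end - p, "hdr ");
        p += scnprintf(p, end - p, "val=%d", 42);
        /* Once the buffer is full, end - p reaches 0 and every further
         * scnprintf() returns 0, so p never advances past end. */
}
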
index 317891ff8165ba19b775fcfaa8f6deccb58ba18f..4a6b6e4b21cb91aecdf40492c4763f09bf4ccc3f 100644 (file)
 
 #include <linux/uaccess.h>
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
-{
-       if (file->f_op->poll) {
-               return file->f_op->poll(file, pt);
-       } else if (file_has_poll_mask(file)) {
-               unsigned int events = poll_requested_events(pt);
-               struct wait_queue_head *head;
-
-               if (pt && pt->_qproc) {
-                       head = file->f_op->get_poll_head(file, events);
-                       if (!head)
-                               return DEFAULT_POLLMASK;
-                       if (IS_ERR(head))
-                               return EPOLLERR;
-                       pt->_qproc(file, head, pt);
-               }
-
-               return file->f_op->poll_mask(file, events);
-       } else {
-               return DEFAULT_POLLMASK;
-       }
-}
-EXPORT_SYMBOL_GPL(vfs_poll);
 
 /*
  * Estimate expected accuracy in ns from a timeval.
index 23813c078cc9527f547c345ba01ce31dafd570ab..0839efa720b3b562a9e56e677631fe9458ca9233 100644 (file)
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
 
        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
 
+       if (unlikely(length < 0))
+               return -EIO;
+
        while (length) {
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
index 13d80947bf9e6adac348878e3494b38cdd206099..fcff2e0487fef11f89724a72221090e529f0a775 100644 (file)
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
                }
 
                for (i = 0; i < blocks; i++) {
-                       int size = le32_to_cpu(blist[i]);
+                       int size = squashfs_block_size(blist[i]);
+                       if (size < 0) {
+                               err = size;
+                               goto failure;
+                       }
                        block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
                }
                n -= blocks;
@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
                        sizeof(size));
        if (res < 0)
                return res;
-       return le32_to_cpu(size);
+       return squashfs_block_size(size);
 }
 
 /* Copy data into page cache  */
index 0ed6edbc5c7170aa06f191e33df193721206cb3f..86ad9a4b8c364d389df43f1fa6beb2e6b737e827 100644 (file)
@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
                return size;
 
        *fragment_block = le64_to_cpu(fragment_entry.start_block);
-       size = le32_to_cpu(fragment_entry.size);
-
-       return size;
+       return squashfs_block_size(fragment_entry.size);
 }
 
 
index 24d12fd1417767689778302abf77b21f1efd6350..4e6853f084d071b6291da9891b8a16c730901e48 100644 (file)
 
 #define SQUASHFS_COMPRESSED_BLOCK(B)   (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
 
+static inline int squashfs_block_size(__le32 raw)
+{
+       u32 size = le32_to_cpu(raw);
+       return (size >> 25) ? -EIO : size;
+}
+
 /*
  * Inode number ops.  Inodes consist of a compressed block number, and an
  * uncompressed offset within that block
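
The new squashfs_block_size() helper rejects any on-disk size with bits set at or above bit 25, so anything 32 MiB or larger is treated as corruption; this leaves room for the SQUASHFS_COMPRESSED_BIT_BLOCK marker in the lower bits. Two illustrative calls (values are examples, not from the patch):

        squashfs_block_size(cpu_to_le32(0x100000));   /* 1 MiB: returned as-is */
        squashfs_block_size(cpu_to_le32(0x2000000));  /* bit 25 set: -EIO */
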
index d84a2bee4f82b2f8470b7f2fbd42b2f33beb2bce..cdad49da3ff710e6fd2cc1adf4bf4877623af670 100644 (file)
@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
        kfree_rcu(ctx, rcu);
        return 0;
 }
-       
-static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
-               __poll_t eventmask)
+
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
 {
        struct timerfd_ctx *ctx = file->private_data;
+       __poll_t events = 0;
+       unsigned long flags;
 
-       return &ctx->wqh;
-}
+       poll_wait(file, &ctx->wqh, wait);
 
-static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
-{
-       struct timerfd_ctx *ctx = file->private_data;
+       spin_lock_irqsave(&ctx->wqh.lock, flags);
+       if (ctx->ticks)
+               events |= EPOLLIN;
+       spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
-       return ctx->ticks ? EPOLLIN : 0;
+       return events;
 }
 
 static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 
 static const struct file_operations timerfd_fops = {
        .release        = timerfd_release,
-       .get_poll_head  = timerfd_get_poll_head,
-       .poll_mask      = timerfd_poll_mask,
+       .poll           = timerfd_poll,
        .read           = timerfd_read,
        .llseek         = noop_llseek,
        .show_fdinfo    = timerfd_show,
index 1b961b1d9699461cdf0771a90b4771078f6c95fc..fcda0fc97b90a14fd53aafbeb15885d85716e3a1 100644 (file)
@@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
-                       udf_delete_aext(table, epos, eloc,
-                                       (etype << 30) | elen);
+                       udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }
@@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
-               udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+               udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);
 
        udf_add_free_space(sb, partition, -1);
index 0a98a2369738fc2cff925c80066b92a58b299066..d9523013096f978c9d4a3ca1d8fdd23b55eeb275 100644 (file)
@@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               fibh->ebh->b_data,
                               sizeof(struct fileIdentDesc) + fibh->soffset);
 
-                       fi_len = (sizeof(struct fileIdentDesc) +
-                                 cfi->lengthFileIdent +
-                                 le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+                       fi_len = udf_dir_entry_len(cfi);
                        *nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
                        fibh->eoffset = fibh->soffset + fi_len;
                } else {
@@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               sizeof(struct fileIdentDesc));
                }
        }
+       /* Got last entry outside of dir size - fs is corrupted! */
+       if (*nf_pos > dir->i_size)
+               return NULL;
        return fi;
 }
 
index 7f39d17352c9697863f02140f7cf7ec1120a2215..9915a58fbabd7ff0194709ec883c1bd7003d72c7 100644 (file)
@@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
 
        if (startnum > endnum) {
                for (i = 0; i < (startnum - endnum); i++)
-                       udf_delete_aext(inode, *epos, laarr[i].extLocation,
-                                       laarr[i].extLength);
+                       udf_delete_aext(inode, *epos);
        } else if (startnum < endnum) {
                for (i = 0; i < (endnum - startnum); i++) {
                        udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
        return (nelen >> 30);
 }
 
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
-                      struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
 {
        struct extent_position oepos;
        int adsize;
        int8_t etype;
        struct allocExtDesc *aed;
        struct udf_inode_info *iinfo;
+       struct kernel_lb_addr eloc;
+       uint32_t elen;
 
        if (epos.bh) {
                get_bh(epos.bh);
index c586026508db82d0a27a1df1b964bcbf3fcec45c..06f37ddd2997f4894859722fb7f801994e91239d 100644 (file)
@@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
        loff_t f_pos;
        loff_t size = udf_ext0_offset(dir) + dir->i_size;
        int nfidlen;
-       uint8_t lfi;
-       uint16_t liu;
        udf_pblk_t block;
        struct kernel_lb_addr eloc;
        uint32_t elen = 0;
@@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                namelen = 0;
        }
 
-       nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+       nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
 
        f_pos = udf_ext0_offset(dir);
 
@@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                        goto out_err;
                }
 
-               liu = le16_to_cpu(cfi->lengthOfImpUse);
-               lfi = cfi->lengthFileIdent;
-
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (((sizeof(struct fileIdentDesc) +
-                                       liu + lfi + 3) & ~3) == nfidlen) {
+                       if (udf_dir_entry_len(cfi) == nfidlen) {
                                cfi->descTag.tagSerialNum = cpu_to_le16(1);
                                cfi->fileVersionNum = cpu_to_le16(1);
                                cfi->fileCharacteristics = 0;
@@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        if (dir_fi) {
                dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
-               udf_update_tag((char *)dir_fi,
-                               (sizeof(struct fileIdentDesc) +
-                               le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+               udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
                if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                        mark_inode_dirty(old_inode);
                else
index bae311b59400459338d2c60f9e962429066e1483..84c47dde4d268a12e8f1aeaf2c6e6dda91db4972 100644 (file)
@@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
                        struct fileIdentDesc *, struct udf_fileident_bh *,
                        uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+       return ALIGN(sizeof(struct fileIdentDesc) +
+               le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+               UDF_NAME_PAD);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
@@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
                        struct kernel_lb_addr *, uint32_t, int);
 extern void udf_write_aext(struct inode *, struct extent_position *,
                           struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
-                             struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
 extern int8_t udf_next_aext(struct inode *, struct extent_position *,
                            struct kernel_lb_addr *, uint32_t *, int);
 extern int8_t udf_current_aext(struct inode *, struct extent_position *,
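
A worked example of the new udf_dir_entry_len() helper, assuming a 38-byte fixed fileIdentDesc, a 5-byte file identifier, no implementation-use area, and UDF_NAME_PAD == 4 — all of these values are assumptions for illustration; the struct size in particular is layout-dependent:

        /* ALIGN(38 + 0 + 5, 4) == 44: 43 bytes rounded up to the next
         * 4-byte boundary, matching the old open-coded
         * (... + 3) & ~3 arithmetic it replaces. */
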
index 123bf7d516fc1f475cb89edb8aade4c2ad556f51..594d192b23317d7e69d068b2f124ca6f77de3e07 100644 (file)
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                         unsigned long reason)
 {
        struct mm_struct *mm = ctx->mm;
-       pte_t *pte;
+       pte_t *ptep, pte;
        bool ret = true;
 
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-       if (!pte)
+       ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
+
+       if (!ptep)
                goto out;
 
        ret = false;
+       pte = huge_ptep_get(ptep);
 
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
-       if (huge_pte_none(*pte))
+       if (huge_pte_none(pte))
                ret = true;
-       if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+       if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                ret = true;
 out:
        return ret;
index 84db76e0e3e3c58ae7d25b38a46a1ce2b4d5cae4..fecd187fcf2c3cd69bd79954ccc272737cb2ce2b 100644 (file)
@@ -157,6 +157,7 @@ __xfs_ag_resv_free(
        error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
        resv->ar_reserved = 0;
        resv->ar_asked = 0;
+       resv->ar_orig_reserved = 0;
 
        if (error)
                trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
@@ -189,13 +190,34 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
-       xfs_extlen_t                    reserved;
+       xfs_extlen_t                    hidden_space;
 
        if (used > ask)
                ask = used;
-       reserved = ask - used;
 
-       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       switch (type) {
+       case XFS_AG_RESV_RMAPBT:
+               /*
+                * Space taken by the rmapbt is not subtracted from fdblocks
+                * because the rmapbt lives in the free space.  Here we must
+                * subtract the entire reservation from fdblocks so that we
+                * always have blocks available for rmapbt expansion.
+                */
+               hidden_space = ask;
+               break;
+       case XFS_AG_RESV_METADATA:
+               /*
+                * Space taken by all other metadata btrees is accounted
+                * on-disk as used space.  We therefore only hide the space
+                * that is reserved but not used by the trees.
+                */
+               hidden_space = ask - used;
+               break;
+       default:
+               ASSERT(0);
+               return -EINVAL;
+       }
+       error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
        if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
@@ -216,7 +238,8 @@ __xfs_ag_resv_init(
 
        resv = xfs_perag_resv(pag, type);
        resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+       resv->ar_orig_reserved = hidden_space;
+       resv->ar_reserved = ask - used;
 
        trace_xfs_ag_resv_init(pag, type, ask);
        return 0;
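
A numeric illustration of the two reservation types above, with made-up values ask = 100 blocks and used = 10 blocks:

        /* XFS_AG_RESV_RMAPBT:   hidden_space = ask        = 100 */
        /* XFS_AG_RESV_METADATA: hidden_space = ask - used =  90 */
        /* Both cases:           ar_reserved  = ask - used =  90 */
        /* and ar_orig_reserved records hidden_space.            */
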
index eef466260d43adb3cc9ef6ae3dcea36f82cd90a4..75dbdc14c45f08b733ab9d066dc86056ce8261f5 100644 (file)
@@ -223,12 +223,13 @@ xfs_alloc_get_rec(
        error = xfs_btree_get_rec(cur, &rec, stat);
        if (error || !(*stat))
                return error;
-       if (rec->alloc.ar_blockcount == 0)
-               goto out_bad_rec;
 
        *bno = be32_to_cpu(rec->alloc.ar_startblock);
        *len = be32_to_cpu(rec->alloc.ar_blockcount);
 
+       if (*len == 0)
+               goto out_bad_rec;
+
        /* check for valid extent range, including overflow */
        if (!xfs_verify_agbno(mp, agno, *bno))
                goto out_bad_rec;
index 01628f0c9a0c227543087c70bd7391ad3f0eee2c..7205268b30bc54b488bf513b1a2b6bb737769d64 100644 (file)
@@ -5780,6 +5780,32 @@ del_cursor:
        return error;
 }
 
+/* Make sure we won't be right-shifting an extent past the maximum bound. */
+int
+xfs_bmap_can_insert_extents(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           off,
+       xfs_fileoff_t           shift)
+{
+       struct xfs_bmbt_irec    got;
+       int                     is_empty;
+       int                     error = 0;
+
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+               return -EIO;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
+       if (!error && !is_empty && got.br_startoff >= off &&
+           ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
+               error = -EINVAL;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+       return error;
+}
+
 int
 xfs_bmap_insert_extents(
        struct xfs_trans        *tp,
index 99dddbd0fcc6c606e59544d69a0435b0cc205c5f..9b49ddf99c4115479fe8271cc5b492a2d86b2b70 100644 (file)
@@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fsblock_t *firstblock,
                struct xfs_defer_ops *dfops);
+int    xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
+               xfs_fileoff_t shift);
 int    xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
index 1c5a8aaf2bfcea6b51b76e7aa7dff4b55b4e4145..059bc44c27e83edf3cb1fe2c494490e65f93c5d8 100644 (file)
@@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt {
                XFS_DFORK_DSIZE(dip, mp) : \
                XFS_DFORK_ASIZE(dip, mp))
 
+#define XFS_DFORK_MAXEXT(dip, mp, w) \
+       (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec))
+
 /*
  * Return pointers to the data or attribute forks.
  */
@@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block {
 #define BMBT_STARTBLOCK_BITLEN 52
 #define BMBT_BLOCKCOUNT_BITLEN 21
 
+#define BMBT_STARTOFF_MASK     ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+
 typedef struct xfs_bmbt_rec {
        __be64                  l0, l1;
 } xfs_bmbt_rec_t;
index d38d724534c48e2a4644be06acbf6d64da9a65b2..30d1d60f1d46e62ff71eca1f45b273536cc6cce1 100644 (file)
@@ -374,6 +374,47 @@ xfs_log_dinode_to_disk(
        }
 }
 
+static xfs_failaddr_t
+xfs_dinode_verify_fork(
+       struct xfs_dinode       *dip,
+       struct xfs_mount        *mp,
+       int                     whichfork)
+{
+       uint32_t                di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+
+       switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+       case XFS_DINODE_FMT_LOCAL:
+               /*
+                * no local regular files yet
+                */
+               if (whichfork == XFS_DATA_FORK) {
+                       if (S_ISREG(be16_to_cpu(dip->di_mode)))
+                               return __this_address;
+                       if (be64_to_cpu(dip->di_size) >
+                                       XFS_DFORK_SIZE(dip, mp, whichfork))
+                               return __this_address;
+               }
+               if (di_nextents)
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_EXTENTS:
+               if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               if (whichfork == XFS_ATTR_FORK) {
+                       if (di_nextents > MAXAEXTNUM)
+                               return __this_address;
+               } else if (di_nextents > MAXEXTNUM) {
+                       return __this_address;
+               }
+               break;
+       default:
+               return __this_address;
+       }
+       return NULL;
+}
+
 xfs_failaddr_t
 xfs_dinode_verify(
        struct xfs_mount        *mp,
@@ -441,24 +482,9 @@ xfs_dinode_verify(
        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
-               switch (dip->di_format) {
-               case XFS_DINODE_FMT_LOCAL:
-                       /*
-                        * no local regular files yet
-                        */
-                       if (S_ISREG(mode))
-                               return __this_address;
-                       if (di_size > XFS_DFORK_DSIZE(dip, mp))
-                               return __this_address;
-                       if (dip->di_nextents)
-                               return __this_address;
-                       /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
+               if (fa)
+                       return fa;
                break;
        case 0:
                /* Uninitialized inode ok. */
@@ -468,17 +494,9 @@ xfs_dinode_verify(
        }
 
        if (XFS_DFORK_Q(dip)) {
-               switch (dip->di_aformat) {
-               case XFS_DINODE_FMT_LOCAL:
-                       if (dip->di_anextents)
-                               return __this_address;
-               /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
+               if (fa)
+                       return fa;
        } else {
                /*
                 * If there is no fork offset, this may be a freshly-made inode
@@ -713,7 +731,8 @@ xfs_inode_validate_extsize(
        if ((hint_flag || inherit_flag) && extsize == 0)
                return __this_address;
 
-       if (!(hint_flag || inherit_flag) && extsize != 0)
+       /* free inodes get flags set to zero but extsize remains */
+       if (mode && !(hint_flag || inherit_flag) && extsize != 0)
                return __this_address;
 
        if (extsize_bytes % blocksize_bytes)
@@ -759,7 +778,8 @@ xfs_inode_validate_cowextsize(
        if (hint_flag && cowextsize == 0)
                return __this_address;
 
-       if (!hint_flag && cowextsize != 0)
+       /* free inodes get flags set to zero but cowextsize remains */
+       if (mode && !hint_flag && cowextsize != 0)
                return __this_address;
 
        if (hint_flag && rt_flag)
index 65fc4ed2e9a1050b76b1cd85d874294e52a8afd9..b228c821bae6802c0aa8ab9b79069d703245bbe2 100644 (file)
@@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range(
        if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
            low_rec->ar_startext == high_rec->ar_startext)
                return 0;
-       if (high_rec->ar_startext >= mp->m_sb.sb_rextents)
-               high_rec->ar_startext = mp->m_sb.sb_rextents - 1;
+       if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+               high_rec->ar_startext = mp->m_sb.sb_rextents;
 
        /* Iterate the bitmap, looking for discrepancies. */
        rtstart = low_rec->ar_startext;
index c35009a8669953dfee4013615ca62b47237b4d77..83b1e8c6c18f939e8afcabdb4eb37fd33e459da8 100644 (file)
@@ -685,12 +685,10 @@ out_unlock_iolock:
 }
 
 /*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode.  This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
  */
 int
 xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
 {
-       xfs_fileoff_t           remaining = length;
+       struct xfs_ifork        *ifp = &ip->i_df;
+       xfs_fileoff_t           end_fsb = start_fsb + length;
+       struct xfs_bmbt_irec    got, del;
+       struct xfs_iext_cursor  icur;
        int                     error = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       do {
-               int             done;
-               xfs_bmbt_irec_t imap;
-               int             nimaps = 1;
-               xfs_fsblock_t   firstblock;
-               struct xfs_defer_ops dfops;
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+               if (error)
+                       return error;
+       }
 
-               /*
-                * Map the range first and check that it is a delalloc extent
-                * before trying to unmap the range. Otherwise we will be
-                * trying to remove a real extent (which requires a
-                * transaction) or a hole, which is probably a bad idea...
-                */
-               error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
-                                      XFS_BMAPI_ENTIRE);
+       if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+               return 0;
 
-               if (error) {
-                       /* something screwed, just bail */
-                       if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-                               xfs_alert(ip->i_mount,
-                       "Failed delalloc mapping lookup ino %lld fsb %lld.",
-                                               ip->i_ino, start_fsb);
-                       }
-                       break;
-               }
-               if (!nimaps) {
-                       /* nothing there */
-                       goto next_block;
-               }
-               if (imap.br_startblock != DELAYSTARTBLOCK) {
-                       /* been converted, ignore */
-                       goto next_block;
-               }
-               WARN_ON(imap.br_blockcount == 0);
+       while (got.br_startoff + got.br_blockcount > start_fsb) {
+               del = got;
+               xfs_trim_extent(&del, start_fsb, length);
 
                /*
-                * Note: while we initialise the firstblock/dfops pair, they
-                * should never be used because blocks should never be
-                * allocated or freed for a delalloc extent and hence we need
-                * don't cancel or finish them after the xfs_bunmapi() call.
+                * A delete can push the cursor forward. Step back to the
+                * previous extent if we hit a non-delalloc extent or one
+                * outside the target range.
                 */
-               xfs_defer_init(&dfops, &firstblock);
-               error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
-                                       &dfops, &done);
-               if (error)
-                       break;
+               if (!del.br_blockcount ||
+                   !isnullstartblock(del.br_startblock)) {
+                       if (!xfs_iext_prev_extent(ifp, &icur, &got))
+                               break;
+                       continue;
+               }
 
-               ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
-               start_fsb++;
-               remaining--;
-       } while(remaining > 0);
+               error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+                                                 &got, &del);
+               if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+                       break;
+       }
 
        return error;
 }
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
-       return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       if (error)
+               return error;
+
+       /*
+        * If we zeroed right up to EOF and EOF straddles a page boundary we
+        * must make sure that the post-EOF area is also zeroed because the
+        * page could be mmap'd and iomap_zero_range doesn't do that for us.
+        * Writeback of the eof page will do this, albeit clumsily.
+        */
+       if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+               error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+                               (offset + len) & ~PAGE_MASK, LLONG_MAX);
+       }
+
+       return error;
 }
 
 /*
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
 
        trace_xfs_insert_file_space(ip);
 
+       error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+       if (error)
+               return error;
+
        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;
index c34fa9c342f25fdbee7e39fead0078e30859bba3..c7157bc48bd192ea60650577232ea87e8bfbbf02 100644 (file)
@@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
        struct xfs_trans                *tp,
        struct xfs_getfsmap_info        *info)
 {
-       struct xfs_rtalloc_rec          alow;
-       struct xfs_rtalloc_rec          ahigh;
+       struct xfs_rtalloc_rec          alow = { 0 };
+       struct xfs_rtalloc_rec          ahigh = { 0 };
        int                             error;
 
        xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
index a7afcad6b71140aed25f02979946cb9795afa644..3f2bd6032cf86525d6d344d60be903d9c739267c 100644 (file)
@@ -387,7 +387,7 @@ xfs_reserve_blocks(
        do {
                free = percpu_counter_sum(&mp->m_fdblocks) -
                                                mp->m_alloc_set_aside;
-               if (!free)
+               if (free <= 0)
                        break;
 
                delta = request - mp->m_resblks;
index 7a96c4e0ab5c621f38d9e034622d26ebd8d95437..5df4de666cc118848c86ddc33420d4147031ce57 100644 (file)
@@ -3236,7 +3236,6 @@ xfs_iflush_cluster(
        struct xfs_inode        *cip;
        int                     nr_found;
        int                     clcount = 0;
-       int                     bufwasdelwri;
        int                     i;
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -3360,37 +3359,22 @@ cluster_corrupt_out:
         * inode buffer and shut down the filesystem.
         */
        rcu_read_unlock();
-       /*
-        * Clean up the buffer.  If it was delwri, just release it --
-        * brelse can handle it with no problems.  If not, shut down the
-        * filesystem before releasing the buffer.
-        */
-       bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
-       if (bufwasdelwri)
-               xfs_buf_relse(bp);
-
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 
-       if (!bufwasdelwri) {
-               /*
-                * Just like incore_relse: if we have b_iodone functions,
-                * mark the buffer as an error and call them.  Otherwise
-                * mark it as stale and brelse.
-                */
-               if (bp->b_iodone) {
-                       bp->b_flags &= ~XBF_DONE;
-                       xfs_buf_stale(bp);
-                       xfs_buf_ioerror(bp, -EIO);
-                       xfs_buf_ioend(bp);
-               } else {
-                       xfs_buf_stale(bp);
-                       xfs_buf_relse(bp);
-               }
-       }
-
        /*
-        * Unlocks the flush lock
+        * We'll always have an inode attached to the buffer for completion
+        * processing by the time we are called from xfs_iflush(). Hence we
+        * always need to do IO completion processing to abort the inodes
+        * attached to the buffer.  Handle them just like the shutdown case
+        * in xfs_buf_submit().
         */
+       ASSERT(bp->b_iodone);
+       bp->b_flags &= ~XBF_DONE;
+       xfs_buf_stale(bp);
+       xfs_buf_ioerror(bp, -EIO);
+       xfs_buf_ioend(bp);
+
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(cip, false);
        kmem_free(cilist);
        xfs_perag_put(pag);
@@ -3486,12 +3470,17 @@ xfs_iflush(
                xfs_log_force(mp, 0);
 
        /*
-        * inode clustering:
-        * see if other inodes can be gathered into this write
+        * inode clustering: try to gather other inodes into this write
+        *
+        * Note: Any error during clustering will result in the filesystem
+        * being shut down and completion callbacks run on the cluster buffer.
+        * As we have already flushed and attached this inode to the buffer,
+        * it has already been aborted and released by xfs_iflush_cluster() and
+        * so we have no further error handling to do here.
         */
        error = xfs_iflush_cluster(ip, bp);
        if (error)
-               goto cluster_corrupt_out;
+               return error;
 
        *bpp = bp;
        return 0;
@@ -3500,12 +3489,8 @@ corrupt_out:
        if (bp)
                xfs_buf_relse(bp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-cluster_corrupt_out:
-       error = -EFSCORRUPTED;
 abort_out:
-       /*
-        * Unlocks the flush lock
-        */
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(ip, false);
        return error;
 }
index 49f5492eed3bdb9d85c53843df03546c83f2c799..55876dd02f0c8c75fa5653eeab82881bd3741928 100644 (file)
@@ -963,12 +963,13 @@ xfs_ilock_for_iomap(
        unsigned                *lockmode)
 {
        unsigned                mode = XFS_ILOCK_SHARED;
+       bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
 
        /*
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
-       if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+       if (xfs_is_reflink_inode(ip) && is_write) {
                /*
                 * FIXME: It could still overwrite on unshared extents and not
                 * need allocation.
@@ -989,6 +990,7 @@ xfs_ilock_for_iomap(
                mode = XFS_ILOCK_EXCL;
        }
 
+relock:
        if (flags & IOMAP_NOWAIT) {
                if (!xfs_ilock_nowait(ip, mode))
                        return -EAGAIN;
@@ -996,6 +998,17 @@ xfs_ilock_for_iomap(
                xfs_ilock(ip, mode);
        }
 
+       /*
+        * The reflink iflag could have changed since the earlier unlocked
+        * check, so if we got ILOCK_SHARED for a write but we're now a
+        * reflink inode we have to switch to ILOCK_EXCL and relock.
+        */
+       if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+               xfs_iunlock(ip, mode);
+               mode = XFS_ILOCK_EXCL;
+               goto relock;
+       }
+
        *lockmode = mode;
        return 0;
 }
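
The relock dance above is an instance of the general "take the cheap lock, re-check, upgrade" pattern. A generic sketch with a hypothetical rwsem-protected object — needs_exclusive() is an illustrative predicate, not an XFS API:

        down_read(&obj->sem);
        if (needs_exclusive(obj)) {             /* hypothetical */
                up_read(&obj->sem);
                down_write(&obj->sem);
                /* The state may change again while unlocked; the XFS code
                 * avoids looping because ILOCK_EXCL is the strongest mode,
                 * so a single upgrade always suffices. */
        }
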
index e040af120b69b3a69b38517cde3092773b391260..524f543c5b820fe45de5866cd950509190a74612 100644 (file)
@@ -258,7 +258,12 @@ xfs_trans_alloc(
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
 
-       WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+       /*
+        * Zero-reservation ("empty") transactions can't modify anything, so
+        * they're allowed to run while we're frozen.
+        */
+       WARN_ON(resp->tr_logres > 0 &&
+               mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);
 
        tp = kmem_zone_zalloc(xfs_trans_zone,
index 40a916efd7c039d2132014fcaf5ec780e4a8248e..1194a4c78d557fb411e9672291f6bab6e3623e9d 100644 (file)
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
 {
        return;
 }
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                                                                int event_flag)
 {
        static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                       "Consider compiling CPUfreq support into your kernel.\n");
                printout = 0;
        }
-       return 0;
 }
 static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 {
index 0763f065b975a543fb0e887d4af8d63bf7354f05..d10f1e7d6ba8c37140ae9332b59399baae4ffdf1 100644 (file)
@@ -63,7 +63,7 @@ typedef struct qspinlock {
 /*
  * Initializier
  */
-#define        __ARCH_SPIN_LOCK_UNLOCKED       { .val = ATOMIC_INIT(0) }
+#define        __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
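
The extra brace pair is needed because the qspinlock value now sits inside a nested struct/union, and an initializer must supply one brace level per level of nesting (or use nested designators). A reduced standalone example of the same C rule, with illustrative types:

        struct inner { int val; };
        struct outer { struct inner in; };

        struct outer a = { { 0 } };     /* one brace pair per nesting level */
        struct outer b = { 0 };         /* legal, but -Wmissing-braces warns */
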
index faddde44de8c902e6884e64eeb8b22bd0d11b75a..3063125197adabb38876a9dc001986062db24658 100644 (file)
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
  * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
  */
 
+#ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
+#endif
 
+#ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
+#endif
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
+#endif
 
 #ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
+#endif
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
index cc414db9da0ad6758f696d0de2a251ce99d8d301..482461d8931d9186c4a11b7b2d9a24f981a595bc 100644 (file)
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
 void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
                                           unsigned int areqlen);
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
index f5099c12c6a6d3302c77f31ae89e57922ca63fc4..f7a19c2a7a807407862df5c023a6b7467046d540 100644 (file)
@@ -97,29 +97,11 @@ struct pci_controller;
 
 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
 
-/**
- * drm_drv_uses_atomic_modeset - check if the driver implements
- * atomic_commit()
- * @dev: DRM device
- *
- * This check is useful if drivers do not have DRIVER_ATOMIC set but
- * have atomic modesetting internally implemented.
- */
-static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
-{
-       return dev->mode_config.funcs->atomic_commit != NULL;
-}
-
 #define DRM_SWITCH_POWER_ON 0
 #define DRM_SWITCH_POWER_OFF 1
 #define DRM_SWITCH_POWER_CHANGING 2
 #define DRM_SWITCH_POWER_DYNAMIC_OFF 3
 
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
-{
-       return dev->driver->driver_features & feature;
-}
-
 /* returns true if currently okay to sleep */
 static inline bool drm_can_sleep(void)
 {
index a57a8aa90ffb794146ffd46c4a71543da0545596..da9d95a1958096be400a3c4b9a4f5f1977d394e9 100644 (file)
@@ -160,6 +160,14 @@ struct __drm_crtcs_state {
 struct __drm_connnectors_state {
        struct drm_connector *ptr;
        struct drm_connector_state *state, *old_state, *new_state;
+       /**
+        * @out_fence_ptr:
+        *
+        * User-provided pointer which the kernel uses to return a sync_file
+        * file descriptor. Used by writeback connectors to signal completion of
+        * the writeback.
+        */
+       s32 __user *out_fence_ptr;
 };
 
 struct drm_private_obj;
@@ -594,6 +602,9 @@ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
 int __must_check
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
                                  struct drm_crtc *crtc);
+int drm_atomic_set_writeback_fb_for_connector(
+               struct drm_connector_state *conn_state,
+               struct drm_framebuffer *fb);
 int __must_check
 drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
                                   struct drm_crtc *crtc);
@@ -601,9 +612,6 @@ int __must_check
 drm_atomic_add_affected_planes(struct drm_atomic_state *state,
                               struct drm_crtc *crtc);
 
-void
-drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
-
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
index 26aaba58d6ceb34f53608ed5411fc370c06fa7f7..99e2a5297c697cf79d0abb500bea997487c5bade 100644 (file)
@@ -100,6 +100,7 @@ int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state,
 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
                                   bool nonblock);
 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state);
 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
 
index 3270fec469798fc6cdb20c75e96c0be311b0d3e6..bd850747ce5472fe477919db42f4cb5d1911f61f 100644 (file)
@@ -97,7 +97,7 @@ struct drm_bridge_funcs {
        /**
         * @mode_fixup:
         *
-        * This callback is used to validate and adjust a mode. The paramater
+        * This callback is used to validate and adjust a mode. The parameter
         * mode is the display mode that should be fed to the next element in
         * the display chain, either the final &drm_connector or the next
         * &drm_bridge. The parameter adjusted_mode is the input mode the bridge
@@ -178,6 +178,22 @@ struct drm_bridge_funcs {
         * then this would be &drm_encoder_helper_funcs.mode_set. The display
         * pipe (i.e.  clocks and timing signals) is off when this function is
         * called.
+        *
+        * The adjusted_mode parameter is the mode output by the CRTC for the
+        * first bridge in the chain. It can be different from the mode
+        * parameter that contains the desired mode for the connector at the end
+        * of the bridges chain, for instance when the first bridge in the chain
+        * performs scaling. The adjusted mode is mostly useful for the first
+        * bridge in the chain and is likely irrelevant for the other bridges.
+        *
+        * For atomic drivers the adjusted_mode is the mode stored in
+        * &drm_crtc_state.adjusted_mode.
+        *
+        * NOTE:
+        *
+        * If a need arises to store and access modes adjusted for locations
+        * other than the connection between the CRTC and the first bridge,
+        * the DRM framework will have to be extended with DRM bridge states.
         */
        void (*mode_set)(struct drm_bridge *bridge,
                         struct drm_display_mode *mode,
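For a bridge sitting directly downstream of the CRTC, @mode_set would program the adjusted mode. A hedged sketch, where struct my_bridge and my_bridge_write_timings() are hypothetical:

static void my_bridge_mode_set(struct drm_bridge *bridge,
                               struct drm_display_mode *mode,
                               struct drm_display_mode *adjusted_mode)
{
        struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);

        /*
         * First bridge in the chain: program the timings the CRTC actually
         * outputs, not the end-of-chain mode requested by the connector.
         */
        my_bridge_write_timings(mb, adjusted_mode);
}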
@@ -254,27 +270,29 @@ struct drm_bridge_timings {
 
 /**
  * struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @timings: the timing specification for the bridge, if any (may
- * be NULL)
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
  */
 struct drm_bridge {
+       /** @dev: DRM device this bridge belongs to */
        struct drm_device *dev;
+       /** @encoder: encoder to which this bridge is connected */
        struct drm_encoder *encoder;
+       /** @next: the next bridge in the encoder chain */
        struct drm_bridge *next;
 #ifdef CONFIG_OF
+       /** @of_node: device node pointer to the bridge */
        struct device_node *of_node;
 #endif
+       /** @list: to keep track of all added bridges */
        struct list_head list;
+       /**
+        * @timings:
+        *
+        * the timing specification for the bridge, if any (may be NULL)
+        */
        const struct drm_bridge_timings *timings;
-
+       /** @funcs: control functions */
        const struct drm_bridge_funcs *funcs;
+       /** @driver_private: pointer to the bridge driver's internal context */
        void *driver_private;
 };
 
@@ -285,15 +303,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
                      struct drm_bridge *previous);
 
 bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-                       const struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
+                          const struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
 enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
                                           const struct drm_display_mode *mode);
 void drm_bridge_disable(struct drm_bridge *bridge);
 void drm_bridge_post_disable(struct drm_bridge *bridge);
 void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
 void drm_bridge_pre_enable(struct drm_bridge *bridge);
 void drm_bridge_enable(struct drm_bridge *bridge);
 
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
new file mode 100644 (file)
index 0000000..989f8e5
--- /dev/null
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRM_CLIENT_H_
+#define _DRM_CLIENT_H_
+
+#include <linux/types.h>
+
+struct drm_client_dev;
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_minor;
+struct module;
+
+/**
+ * struct drm_client_funcs - DRM client callbacks
+ */
+struct drm_client_funcs {
+       /**
+        * @owner: The module owner
+        */
+       struct module *owner;
+
+       /**
+        * @unregister:
+        *
+        * Called when &drm_device is unregistered. The client should respond by
+        * releasing its resources using drm_client_release().
+        *
+        * This callback is optional.
+        */
+       void (*unregister)(struct drm_client_dev *client);
+
+       /**
+        * @restore:
+        *
+        * Called on drm_lastclose(). The first client instance in the list that
+        * returns zero gets the privilege to restore and no more clients are
+        * called. This callback is not called after @unregister has been called.
+        *
+        * This callback is optional.
+        */
+       int (*restore)(struct drm_client_dev *client);
+
+       /**
+        * @hotplug:
+        *
+        * Called on drm_kms_helper_hotplug_event().
+        * This callback is not called after @unregister has been called.
+        *
+        * This callback is optional.
+        */
+       int (*hotplug)(struct drm_client_dev *client);
+};
+
+/**
+ * struct drm_client_dev - DRM client instance
+ */
+struct drm_client_dev {
+       /**
+        * @dev: DRM device
+        */
+       struct drm_device *dev;
+
+       /**
+        * @name: Name of the client.
+        */
+       const char *name;
+
+       /**
+        * @list:
+        *
+        * List of all clients of a DRM device, linked into
+        * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex.
+        */
+       struct list_head list;
+
+       /**
+        * @funcs: DRM client functions (optional)
+        */
+       const struct drm_client_funcs *funcs;
+
+       /**
+        * @file: DRM file
+        */
+       struct drm_file *file;
+};
+
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+                  const char *name, const struct drm_client_funcs *funcs);
+void drm_client_release(struct drm_client_dev *client);
+
+void drm_client_dev_unregister(struct drm_device *dev);
+void drm_client_dev_hotplug(struct drm_device *dev);
+void drm_client_dev_restore(struct drm_device *dev);
+
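Putting the pieces together, an in-kernel client registers itself with drm_client_new() and reacts to device events through its funcs table. A hedged sketch; the my_client_* names are hypothetical:

static struct drm_client_dev my_client;

static int my_client_hotplug(struct drm_client_dev *client)
{
        /* Re-probe outputs and refresh buffers as needed. */
        return 0;
}

static const struct drm_client_funcs my_client_funcs = {
        .owner   = THIS_MODULE,
        .hotplug = my_client_hotplug,
};

static int my_client_register(struct drm_device *dev)
{
        /* Opens an internal DRM file and links into dev->clientlist. */
        return drm_client_new(dev, &my_client, "my-client", &my_client_funcs);
}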
+/**
+ * struct drm_client_buffer - DRM client buffer
+ */
+struct drm_client_buffer {
+       /**
+        * @client: DRM client
+        */
+       struct drm_client_dev *client;
+
+       /**
+        * @handle: Buffer handle
+        */
+       u32 handle;
+
+       /**
+        * @pitch: Buffer pitch
+        */
+       u32 pitch;
+
+       /**
+        * @gem: GEM object backing this buffer
+        */
+       struct drm_gem_object *gem;
+
+       /**
+        * @vaddr: Virtual address for the buffer
+        */
+       void *vaddr;
+
+       /**
+        * @fb: DRM framebuffer
+        */
+       struct drm_framebuffer *fb;
+};
+
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
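A short sketch of allocating and releasing a client-owned framebuffer; size and format are illustrative:

struct drm_client_buffer *buffer;

buffer = drm_client_framebuffer_create(client, 1024, 768,
                                       DRM_FORMAT_XRGB8888);
if (IS_ERR(buffer))
        return PTR_ERR(buffer);

/* The backing buffer is mapped, so the client can draw directly. */
memset(buffer->vaddr, 0, 768 * buffer->pitch);

drm_client_framebuffer_delete(buffer);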
+
+int drm_client_debugfs_init(struct drm_minor *minor);
+
+#endif
index 675cc3f8cf85292ce19aadee37d32f1f143771d8..97ea41dc678fe64e8bcb41928677028faf89a639 100644 (file)
@@ -290,6 +290,10 @@ struct drm_display_info {
 #define DRM_BUS_FLAG_DATA_MSB_TO_LSB   (1<<4)
 /* data is transmitted LSB to MSB on the bus */
 #define DRM_BUS_FLAG_DATA_LSB_TO_MSB   (1<<5)
+/* drive sync on pos. edge */
+#define DRM_BUS_FLAG_SYNC_POSEDGE      (1<<6)
+/* drive sync on neg. edge */
+#define DRM_BUS_FLAG_SYNC_NEGEDGE      (1<<7)
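A panel or encoder driver would OR the new sync-edge flags into &drm_display_info.bus_flags alongside the existing data/enable flags, e.g. (a sketch):

/* Latch sync signals on the rising edge, pixel data on the falling edge. */
connector->display_info.bus_flags = DRM_BUS_FLAG_SYNC_POSEDGE |
                                    DRM_BUS_FLAG_PIXDATA_NEGEDGE;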
 
        /**
         * @bus_flags: Additional information (like pixel signal polarity) for
@@ -374,12 +378,9 @@ struct drm_tv_connector_state {
 
 /**
  * struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- * @tv: TV connector state
  */
 struct drm_connector_state {
+       /** @connector: backpointer to the connector */
        struct drm_connector *connector;
 
        /**
@@ -390,6 +391,13 @@ struct drm_connector_state {
         */
        struct drm_crtc *crtc;
 
+       /**
+        * @best_encoder:
+        *
+        * Used by the atomic helpers to select the encoder, through the
+        * &drm_connector_helper_funcs.atomic_best_encoder or
+        * &drm_connector_helper_funcs.best_encoder callbacks.
+        */
        struct drm_encoder *best_encoder;
 
        /**
@@ -398,6 +406,7 @@ struct drm_connector_state {
         */
        enum drm_link_status link_status;
 
+       /** @state: backpointer to global drm_atomic_state */
        struct drm_atomic_state *state;
 
        /**
@@ -407,6 +416,7 @@ struct drm_connector_state {
         */
        struct drm_crtc_commit *commit;
 
+       /** @tv: TV connector state */
        struct drm_tv_connector_state tv;
 
        /**
@@ -418,6 +428,14 @@ struct drm_connector_state {
         */
        enum hdmi_picture_aspect picture_aspect_ratio;
 
+       /**
+        * @content_type: Connector property to control the
+        * HDMI infoframe content type setting.
+        * The %DRM_MODE_CONTENT_TYPE_\* values must
+        * match the values of the exposed property.
+        */
+       unsigned int content_type;
+
        /**
         * @scaling_mode: Connector property to control the
         * upscaling, mostly used for built-in panels.
@@ -429,6 +447,19 @@ struct drm_connector_state {
         * protection. This is most commonly used for HDCP.
         */
        unsigned int content_protection;
+
+       /**
+        * @writeback_job: Writeback job for writeback connectors
+        *
+        * Holds the framebuffer and out-fence for a writeback connector. As
+        * the writeback completion may be asynchronous to the normal commit
+        * cycle, the writeback job lifetime is managed separately from the
+        * normal atomic state by this object.
+        *
+        * See also: drm_writeback_queue_job() and
+        * drm_writeback_signal_completion()
+        */
+       struct drm_writeback_job *writeback_job;
 };
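In a driver, the job stored here is handed to the core when the commit programs the writeback engine, and completion is signalled from the interrupt handler. A hedged sketch; wb_conn and my_hw_program_writeback() are hypothetical, and the drm_writeback_queue_job() signature is assumed from the writeback helpers added alongside this field:

/* In the atomic commit path of the writeback encoder: */
if (conn_state->writeback_job && conn_state->writeback_job->fb) {
        my_hw_program_writeback(hw, conn_state->writeback_job->fb);
        drm_writeback_queue_job(wb_conn, conn_state->writeback_job);
        conn_state->writeback_job = NULL;
}

/* Later, from the writeback-done interrupt handler: */
drm_writeback_signal_completion(wb_conn, 0);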
 
 /**
@@ -530,8 +561,7 @@ struct drm_connector_funcs {
         * received for this output connector->edid must be NULL.
         *
         * Drivers using the probe helpers should use
-        * drm_helper_probe_single_connector_modes() or
-        * drm_helper_probe_single_connector_modes_nomerge() to implement this
+        * drm_helper_probe_single_connector_modes() to implement this
         * function.
         *
         * RETURNS:
@@ -608,6 +638,8 @@ struct drm_connector_funcs {
         * cleaned up by calling the @atomic_destroy_state hook in this
         * structure.
         *
+        * This callback is mandatory for atomic drivers.
+        *
         * Atomic drivers which don't subclass &struct drm_connector_state should use
         * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
         * state structure to extend it with driver-private state should use
@@ -634,6 +666,8 @@ struct drm_connector_funcs {
         *
         * Destroy a state duplicated with @atomic_duplicate_state and release
         * or unreference all resources it references
+        *
+        * This callback is mandatory for atomic drivers.
         */
        void (*atomic_destroy_state)(struct drm_connector *connector,
                                     struct drm_connector_state *state);
@@ -738,45 +772,6 @@ struct drm_cmdline_mode {
 
 /**
  * struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @eld: EDID-like data, if present
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- * @scaling_mode_property:  Optional atomic property to control the upscaling.
- * @content_protection_property: Optional property to control content protection
  *
  * Each connector may be connected to one or more CRTCs, or may be clonable by
  * another connector if they can share a CRTC.  Each connector also has a specific
@@ -784,13 +779,27 @@ struct drm_cmdline_mode {
  * span multiple monitors).
  */
 struct drm_connector {
+       /** @dev: parent DRM device */
        struct drm_device *dev;
+       /** @kdev: kernel device for sysfs attributes */
        struct device *kdev;
+       /** @attr: sysfs attributes */
        struct device_attribute *attr;
+
+       /**
+        * @head:
+        *
+        * List of all connectors on a @dev, linked from
+        * &drm_mode_config.connector_list. Protected by
+        * &drm_mode_config.connector_list_lock, but please only use
+        * &drm_connector_list_iter to walk this list.
+        */
        struct list_head head;
 
+       /** @base: base KMS object */
        struct drm_mode_object base;
 
+       /** @name: human readable name, can be overwritten by the driver */
        char *name;
 
        /**
@@ -808,10 +817,30 @@ struct drm_connector {
         */
        unsigned index;
 
+       /**
+        * @connector_type:
+        * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+        */
        int connector_type;
+       /** @connector_type_id: index into connector type enum */
        int connector_type_id;
+       /**
+        * @interlace_allowed:
+        * Can this connector handle interlaced modes? Only used by
+        * drm_helper_probe_single_connector_modes() for mode filtering.
+        */
        bool interlace_allowed;
+       /**
+        * @doublescan_allowed:
+        * Can this connector handle doublescan? Only used by
+        * drm_helper_probe_single_connector_modes() for mode filtering.
+        */
        bool doublescan_allowed;
+       /**
+        * @stereo_allowed:
+        * Can this connector handle stereo modes? Only used by
+        * drm_helper_probe_single_connector_modes() for mode filtering.
+        */
        bool stereo_allowed;
 
        /**
@@ -860,45 +889,42 @@ struct drm_connector {
         * Protected by &drm_mode_config.mutex.
         */
        struct drm_display_info display_info;
+
+       /** @funcs: connector control functions */
        const struct drm_connector_funcs *funcs;
 
+       /**
+        * @edid_blob_ptr: DRM property containing EDID if present. Protected by
+        * &drm_mode_config.mutex. This should be updated only by calling
+        * drm_connector_update_edid_property().
+        */
        struct drm_property_blob *edid_blob_ptr;
+
+       /** @properties: property tracking for this connector */
        struct drm_object_properties properties;
 
+       /**
+        * @scaling_mode_property: Optional atomic property to control the
+        * upscaling. See drm_connector_attach_scaling_mode_property().
+        */
        struct drm_property *scaling_mode_property;
 
        /**
         * @content_protection_property: DRM ENUM property for content
-        * protection
+        * protection. See drm_connector_attach_content_protection_property().
         */
        struct drm_property *content_protection_property;
 
        /**
         * @path_blob_ptr:
         *
-        * DRM blob property data for the DP MST path property.
+        * DRM blob property data for the DP MST path property. This should only
+        * be updated by calling drm_connector_set_path_property().
         */
        struct drm_property_blob *path_blob_ptr;
 
-       /**
-        * @tile_blob_ptr:
-        *
-        * DRM blob property data for the tile property (used mostly by DP MST).
-        * This is meant for screens which are driven through separate display
-        * pipelines represented by &drm_crtc, which might not be running with
-        * genlocked clocks. For tiled panels which are genlocked, like
-        * dual-link LVDS or dual-link DSI, the driver should try to not expose
-        * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
-        */
-       struct drm_property_blob *tile_blob_ptr;
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
 #define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
 #define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
 #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
 
        /**
@@ -915,25 +941,40 @@ struct drm_connector {
         *     Periodically poll the connector for connection.
         *
         * DRM_CONNECTOR_POLL_DISCONNECT
-        *     Periodically poll the connector for disconnection.
+        *     Periodically poll the connector for disconnection, without
+        *     causing flickering even when the connector is in use. DACs should
+        *     rarely do this without a lot of testing.
         *
         * Set to 0 for connectors that don't support connection status
         * discovery.
         */
        uint8_t polled;
 
-       /* requested DPMS state */
+       /**
+        * @dpms: Current dpms state. For legacy drivers the
+        * &drm_connector_funcs.dpms callback must update this. For atomic
+        * drivers, this is handled by the core atomic code, and drivers must
+        * only take &drm_crtc_state.active into account.
+        */
        int dpms;
 
+       /** @helper_private: mid-layer private data */
        const struct drm_connector_helper_funcs *helper_private;
 
-       /* forced on connector */
+       /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */
        struct drm_cmdline_mode cmdline_mode;
+       /** @force: a DRM_FORCE_<foo> state for forced mode sets */
        enum drm_connector_force force;
+       /** @override_edid: has the EDID been overwritten through debugfs for testing? */
        bool override_edid;
 
 #define DRM_CONNECTOR_MAX_ENCODER 3
+       /**
+        * @encoder_ids: Valid encoders for this connector. Please only use
+        * drm_connector_for_each_possible_encoder() to enumerate these.
+        */
        uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+
        /**
         * @encoder: Currently bound encoder driving this connector, if any.
         * Only really meaningful for non-atomic drivers. Atomic drivers should
@@ -943,19 +984,37 @@ struct drm_connector {
        struct drm_encoder *encoder;
 
 #define MAX_ELD_BYTES  128
-       /* EDID bits */
+       /** @eld: EDID-like data, if present */
        uint8_t eld[MAX_ELD_BYTES];
+       /** @latency_present: AV delay info from ELD, if found */
        bool latency_present[2];
-       int video_latency[2];   /* [0]: progressive, [1]: interlaced */
+       /**
+        * @video_latency: Video latency info from ELD, if found.
+        * [0]: progressive, [1]: interlaced
+        */
+       int video_latency[2];
+       /**
+        * @audio_latency: audio latency info from ELD, if found
+        * [0]: progressive, [1]: interlaced
+        */
        int audio_latency[2];
-       int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+       /**
+        * @null_edid_counter: track sinks that give us all zeros for the EDID.
+        * Needed to work around some HW bugs where we get all 0s
+        */
+       int null_edid_counter;
+
+       /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */
        unsigned bad_edid_counter;
 
-       /* Flag for raw EDID header corruption - used in Displayport
-        * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+       /**
+        * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used
+        * in Displayport compliance testing - Displayport Link CTS Core 1.2
+        * rev1.1 4.2.2.6
         */
        bool edid_corrupt;
 
+       /** @debugfs_entry: debugfs directory for this connector */
        struct dentry *debugfs_entry;
 
        /**
@@ -963,7 +1022,7 @@ struct drm_connector {
         *
         * Current atomic state for this connector.
         *
-        * This is protected by @drm_mode_config.connection_mutex. Note that
+        * This is protected by &drm_mode_config.connection_mutex. Note that
         * nonblocking atomic commits access the current connector state without
         * taking locks. Either by going through the &struct drm_atomic_state
         * pointers, see for_each_oldnew_connector_in_state(),
@@ -974,19 +1033,44 @@ struct drm_connector {
         */
        struct drm_connector_state *state;
 
-       /* DisplayID bits */
+       /* DisplayID bits. FIXME: Extract into a substruct? */
+
+       /**
+        * @tile_blob_ptr:
+        *
+        * DRM blob property data for the tile property (used mostly by DP MST).
+        * This is meant for screens which are driven through separate display
+        * pipelines represented by &drm_crtc, which might not be running with
+        * genlocked clocks. For tiled panels which are genlocked, like
+        * dual-link LVDS or dual-link DSI, the driver should try to not expose
+        * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+        *
+        * This should only be updated by calling
+        * drm_connector_set_tile_property().
+        */
+       struct drm_property_blob *tile_blob_ptr;
+
+       /** @has_tile: is this connector connected to a tiled monitor */
        bool has_tile;
+       /** @tile_group: tile group for the connected monitor */
        struct drm_tile_group *tile_group;
+       /** @tile_is_single_monitor: whether the tiles form a single monitor */
        bool tile_is_single_monitor;
 
+       /** @num_h_tile: number of horizontal tiles in the tile group */
+       /** @num_v_tile: number of vertical tiles in the tile group */
        uint8_t num_h_tile, num_v_tile;
+       /** @tile_h_loc: horizontal location of this tile */
+       /** @tile_v_loc: vertical location of this tile */
        uint8_t tile_h_loc, tile_v_loc;
+       /** @tile_h_size: horizontal size of this tile. */
+       /** @tile_v_size: vertical size of this tile. */
        uint16_t tile_h_size, tile_v_size;
 
        /**
         * @free_node:
         *
-        * List used only by &drm_connector_iter to be able to clean up a
+        * List used only by &drm_connector_list_iter to be able to clean up a
         * connector from any context, in conjunction with
         * &drm_mode_config.connector_free_work.
         */
@@ -1001,15 +1085,21 @@ int drm_connector_init(struct drm_device *dev,
                       int connector_type);
 int drm_connector_register(struct drm_connector *connector);
 void drm_connector_unregister(struct drm_connector *connector);
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+int drm_connector_attach_encoder(struct drm_connector *connector,
                                      struct drm_encoder *encoder);
 
 void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
+
+static inline unsigned int drm_connector_index(const struct drm_connector *connector)
 {
        return connector->index;
 }
 
+static inline u32 drm_connector_mask(const struct drm_connector *connector)
+{
+       return 1 << connector->index;
+}
+
 /**
  * drm_connector_lookup - lookup connector object
  * @dev: DRM device
@@ -1089,20 +1179,25 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
                                  unsigned int num_modes,
                                  const char * const modes[]);
 int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_connector_attach_content_type_property(struct drm_connector *connector);
 int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
                                               u32 scaling_mode_mask);
 int drm_connector_attach_content_protection_property(
                struct drm_connector *connector);
 int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_content_type_property(struct drm_device *dev);
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+                                        const struct drm_connector_state *conn_state);
+
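The two content-type calls pair up: the property is attached at connector init time, and the value from &drm_connector_state.content_type is folded into the AVI infoframe during the commit. A sketch of an HDMI driver's usage; mode and conn_state come from the surrounding commit code:

/* At connector initialization: */
drm_connector_attach_content_type_property(connector);

/* When building the AVI infoframe for a commit: */
struct hdmi_avi_infoframe frame;

drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
drm_hdmi_avi_infoframe_content_type(&frame, conn_state);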
 int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
 
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
-                                        const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
-                                           const struct edid *edid);
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
-                                                uint64_t link_status);
+int drm_connector_set_path_property(struct drm_connector *connector,
+                                   const char *path);
+int drm_connector_set_tile_property(struct drm_connector *connector);
+int drm_connector_update_edid_property(struct drm_connector *connector,
+                                      const struct edid *edid);
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+                                           uint64_t link_status);
 int drm_connector_init_panel_orientation_property(
        struct drm_connector *connector, int width, int height);
 
@@ -1151,6 +1246,9 @@ struct drm_connector *
 drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
 void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
 
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+                                       struct drm_encoder *encoder);
+
 /**
  * drm_for_each_connector_iter - connector_list iterator macro
  * @connector: &struct drm_connector pointer used as cursor
@@ -1163,4 +1261,17 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
 #define drm_for_each_connector_iter(connector, iter) \
        while ((connector = drm_connector_list_iter_next(iter)))
 
+/**
+ * drm_connector_for_each_possible_encoder - iterate connector's possible encoders
+ * @connector: &struct drm_connector pointer
+ * @encoder: &struct drm_encoder pointer used as cursor
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
+       for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
+                    (connector)->encoder_ids[(__i)] != 0; (__i)++) \
+               for_each_if((encoder) = \
+                           drm_encoder_find((connector)->dev, NULL, \
+                                            (connector)->encoder_ids[(__i)])) \
+
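For example, a driver that simply wants the first encoder wired to a connector could use the macro like this (a sketch):

static struct drm_encoder *first_possible_encoder(struct drm_connector *connector)
{
        struct drm_encoder *encoder;
        int i;

        drm_connector_for_each_possible_encoder(connector, encoder, i)
                return encoder; /* first registered encoder wins */

        return NULL;
}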
 #endif
index a2d81d2907a9377716a8834f89aedb238cc607e2..92e7fc7f05a4486746d19cfc4399dc0ecb8094f5 100644 (file)
@@ -77,21 +77,6 @@ struct drm_plane_helper_funcs;
 
 /**
  * struct drm_crtc_state - mutable CRTC state
- * @crtc: backpointer to the CRTC
- * @enable: whether the CRTC should be enabled, gates all other state
- * @active: whether the CRTC is actively displaying (used for DPMS)
- * @planes_changed: planes on this crtc are updated
- * @mode_changed: @mode or @enable has been changed
- * @active_changed: @active has been toggled.
- * @connectors_changed: connectors to this crtc have been updated
- * @zpos_changed: zpos values of planes on this crtc have been updated
- * @color_mgmt_changed: color management properties have changed (degamma or
- *     gamma LUT or CSC matrix)
- * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
- * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
- * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
- * @mode_blob: &drm_property_blob for @mode
- * @state: backpointer to global drm_atomic_state
  *
  * Note that the distinction between @enable and @active is rather subtle:
  * Flipping @active while @enable is set without changing anything else may
@@ -102,31 +87,127 @@ struct drm_plane_helper_funcs;
  *
  * The three booleans active_changed, connectors_changed and mode_changed are
  * intended to indicate whether a full modeset is needed, rather than strictly
- * describing what has changed in a commit.
- * See also: drm_atomic_crtc_needs_modeset()
+ * describing what has changed in a commit. See also:
+ * drm_atomic_crtc_needs_modeset()
+ *
+ * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
+ * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
+ * state like @plane_mask so drivers not converted over to atomic helpers should
+ * not rely on these being accurate!
  */
 struct drm_crtc_state {
+       /** @crtc: backpointer to the CRTC */
        struct drm_crtc *crtc;
 
+       /**
+        * @enable: Whether the CRTC should be enabled, gates all other state.
+        * This controls reservations of shared resources. Actual hardware state
+        * is controlled by @active.
+        */
        bool enable;
+
+       /**
+        * @active: Whether the CRTC is actively displaying (used for DPMS).
+        * Implies that @enable is set. The driver must not release any shared
+        * resources if @active is set to false but @enable is still true, because
+        * userspace expects that a DPMS ON always succeeds.
+        *
+        * Hence drivers must not consult @active in their various
+        * &drm_mode_config_funcs.atomic_check callbacks to reject an atomic
+        * commit. They can consult it to aid in the computation of derived
+        * hardware state, since even in the DPMS OFF state the display hardware
+        * should be as much powered down as when the CRTC is completely
+        * disabled through setting @enable to false.
+        */
        bool active;
 
-       /* computed state bits used by helpers and drivers */
+       /**
+        * @planes_changed: Planes on this crtc are updated. Used by the atomic
+        * helpers and drivers to steer the atomic commit control flow.
+        */
        bool planes_changed : 1;
+
+       /**
+        * @mode_changed: @mode or @enable has been changed. Used by the atomic
+        * helpers and drivers to steer the atomic commit control flow. See also
+        * drm_atomic_crtc_needs_modeset().
+        *
+        * Drivers are supposed to set this for any CRTC state changes that
+        * require a full modeset. They can also reset it to false if e.g. a
+        * @mode change can be done without a full modeset by only changing
+        * scaler settings.
+        */
        bool mode_changed : 1;
+
+       /**
+        * @active_changed: @active has been toggled. Used by the atomic
+        * helpers and drivers to steer the atomic commit control flow. See also
+        * drm_atomic_crtc_needs_modeset().
+        */
        bool active_changed : 1;
+
+       /**
+        * @connectors_changed: Connectors to this crtc have been updated,
+        * either in their state or routing. Used by the atomic
+        * helpers and drivers to steer the atomic commit control flow. See also
+        * drm_atomic_crtc_needs_modeset().
+        *
+        * Drivers are supposed to set this as-needed from their own atomic
+        * check code, e.g. from &drm_encoder_helper_funcs.atomic_check.
+        */
        bool connectors_changed : 1;
+       /**
+        * @zpos_changed: zpos values of planes on this crtc have been updated.
+        * Used by the atomic helpers and drivers to steer the atomic commit
+        * control flow.
+        */
        bool zpos_changed : 1;
+       /**
+        * @color_mgmt_changed: Color management properties have changed
+        * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and
+        * drivers to steer the atomic commit control flow.
+        */
        bool color_mgmt_changed : 1;
 
-       /* attached planes bitmask:
-        * WARNING: transitional helpers do not maintain plane_mask so
-        * drivers not converted over to atomic helpers should not rely
-        * on plane_mask being accurate!
+       /**
+        * @no_vblank:
+        *
+        * Reflects the ability of a CRTC to send VBLANK events. This state
+        * usually depends on the pipeline configuration, and the main use case
+        * is CRTCs feeding a writeback connector operating in oneshot mode.
+        * In this case the VBLANK event is only generated when a job is queued
+        * to the writeback connector, and we want the core to fake VBLANK
+        * events when this part of the pipeline hasn't changed but others have,
+        * or when the CRTC and connectors are being disabled.
+        *
+        * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
+        * from the current state, the CRTC driver is then responsible for
+        * updating this field when needed.
+        *
+        * Note that the combination of &drm_crtc_state.event == NULL and
+        * &drm_crtc_state.no_vblank == true is valid and usually used when the
+        * writeback connector attached to the CRTC has a new job queued. In
+        * this case the driver will send the VBLANK event on its own when the
+        * writeback job is complete.
+        */
+       bool no_vblank : 1;
+
+       /**
+        * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
+        * this CRTC.
         */
        u32 plane_mask;
 
+       /**
+        * @connector_mask: Bitmask of drm_connector_mask(connector) of
+        * connectors attached to this CRTC.
+        */
        u32 connector_mask;
+
+       /**
+        * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders
+        * attached to this CRTC.
+        */
        u32 encoder_mask;
 
        /**
@@ -134,10 +215,13 @@ struct drm_crtc_state {
         *
         * Internal display timings which can be used by the driver to handle
         * differences between the mode requested by userspace in @mode and what
-        * is actually programmed into the hardware. It is purely driver
-        * implementation defined what exactly this adjusted mode means. Usually
-        * it is used to store the hardware display timings used between the
-        * CRTC and encoder blocks.
+        * is actually programmed into the hardware.
+        *
+        * For drivers using &drm_bridge, this stores hardware display timings
+        * used between the CRTC and the first bridge. For other drivers, the
+        * meaning of the adjusted_mode field is purely driver implementation
+        * defined, and it will usually be used to store the hardware
+        * display timings used between the CRTC and encoder blocks.
         */
        struct drm_display_mode adjusted_mode;
 
@@ -158,7 +242,10 @@ struct drm_crtc_state {
         */
        struct drm_display_mode mode;
 
-       /* blob property to expose current mode to atomic userspace */
+       /**
+        * @mode_blob: &drm_property_blob for @mode, for exposing the mode to
+        * atomic userspace.
+        */
        struct drm_property_blob *mode_blob;
 
        /**
@@ -262,6 +349,7 @@ struct drm_crtc_state {
         */
        struct drm_crtc_commit *commit;
 
+       /** @state: backpointer to global drm_atomic_state */
        struct drm_atomic_state *state;
 };
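Because the helpers carry @no_vblank over from the previous state, a CRTC driver feeding a oneshot writeback connector must keep the flag current itself, typically from its atomic_check. A sketch; my_crtc_feeds_writeback_only() is hypothetical:

static int my_crtc_atomic_check(struct drm_crtc *crtc,
                                struct drm_crtc_state *state)
{
        /* No scanout output: let the core fake the VBLANK event. */
        state->no_vblank = my_crtc_feeds_writeback_only(state);

        return 0;
}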
 
@@ -503,6 +591,8 @@ struct drm_crtc_funcs {
         * cleaned up by calling the @atomic_destroy_state hook in this
         * structure.
         *
+        * This callback is mandatory for atomic drivers.
+        *
         * Atomic drivers which don't subclass &struct drm_crtc_state should use
         * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
         * state structure to extend it with driver-private state should use
@@ -529,6 +619,8 @@ struct drm_crtc_funcs {
         *
         * Destroy a state duplicated with @atomic_duplicate_state and release
         * or unreference all resources it references
+        *
+        * This callback is mandatory for atomic drivers.
         */
        void (*atomic_destroy_state)(struct drm_crtc *crtc,
                                     struct drm_crtc_state *state);
@@ -717,35 +809,25 @@ struct drm_crtc_funcs {
 
 /**
  * struct drm_crtc - central CRTC control structure
- * @dev: parent DRM device
- * @port: OF node used by drm_of_find_possible_crtcs()
- * @head: list management
- * @name: human readable name, can be overwritten by the driver
- * @mutex: per-CRTC locking
- * @base: base KMS object for ID tracking etc.
- * @primary: primary plane for this CRTC
- * @cursor: cursor plane for this CRTC
- * @cursor_x: current x position of the cursor, used for universal cursor planes
- * @cursor_y: current y position of the cursor, used for universal cursor planes
- * @enabled: is this CRTC enabled?
- * @mode: current mode timings
- * @hwmode: mode timings as programmed to hw regs
- * @x: x position on screen
- * @y: y position on screen
- * @funcs: CRTC control functions
- * @gamma_size: size of gamma ramp
- * @gamma_store: gamma ramp values
- * @helper_private: mid-layer private data
- * @properties: property tracking for this CRTC
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
  * allows the CRTC to be controlled.
  */
 struct drm_crtc {
+       /** @dev: parent DRM device */
        struct drm_device *dev;
+       /** @port: OF node used by drm_of_find_possible_crtcs(). */
        struct device_node *port;
+       /**
+        * @head:
+        *
+        * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list.
+        * Invariant over the lifetime of @dev and therefore does not need
+        * locking.
+        */
        struct list_head head;
 
+       /** @name: human readable name, can be overwritten by the driver */
        char *name;
 
        /**
@@ -760,10 +842,25 @@ struct drm_crtc {
         */
        struct drm_modeset_lock mutex;
 
+       /** @base: base KMS object for ID tracking etc. */
        struct drm_mode_object base;
 
-       /* primary and cursor planes for CRTC */
+       /**
+        * @primary:
+        * Primary plane for this CRTC. Note that this is only
+        * relevant for legacy IOCTLs: it specifies the plane implicitly used by
+        * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance
+        * beyond that.
+        */
        struct drm_plane *primary;
+
+       /**
+        * @cursor:
+        * Cursor plane for this CRTC. Note that this is only relevant for
+        * legacy IOCTLs: it specifies the plane implicitly used by the SETCURSOR
+        * and SETCURSOR2 IOCTLs. It does not have any significance
+        * beyond that.
+        */
        struct drm_plane *cursor;
 
        /**
@@ -772,30 +869,94 @@ struct drm_crtc {
         */
        unsigned index;
 
-       /* position of cursor plane on crtc */
+       /**
+        * @cursor_x: Current x position of the cursor, used for universal
+        * cursor planes because the SETCURSOR IOCTL can only update the
+        * framebuffer without supplying the coordinates. Drivers should not use
+        * this directly, atomic drivers should look at &drm_plane_state.crtc_x
+        * of the cursor plane instead.
+        */
        int cursor_x;
+       /**
+        * @cursor_y: Current y position of the cursor, used for universal
+        * cursor planes because the SETCURSOR IOCTL can only update the
+        * framebuffer without supplying the coordinates. Drivers should not use
+        * this directly, atomic drivers should look at &drm_plane_state.crtc_y
+        * of the cursor plane instead.
+        */
        int cursor_y;
 
+       /**
+        * @enabled:
+        *
+        * Is this CRTC enabled? Should only be used by legacy drivers, atomic
+        * drivers should instead consult &drm_crtc_state.enable and
+        * &drm_crtc_state.active. Atomic drivers can update this by calling
+        * drm_atomic_helper_update_legacy_modeset_state().
+        */
        bool enabled;
 
-       /* Requested mode from modesetting. */
+       /**
+        * @mode:
+        *
+        * Current mode timings. Should only be used by legacy drivers, atomic
+        * drivers should instead consult &drm_crtc_state.mode. Atomic drivers
+        * can update this by calling
+        * drm_atomic_helper_update_legacy_modeset_state().
+        */
        struct drm_display_mode mode;
 
-       /* Programmed mode in hw, after adjustments for encoders,
-        * crtc, panel scaling etc. Needed for timestamping etc.
+       /**
+        * @hwmode:
+        *
+        * Programmed mode in hw, after adjustments for encoders, crtc, panel
+        * scaling etc. Should only be used by legacy drivers, for high
+        * precision vblank timestamps in
+        * drm_calc_vbltimestamp_from_scanoutpos().
+        *
+        * Note that atomic drivers should not use this, but instead use
+        * &drm_crtc_state.adjusted_mode. For high-precision timestamps
+        * drm_calc_vbltimestamp_from_scanoutpos() uses &drm_vblank_crtc.hwmode,
+        * which is filled out by calling drm_calc_timestamping_constants().
         */
        struct drm_display_mode hwmode;
 
-       int x, y;
+       /**
+        * @x:
+        * x position on screen. Should only be used by legacy drivers, atomic
+        * drivers should look at &drm_plane_state.crtc_x of the primary plane
+        * instead. Updated by calling
+        * drm_atomic_helper_update_legacy_modeset_state().
+        */
+       int x;
+       /**
+        * @y:
+        * y position on screen. Should only be used by legacy drivers, atomic
+        * drivers should look at &drm_plane_state.crtc_y of the primary plane
+        * instead. Updated by calling
+        * drm_atomic_helper_update_legacy_modeset_state().
+        */
+       int y;
+
+       /** @funcs: CRTC control functions */
        const struct drm_crtc_funcs *funcs;
 
-       /* Legacy FB CRTC gamma size for reporting to userspace */
+       /**
+        * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
+        * by calling drm_mode_crtc_set_gamma_size().
+        */
        uint32_t gamma_size;
+
+       /**
+        * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
+        * GETGAMMA IOCTLs. Set up by calling drm_mode_crtc_set_gamma_size().
+        */
        uint16_t *gamma_store;
 
-       /* if you are using the helper */
+       /** @helper_private: mid-layer private data */
        const struct drm_crtc_helper_funcs *helper_private;
 
+       /** @properties: property tracking for this CRTC */
        struct drm_object_properties properties;
 
        /**
@@ -865,7 +1026,6 @@ struct drm_crtc {
         *
         * spinlock to protect the fences in the fence_context.
         */
-
        spinlock_t fence_lock;
        /**
         * @fence_seqno:
@@ -935,8 +1095,8 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
  * drm_crtc_mask - find the mask of a registered CRTC
  * @crtc: CRTC to find mask for
  *
- * Given a registered CRTC, return the mask bit of that CRTC for an
- * encoder's possible_crtcs field.
+ * Given a registered CRTC, return the mask bit of that CRTC for the
+ * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields.
  */
 static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
 {
index 7d63b1d4adb9765a457feec02af2dcb9c4606ec6..b225eeb30d05fc5141a5aec10573daa4afee5b1b 100644 (file)
@@ -43,6 +43,7 @@ struct drm_crtc_crc_entry {
  * @lock: protects the fields in this struct
  * @source: name of the currently configured source of CRCs
  * @opened: whether userspace has opened the data file for reading
+ * @overflow: whether an overflow occurred.
  * @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
  * @head: head of circular queue
  * @tail: tail of circular queue
@@ -52,7 +53,7 @@ struct drm_crtc_crc_entry {
 struct drm_crtc_crc {
        spinlock_t lock;
        const char *source;
-       bool opened;
+       bool opened, overflow;
        struct drm_crtc_crc_entry *entries;
        int head, tail;
        size_t values_cnt;
index 858ba19a3e2937ecc9c493670e0ec7092e4f8e7e..f9c6e0e3aec7d049ab63315108ba816c15e47a18 100644 (file)
@@ -74,6 +74,27 @@ struct drm_device {
        struct mutex filelist_mutex;
        struct list_head filelist;
 
+       /**
+        * @filelist_internal:
+        *
+        * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
+        */
+       struct list_head filelist_internal;
+
+       /**
+        * @clientlist_mutex:
+        *
+        * Protects @clientlist access.
+        */
+       struct mutex clientlist_mutex;
+
+       /**
+        * @clientlist:
+        *
+        * List of in-kernel clients. Protected by @clientlist_mutex.
+        */
+       struct list_head clientlist;
+
        /** \name Memory management */
        /*@{ */
        struct list_head maplist;       /**< Linked list of regions */
index c01564991a9f9894b9f443e584ba00fdc9a25a97..05cc31b5db161070734a65aacd7d45fad5b43884 100644 (file)
@@ -1078,6 +1078,25 @@ struct drm_dp_aux_msg {
        size_t size;
 };
 
+struct cec_adapter;
+struct edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @name: name of the CEC adapter
+ * @parent: parent device of the CEC adapter
+ * @unregister_work: delayed work to unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+       struct mutex lock;
+       struct cec_adapter *adap;
+       const char *name;
+       struct device *parent;
+       struct delayed_work unregister_work;
+};
+
 /**
  * struct drm_dp_aux - DisplayPort AUX channel
  * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -1136,6 +1155,10 @@ struct drm_dp_aux {
         * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
         */
        unsigned i2c_defer_count;
+       /**
+        * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+        */
+       struct drm_dp_aux_cec cec;
 };
 
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -1258,4 +1281,37 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
        return desc->quirks & BIT(quirk);
 }
 
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+                                  struct device *parent);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+                                                const char *name,
+                                                struct device *parent)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+                                      const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
 #endif /* _DRM_DP_HELPER_H_ */
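A DP connector driver would typically wire these hooks up as follows; the sketch assumes a driver-private dp struct holding the AUX channel:

/* At connector registration: */
drm_dp_cec_register_connector(&dp->aux, connector->name, parent_dev);

/* After reading the EDID on hotplug/detect: */
drm_dp_cec_set_edid(&dp->aux, edid);

/* On disconnect: */
drm_dp_cec_unset_edid(&dp->aux);

/* From the IRQ_HPD/ESI handler, so the CEC core can fetch messages: */
drm_dp_cec_irq(&dp->aux);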
index 7e545f5f94d367acef2d86194d8f66b9e412d618..46a8009784df1227f784774bdab50d39e03dceb5 100644 (file)
@@ -649,6 +649,35 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
        return true;
 }
 
+/**
+ * drm_core_check_feature - check driver feature flags
+ * @dev: DRM device to check
+ * @feature: feature flag
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features and the
+ * various DRIVER_\* flags.
+ *
+ * Returns true if the @feature is supported, false otherwise.
+ */
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+{
+       return dev->driver->driver_features & feature;
+}
+
+/**
+ * drm_drv_uses_atomic_modeset - check if the driver implements
+ * atomic_commit()
+ * @dev: DRM device
+ *
+ * This check is useful if drivers do not have DRIVER_ATOMIC set but
+ * have atomic modesetting implemented internally.
+ */
+static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
+{
+       return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
+               dev->mode_config.funcs->atomic_commit != NULL;
+}
+
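Both helpers are meant as cheap capability gates; for instance, an ioctl that only makes sense on atomic drivers could bail out early (a sketch):

static int my_atomic_only_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
                return -EOPNOTSUPP;

        /* ... */
        return 0;
}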
 
 int drm_dev_set_unique(struct drm_device *dev, const char *name);
 
index fb299696c7c417a24f953faaeb6cfd55b57833ed..4f597c0730b480a2db431616446c70d4a36b3306 100644 (file)
@@ -191,11 +191,23 @@ int drm_encoder_init(struct drm_device *dev,
  * Given a registered encoder, return the index of that encoder within a DRM
  * device's list of encoders.
  */
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
 {
        return encoder->index;
 }
 
+/**
+ * drm_encoder_mask - find the mask of a registered encoder
+ * @encoder: encoder to find mask for
+ *
+ * Given a registered encoder, return the mask bit of that encoder for an
+ * encoder's possible_clones field.
+ */
+static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
+{
+       return 1 << drm_encoder_index(encoder);
+}
+
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
  * @encoder: encoder to test
@@ -241,7 +253,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder);
  */
 #define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
        list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
-               for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+               for_each_if ((encoder_mask) & drm_encoder_mask(encoder))
 
 /**
  * drm_for_each_encoder - iterate over all encoders
index d532f88a8d5579f57db39905372b908da69b020f..96e26e3b9a0cd6bf7874d4a99572e180b2e6e7bd 100644 (file)
@@ -16,16 +16,10 @@ struct drm_mode_fb_cmd2;
 struct drm_plane;
 struct drm_plane_state;
 
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count,
-       const struct drm_framebuffer_funcs *funcs);
 int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
                          unsigned int max_conn_count);
 void drm_fb_cma_fbdev_fini(struct drm_device *dev);
 
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
-       unsigned int preferred_bpp, unsigned int max_conn_count,
-       const struct drm_framebuffer_funcs *funcs);
 struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
        unsigned int preferred_bpp, unsigned int max_conn_count);
 void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
index b069433e7fc12f77fc8696e9f8bf2f0442d5d146..5db08c8f1d258a6e4ef6336831f15001e8780519 100644 (file)
@@ -32,6 +32,7 @@
 
 struct drm_fb_helper;
 
+#include <drm/drm_client.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_device.h>
 #include <linux/kgdb.h>
@@ -154,6 +155,20 @@ struct drm_fb_helper_connector {
  * operations.
  */
 struct drm_fb_helper {
+       /**
+        * @client:
+        *
+        * DRM client used by the generic fbdev emulation.
+        */
+       struct drm_client_dev client;
+
+       /**
+        * @buffer:
+        *
+        * Framebuffer used by the generic fbdev emulation.
+        */
+       struct drm_client_buffer *buffer;
+
        struct drm_framebuffer *fb;
        struct drm_device *dev;
        int crtc_count;
@@ -234,6 +249,12 @@ struct drm_fb_helper {
        int preferred_bpp;
 };
 
+static inline struct drm_fb_helper *
+drm_fb_helper_from_client(struct drm_client_dev *client)
+{
+       return container_of(client, struct drm_fb_helper, client);
+}
+
 /**
  * define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
  *
@@ -330,6 +351,10 @@ void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
 
 void drm_fb_helper_lastclose(struct drm_device *dev);
 void drm_fb_helper_output_poll_changed(struct drm_device *dev);
+
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+                               struct drm_fb_helper_surface_size *sizes);
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
 #else
 static inline void drm_fb_helper_prepare(struct drm_device *dev,
                                        struct drm_fb_helper *helper,
@@ -564,6 +589,19 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
 {
 }
 
+static inline int
+drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+                           struct drm_fb_helper_surface_size *sizes)
+{
+       return 0;
+}
+
+static inline int
+drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+       return 0;
+}
+
 #endif
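With the client-based generic emulation, fbdev support in a driver that has no special requirements reduces to a single call after registration (a sketch of a probe tail, error handling elided):

ret = drm_dev_register(drm, 0);
if (ret)
        return ret;

/* Generic fbdev emulation on top of the DRM client API, 32 bpp console. */
drm_fbdev_generic_setup(drm, 32);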
 
 static inline int
index 027ac16da3d15fb99655ad3724562ae8cf222190..26485acc51d7944697ce96f0988c2803f4d8c581 100644 (file)
@@ -192,6 +192,13 @@ struct drm_file {
         */
        unsigned aspect_ratio_allowed:1;
 
+       /**
+        * @writeback_connectors:
+        *
+        * True if client understands writeback connectors
+        */
+       unsigned writeback_connectors:1;
+
        /**
         * @is_master:
         *
index 3e86408dac9f5857daab736bf9e185e925dee78c..f9c15845f465b10d65581cc4fd21d6a511213b83 100644 (file)
@@ -39,6 +39,7 @@ struct drm_mode_fb_cmd2;
  * @hsub: Horizontal chroma subsampling factor
  * @vsub: Vertical chroma subsampling factor
  * @has_alpha: Does the format embed an alpha component?
+ * @is_yuv: Is it a YUV format?
  */
 struct drm_format_info {
        u32 format;
@@ -48,6 +49,7 @@ struct drm_format_info {
        u8 hsub;
        u8 vsub;
        bool has_alpha;
+       bool is_yuv;
 };
 
 /**
index 101f566ae43d01e8d013573501165428aaec92a3..2c3bbb43c7d1a55600c688e08b3abb1a2cdacc83 100644 (file)
@@ -109,6 +109,38 @@ enum drm_mm_insert_mode {
         * Allocates the node from the bottom of the found hole.
         */
        DRM_MM_INSERT_EVICT,
+
+       /**
+        * @DRM_MM_INSERT_ONCE:
+        *
+        * Only check the first hole for suitability, and report -ENOSPC
+        * immediately otherwise, rather than checking every hole until a
+        * suitable one is found. Can only be used in conjunction with another
+        * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
+        */
+       DRM_MM_INSERT_ONCE = BIT(31),
+
+       /**
+        * @DRM_MM_INSERT_HIGHEST:
+        *
+        * Only check the highest hole (the hole with the largest address) and
+        * insert the node at the top of the hole or report -ENOSPC if
+        * unsuitable.
+        *
+        * Does not search all holes.
+        */
+       DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
+
+       /**
+        * @DRM_MM_INSERT_LOWEST:
+        *
+        * Only check the lowest hole (the hole with the smallest address) and
+        * insert the node at the bottom of the hole or report -ENOSPC if
+        * unsuitable.
+        *
+        * Does not search all holes.
+        */
+       DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
 };
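The combined modes plug straight into the range allocator; e.g. pinning a node at the very top of the address space while failing fast if the topmost hole does not fit (bounds are illustrative):

static int pin_high(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
{
        /* Check only the highest hole; -ENOSPC immediately otherwise. */
        return drm_mm_insert_node_in_range(mm, node, size, 0, 0,
                                           0, U64_MAX,
                                           DRM_MM_INSERT_HIGHEST);
}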
 
 /**
@@ -173,7 +205,7 @@ struct drm_mm {
        struct drm_mm_node head_node;
        /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
        struct rb_root_cached interval_tree;
-       struct rb_root holes_size;
+       struct rb_root_cached holes_size;
        struct rb_root holes_addr;
 
        unsigned long scan_active;
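The combined DRM_MM_INSERT_HIGHEST/LOWEST modes above turn top- or
bottom-of-range placement into a single hole lookup instead of a full scan. A
hedged sketch of requesting the topmost hole and falling back to a full
search, assuming the drm_mm_insert_node_in_range() signature of this kernel
generation:

        struct drm_mm_node node = {};
        int err;

        /* try only the highest hole first */
        err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
                                          0, U64_MAX,
                                          DRM_MM_INSERT_HIGHEST);
        if (err == -ENOSPC)
                /* topmost hole too small: scan from the top down */
                err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
                                                  0, U64_MAX,
                                                  DRM_MM_INSERT_HIGH);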
index 33b3a96d66d0a4de3dcb0910edb75fa310eb7a4d..a0b202e1d69a08363c9cf1d39dbc3377f9d07ef1 100644 (file)
@@ -329,10 +329,10 @@ struct drm_mode_config_funcs {
 
 /**
  * struct drm_mode_config - Mode configuration control structure
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
+ * @min_width: minimum fb pixel width on this device
+ * @min_height: minimum fb pixel height on this device
+ * @max_width: maximum fb pixel width on this device
+ * @max_height: maximum fb pixel height on this device
  * @funcs: core driver provided mode setting functions
  * @fb_base: base address of the framebuffer
  * @poll_enabled: track polling support for this device
@@ -726,6 +726,11 @@ struct drm_mode_config {
         * HDMI infoframe aspect ratio setting.
         */
        struct drm_property *aspect_ratio_property;
+       /**
+        * @content_type_property: Optional connector property to control the
+        * HDMI infoframe content type setting.
+        */
+       struct drm_property *content_type_property;
        /**
         * @degamma_lut_property: Optional CRTC property to set the LUT used to
         * convert the framebuffer's colors to linear gamma.
@@ -779,6 +784,29 @@ struct drm_mode_config {
         */
        struct drm_property *panel_orientation_property;
 
+       /**
+        * @writeback_fb_id_property: Property for writeback connectors, storing
+        * the ID of the output framebuffer.
+        * See also: drm_writeback_connector_init()
+        */
+       struct drm_property *writeback_fb_id_property;
+
+       /**
+        * @writeback_pixel_formats_property: Property for writeback connectors,
+        * storing an array of the supported pixel formats for the writeback
+        * engine (read-only).
+        * See also: drm_writeback_connector_init()
+        */
+       struct drm_property *writeback_pixel_formats_property;
+       /**
+        * @writeback_out_fence_ptr_property: Property for writeback connectors,
+        * fd pointer representing the outgoing fences for a writeback
+        * connector. Userspace should provide a pointer to a value of type s32,
+        * and then cast that pointer to u64.
+        * See also: drm_writeback_connector_init()
+        */
+       struct drm_property *writeback_out_fence_ptr_property;
+
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
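To illustrate the pointer-passing convention documented for
@writeback_out_fence_ptr_property, a hedged userspace sketch using libdrm's
atomic API (the req, wb_conn_id and out_fence_ptr_prop_id values are assumed
to have been obtained beforehand):

        int32_t out_fence_fd = -1;

        /* userspace passes the *address* of an s32, cast to u64; the
         * kernel writes the out-fence fd through that pointer when the
         * atomic commit is accepted */
        drmModeAtomicAddProperty(req, wb_conn_id, out_fence_ptr_prop_id,
                                 (uint64_t)(uintptr_t)&out_fence_fd);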
 
index b159fe07fcf9e166284aa4bab0bb042d1ccbbde5..baded65144563b219657f0082b974a8fa6faf64f 100644 (file)
@@ -530,7 +530,7 @@ drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
 void drm_mode_prune_invalid(struct drm_device *dev,
                            struct list_head *mode_list, bool verbose);
 void drm_mode_sort(struct list_head *mode_list);
-void drm_mode_connector_list_update(struct drm_connector *connector);
+void drm_connector_list_update(struct drm_connector *connector);
 
 /* parsing cmdline modes */
 bool
index 35e2a3a79fc56843daa986ec6dc558176b63a2d9..61142aa0ab23ab912cbc69bb7e7bd29d91c2069b 100644 (file)
@@ -785,7 +785,7 @@ struct drm_connector_helper_funcs {
         *
         * This function should fill in all modes currently valid for the sink
         * into the &drm_connector.probed_modes list. It should also update the
-        * EDID property by calling drm_mode_connector_update_edid_property().
+        * EDID property by calling drm_connector_update_edid_property().
         *
         * The usual way to implement this is to cache the EDID retrieved in the
         * probe callback somewhere in the driver-private connector structure.
@@ -974,6 +974,21 @@ struct drm_connector_helper_funcs {
         */
        int (*atomic_check)(struct drm_connector *connector,
                            struct drm_connector_state *state);
+
+       /**
+        * @atomic_commit:
+        *
+        * This hook is to be used by drivers implementing writeback connectors
+        * that need a point at which to commit the writeback job to the hardware.
+        * The writeback_job to commit is available in
+        * &drm_connector_state.writeback_job.
+        *
+        * This hook is optional.
+        *
+        * This callback is used by the atomic modeset helpers.
+        */
+       void (*atomic_commit)(struct drm_connector *connector,
+                             struct drm_connector_state *state);
 };
 
 /**
index b93c239afb608e04f2d06149833488cd66db4d88..ead34ab5ca4e9885ab468b4acf1bc8022ae6419e 100644 (file)
@@ -17,6 +17,8 @@ struct drm_bridge;
 struct device_node;
 
 #ifdef CONFIG_OF
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+                           struct device_node *port);
 uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
                                    struct device_node *port);
 void drm_of_component_match_add(struct device *master,
@@ -34,6 +36,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
                                struct drm_panel **panel,
                                struct drm_bridge **bridge);
 #else
+static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+                                         struct device_node *port)
+{
+       return 0;
+}
+
 static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
                                                  struct device_node *port)
 {
index 14ac240a1f646d6253c0a076ecaaa9b8a1a567c3..582a0ec0aa70448e07b39f5de7f49868e5787953 100644 (file)
@@ -89,6 +89,7 @@ struct drm_panel {
        struct drm_device *drm;
        struct drm_connector *connector;
        struct device *dev;
+       struct device_link *link;
 
        const struct drm_panel_funcs *funcs;
 
@@ -199,7 +200,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np);
 #else
 static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 #endif
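Since of_drm_find_panel() now returns an ERR_PTR() rather than NULL, callers
must switch from NULL checks to IS_ERR(). A hedged sketch of the updated
calling convention:

        struct drm_panel *panel;

        panel = of_drm_find_panel(np);
        if (IS_ERR(panel))
                return PTR_ERR(panel);  /* e.g. -ENODEV when CONFIG_OF is off */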
 
index 674599025d7d3b15fb08e5a7337475f8f9d588a8..8181e9e7cf1dc9c2c90659de3e48eb28e9f27520 100644 (file)
@@ -58,11 +58,4 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev,
 }
 #endif
 
-#define DRM_PCIE_SPEED_25 1
-#define DRM_PCIE_SPEED_50 2
-#define DRM_PCIE_SPEED_80 4
-
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
-
 #endif /* _DRM_PCI_H_ */
index 26fa50c2a50e8cf88c7e5ad6e344ac0ba19de18c..8a152dc16ea539a97d599bfb693eb2373f3aa26d 100644 (file)
@@ -34,31 +34,15 @@ struct drm_modeset_acquire_ctx;
 
 /**
  * struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- *     plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- *     plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @alpha: opacity of the plane
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- *     Note that multiple active planes on the same crtc can have an identical
- *     zpos value. The rule to solving the conflict is to compare the plane
- *     object IDs; the plane with a higher ID must be stacked on top of a
- *     plane with a lower ID.
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- *     where N is the number of active planes for given crtc. Note that
- *     the driver must set drm_mode_config.normalize_zpos or call
- *     drm_atomic_normalize_zpos() to update this before it can be trusted.
- * @src: clipped source coordinates of the plane (in 16.16)
- * @dst: clipped destination coordinates of the plane
- * @state: backpointer to global drm_atomic_state
+ *
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
+ * raw coordinates provided by userspace. Drivers should use
+ * drm_atomic_helper_check_plane_state() and only use the derived rectangles in
+ * @src and @dst to program the hardware.
  */
 struct drm_plane_state {
+       /** @plane: backpointer to the plane */
        struct drm_plane *plane;
 
        /**
@@ -87,7 +71,7 @@ struct drm_plane_state {
         * preserved.
         *
         * Drivers should store any implicit fence in this from their
-        * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
+        * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
         * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
         */
        struct dma_fence *fence;
@@ -108,20 +92,60 @@ struct drm_plane_state {
         */
        int32_t crtc_y;
 
+       /** @crtc_w: width of visible portion of plane on crtc */
+       /** @crtc_h: height of visible portion of plane on crtc */
        uint32_t crtc_w, crtc_h;
 
-       /* Source values are 16.16 fixed point */
-       uint32_t src_x, src_y;
+       /**
+        * @src_x: left position of visible portion of plane within plane (in
+        * 16.16 fixed point).
+        */
+       uint32_t src_x;
+       /**
+        * @src_y: upper position of visible portion of plane within plane (in
+        * 16.16 fixed point).
+        */
+       uint32_t src_y;
+       /** @src_w: width of visible portion of plane (in 16.16) */
+       /** @src_h: height of visible portion of plane (in 16.16) */
        uint32_t src_h, src_w;
 
-       /* Plane opacity */
+       /**
+        * @alpha:
+        * Opacity of the plane with 0 as completely transparent and 0xffff as
+        * completely opaque. See drm_plane_create_alpha_property() for more
+        * details.
+        */
        u16 alpha;
 
-       /* Plane rotation */
+       /**
+        * @rotation:
+        * Rotation of the plane. See drm_plane_create_rotation_property() for
+        * more details.
+        */
        unsigned int rotation;
 
-       /* Plane zpos */
+       /**
+        * @zpos:
+        * Priority of the given plane on crtc (optional).
+        *
+        * Note that multiple active planes on the same crtc can have an
+        * identical zpos value. The rule for solving the conflict is to compare
+        * the plane object IDs; the plane with a higher ID must be stacked on
+        * top of a plane with a lower ID.
+        *
+        * See drm_plane_create_zpos_property() and
+        * drm_plane_create_zpos_immutable_property() for more details.
+        */
        unsigned int zpos;
+
+       /**
+        * @normalized_zpos:
+        * Normalized value of zpos: unique, range from 0 to N-1 where N is the
+        * number of active planes for given crtc. Note that the driver must set
+        * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to
+        * update this before it can be trusted.
+        */
        unsigned int normalized_zpos;
 
        /**
@@ -138,7 +162,8 @@ struct drm_plane_state {
         */
        enum drm_color_range color_range;
 
-       /* Clipped coordinates */
+       /** @src: clipped source coordinates of the plane (in 16.16) */
+       /** @dst: clipped destination coordinates of the plane */
        struct drm_rect src, dst;
 
        /**
@@ -157,6 +182,7 @@ struct drm_plane_state {
         */
        struct drm_crtc_commit *commit;
 
+       /** @state: backpointer to global drm_atomic_state */
        struct drm_atomic_state *state;
 };
 
@@ -288,6 +314,8 @@ struct drm_plane_funcs {
         * cleaned up by calling the @atomic_destroy_state hook in this
         * structure.
         *
+        * This callback is mandatory for atomic drivers.
+        *
         * Atomic drivers which don't subclass &struct drm_plane_state should use
         * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
         * state structure to extend it with driver-private state should use
@@ -314,6 +342,8 @@ struct drm_plane_funcs {
         *
         * Destroy a state duplicated with @atomic_duplicate_state and release
         * or unreference all resources it references
+        *
+        * This callback is mandatory for atomic drivers.
         */
        void (*atomic_destroy_state)(struct drm_plane *plane,
                                     struct drm_plane_state *state);
@@ -431,7 +461,10 @@ struct drm_plane_funcs {
         * This optional hook is used for the DRM to determine if the given
         * format/modifier combination is valid for the plane. This allows the
         * DRM to generate the correct format bitmask (which formats apply to
-        * which modifier).
+        * which modifier), and to validate modifiers at atomic_check time.
+        *
+        * If not present, then any modifier in the plane's modifier
+        * list is allowed with any of the plane's formats.
         *
         * Returns:
         *
@@ -492,30 +525,27 @@ enum drm_plane_type {
 
 /**
  * struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @modifiers: array of modifiers supported by this plane
- * @modifier_count: number of modifiers supported
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- *     drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @alpha_property: alpha property for this plane
- * @zpos_property: zpos property for this plane
- * @rotation_property: rotation property for this plane
- * @helper_private: mid-layer private data
+ *
+ * Planes represent the scanout hardware of a display block. They receive their
+ * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control
+ * the color conversion, see `Plane Composition Properties`_ for more details,
+ * and are also involved in the color conversion of input pixels, see `Color
+ * Management Properties`_ for details on that.
  */
 struct drm_plane {
+       /** @dev: DRM device this plane belongs to */
        struct drm_device *dev;
+
+       /**
+        * @head:
+        *
+        * List of all planes on @dev, linked from &drm_mode_config.plane_list.
+        * Invariant over the lifetime of @dev and therefore does not need
+        * locking.
+        */
        struct list_head head;
 
+       /** @name: human readable name, can be overwritten by the driver */
        char *name;
 
        /**
@@ -529,35 +559,62 @@ struct drm_plane {
         */
        struct drm_modeset_lock mutex;
 
+       /** @base: base mode object */
        struct drm_mode_object base;
 
+       /**
+        * @possible_crtcs: pipes this plane can be bound to, constructed from
+        * drm_crtc_mask()
+        */
        uint32_t possible_crtcs;
+       /** @format_types: array of formats supported by this plane */
        uint32_t *format_types;
+       /** @format_count: Size of the array pointed at by @format_types. */
        unsigned int format_count;
+       /**
+        * @format_default: driver hasn't supplied supported formats for the
+        * plane. Used by the drm_plane_init compatibility wrapper only.
+        */
        bool format_default;
 
+       /** @modifiers: array of modifiers supported by this plane */
        uint64_t *modifiers;
+       /** @modifier_count: Size of the array pointed at by @modifiers. */
        unsigned int modifier_count;
 
        /**
-        * @crtc: Currently bound CRTC, only really meaningful for non-atomic
-        * drivers.  Atomic drivers should instead check &drm_plane_state.crtc.
+        * @crtc:
+        *
+        * Currently bound CRTC, only meaningful for non-atomic drivers. For
+        * atomic drivers this is forced to be NULL, atomic drivers should
+        * instead check &drm_plane_state.crtc.
         */
        struct drm_crtc *crtc;
 
        /**
-        * @fb: Currently bound framebuffer, only really meaningful for
-        * non-atomic drivers.  Atomic drivers should instead check
-        * &drm_plane_state.fb.
+        * @fb:
+        *
+        * Currently bound framebuffer, only meaningful for non-atomic drivers.
+        * For atomic drivers this is forced to be NULL, atomic drivers should
+        * instead check &drm_plane_state.fb.
         */
        struct drm_framebuffer *fb;
 
+       /**
+        * @old_fb:
+        *
+        * Temporary tracking of the old fb while a modeset is ongoing. Only
+        * used by non-atomic drivers, forced to be NULL for atomic drivers.
+        */
        struct drm_framebuffer *old_fb;
 
+       /** @funcs: plane control functions */
        const struct drm_plane_funcs *funcs;
 
+       /** @properties: property tracking for this plane */
        struct drm_object_properties properties;
 
+       /** @type: Type of plane, see &enum drm_plane_type for details. */
        enum drm_plane_type type;
 
        /**
@@ -566,6 +623,7 @@ struct drm_plane {
         */
        unsigned index;
 
+       /** @helper_private: mid-layer private data */
        const struct drm_plane_helper_funcs *helper_private;
 
        /**
@@ -583,8 +641,23 @@ struct drm_plane {
         */
        struct drm_plane_state *state;
 
+       /**
+        * @alpha_property:
+        * Optional alpha property for this plane. See
+        * drm_plane_create_alpha_property().
+        */
        struct drm_property *alpha_property;
+       /**
+        * @zpos_property:
+        * Optional zpos property for this plane. See
+        * drm_plane_create_zpos_property().
+        */
        struct drm_property *zpos_property;
+       /**
+        * @rotation_property:
+        * Optional rotation property for this plane. See
+        * drm_plane_create_rotation_property().
+        */
        struct drm_property *rotation_property;
 
        /**
@@ -632,10 +705,20 @@ void drm_plane_cleanup(struct drm_plane *plane);
  * Given a registered plane, return the index of that plane within a DRM
  * device's list of planes.
  */
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
+static inline unsigned int drm_plane_index(const struct drm_plane *plane)
 {
        return plane->index;
 }
+
+/**
+ * drm_plane_mask - find the mask of a registered plane
+ * @plane: plane to find mask for
+ */
+static inline u32 drm_plane_mask(const struct drm_plane *plane)
+{
+       return 1 << drm_plane_index(plane);
+}
+
 struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
 void drm_plane_force_disable(struct drm_plane *plane);
 
@@ -671,7 +754,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
  */
 #define drm_for_each_plane_mask(plane, dev, plane_mask) \
        list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
-               for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+               for_each_if ((plane_mask) & drm_plane_mask(plane))
 
 /**
  * drm_for_each_legacy_plane - iterate over all planes for legacy userspace
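The rewritten &drm_plane_state comment above asks drivers to validate the raw
userspace rectangles with drm_atomic_helper_check_plane_state() and program
hardware only from the clipped @src/@dst. A hedged sketch of an atomic_check
hook following that rule (the mydrv name and the no-scaling limits are
illustrative):

        static int mydrv_plane_atomic_check(struct drm_plane *plane,
                                            struct drm_plane_state *state)
        {
                struct drm_crtc_state *crtc_state;

                if (!state->crtc)
                        return 0;

                crtc_state = drm_atomic_get_new_crtc_state(state->state,
                                                           state->crtc);

                /* clips state->src and state->dst for us */
                return drm_atomic_helper_check_plane_state(state, crtc_state,
                                DRM_PLANE_HELPER_NO_SCALING,
                                DRM_PLANE_HELPER_NO_SCALING,
                                false, true);
        }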
index 28d7ce620729d48797bc0b0e41f773bc51e7c50b..26cee29347819fc41b0dcc1bd7697f8a20c92faa 100644 (file)
@@ -67,8 +67,10 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
                            int crtc_x, int crtc_y,
                            unsigned int crtc_w, unsigned int crtc_h,
                            uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h);
-int drm_plane_helper_disable(struct drm_plane *plane);
+                           uint32_t src_w, uint32_t src_h,
+                           struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable(struct drm_plane *plane,
+                            struct drm_modeset_acquire_ctx *ctx);
 
 /* For use by drm_crtc_helper.c */
 int drm_plane_helper_commit(struct drm_plane *plane,
index 4d5f5d6cf6a686d73264baa1e2806c9c71097cb6..d716d653b096465805e0e5c2f26c3bd0e388a708 100644 (file)
@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info);
 void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach);
 void drm_gem_map_detach(struct dma_buf *dma_buf,
                        struct dma_buf_attachment *attach);
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           enum dma_data_direction dir);
 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
-                                unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
-                                 unsigned long page_num, void *addr);
 void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
 void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
                           void *addr);
index e1a46e9991cca11a641b9a26ad64757502da38a3..f3e6eed3e79c640e04ff1c163701a74a13ee9f40 100644 (file)
 struct drm_printer {
        /* private: */
        void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+       void (*puts)(struct drm_printer *p, const char *str);
        void *arg;
        const char *prefix;
 };
 
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_coredump(struct drm_printer *p, const char *str);
 void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_seq_file(struct drm_printer *p, const char *str);
 void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
 void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
 
 __printf(2, 3)
 void drm_printf(struct drm_printer *p, const char *f, ...);
+void drm_puts(struct drm_printer *p, const char *str);
 
 __printf(2, 0)
 /**
@@ -104,6 +109,71 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
 #define drm_printf_indent(printer, indent, fmt, ...) \
        drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
 
+/**
+ * struct drm_print_iterator - local struct used with drm_printer_coredump
+ * @data: Pointer to the devcoredump output buffer
+ * @start: The offset within the buffer to start writing
+ * @remain: The number of bytes to write for this iteration
+ */
+struct drm_print_iterator {
+       void *data;
+       ssize_t start;
+       ssize_t remain;
+       /* private: */
+       ssize_t offset;
+};
+
+/**
+ * drm_coredump_printer - construct a &drm_printer that can output to a buffer
+ * from the read function for devcoredump
+ * @iter: A pointer to a struct drm_print_iterator for the read instance
+ *
+ * This wrapper extends drm_printf() to work with a dev_coredumpm() callback
+ * function. The passed in drm_print_iterator struct contains the buffer
+ * pointer, size and offset as passed in from devcoredump.
+ *
+ * For example::
+ *
+ *     void coredump_read(char *buffer, loff_t offset, size_t count,
+ *             void *data, size_t datalen)
+ *     {
+ *             struct drm_print_iterator iter;
+ *             struct drm_printer p;
+ *
+ *             iter.data = buffer;
+ *             iter.start = offset;
+ *             iter.remain = count;
+ *
+ *             p = drm_coredump_printer(&iter);
+ *
+ *             drm_printf(&p, "foo=%d\n", foo);
+ *     }
+ *
+ *     void makecoredump(...)
+ *     {
+ *             ...
+ *             dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL,
+ *                     coredump_read, ...)
+ *     }
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer
+drm_coredump_printer(struct drm_print_iterator *iter)
+{
+       struct drm_printer p = {
+               .printfn = __drm_printfn_coredump,
+               .puts = __drm_puts_coredump,
+               .arg = iter,
+       };
+
+       /* Set the internal offset of the iterator to zero */
+       iter->offset = 0;
+
+       return p;
+}
+
 /**
  * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
  * @f:  the &struct seq_file to output to
@@ -115,6 +185,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
 {
        struct drm_printer p = {
                .printfn = __drm_printfn_seq_file,
+               .puts = __drm_puts_seq_file,
                .arg = f,
        };
        return p;
@@ -195,6 +266,7 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
 #define DRM_UT_VBL             0x20
 #define DRM_UT_STATE           0x40
 #define DRM_UT_LEASE           0x80
+#define DRM_UT_DP              0x100
 
 __printf(3, 4)
 void drm_dev_printk(const struct device *dev, const char *level,
@@ -307,6 +379,11 @@ void drm_err(const char *format, ...);
 #define DRM_DEBUG_LEASE(fmt, ...)                                      \
        drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
 
+#define        DRM_DEV_DEBUG_DP(dev, fmt, ...)                                 \
+       drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
+#define DRM_DEBUG_DP(dev, fmt, ...)                                    \
+       drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+
 #define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...)     \
 ({                                                                     \
        static DEFINE_RATELIMIT_STATE(_rs,                              \
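The new @puts hook lets constant strings bypass the printf path. A hedged
sketch of a debugfs show function built on drm_seq_file_printer(), exercising
both drm_puts() and drm_printf() (mydrv names are illustrative):

        static int mydrv_state_show(struct seq_file *m, void *data)
        {
                struct drm_printer p = drm_seq_file_printer(m);

                drm_puts(&p, "mydrv state dump\n");     /* via @puts */
                drm_printf(&p, "frames: %u\n", 0u);     /* via @printfn */
                return 0;
        }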
index 1d5c0b2a8956a3e41db6fb9d43e989b1d10773e7..c030f6ccab99cf469e603e55e550eeb8bc11f2f2 100644 (file)
@@ -147,10 +147,10 @@ struct drm_property {
         *     properties are not exposed to legacy userspace.
         *
         * DRM_MODE_PROP_IMMUTABLE
-        *     Set for properties where userspace cannot be changed by
+        *     Set for properties whose values cannot be changed by
         *     userspace. The kernel is allowed to update the value of these
         *     properties. This is generally used to expose probe state to
-        *     usersapce, e.g. the EDID, or the connector path property on DP
+        *     userspace, e.g. the EDID, or the connector path property on DP
         *     MST sinks.
         */
        uint32_t flags;
index 8758df94e9a0f765831a8cea162e2e824b507abf..c7987daeaed0856542e0be04caea76ff96ddf3ed 100644 (file)
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
        rwlock_t vm_lock;
        struct drm_mm_node vm_node;
        struct rb_root vm_files;
+       bool readonly:1;
 };
 
 struct drm_vma_offset_manager {
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
new file mode 100644 (file)
index 0000000..23df9d4
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#ifndef __DRM_WRITEBACK_H__
+#define __DRM_WRITEBACK_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <linux/workqueue.h>
+
+struct drm_writeback_connector {
+       struct drm_connector base;
+
+       /**
+        * @encoder: Internal encoder used by the connector to fulfill
+        * the DRM framework requirements. The users of the
+        * @drm_writeback_connector control the behaviour of the @encoder
+        * by passing the @enc_funcs parameter to drm_writeback_connector_init()
+        * function.
+        */
+       struct drm_encoder encoder;
+
+       /**
+        * @pixel_formats_blob_ptr:
+        *
+        * DRM blob property data for the pixel formats list on writeback
+        * connectors
+        * See also drm_writeback_connector_init()
+        */
+       struct drm_property_blob *pixel_formats_blob_ptr;
+
+       /** @job_lock: Protects job_queue */
+       spinlock_t job_lock;
+
+       /**
+        * @job_queue:
+        *
+        * Holds a list of a connector's writeback jobs; the last item is the
+        * most recent. The first item may be either waiting for the hardware
+        * to begin writing, or currently being written.
+        *
+        * See also: drm_writeback_queue_job() and
+        * drm_writeback_signal_completion()
+        */
+       struct list_head job_queue;
+
+       /**
+        * @fence_context:
+        *
+        * timeline context used for fence operations.
+        */
+       unsigned int fence_context;
+       /**
+        * @fence_lock:
+        *
+        * spinlock to protect the fences in the fence_context.
+        */
+       spinlock_t fence_lock;
+       /**
+        * @fence_seqno:
+        *
+        * Seqno variable used as monotonic counter for the fences
+        * created on the connector's timeline.
+        */
+       unsigned long fence_seqno;
+       /**
+        * @timeline_name:
+        *
+        * The name of the connector's fence timeline.
+        */
+       char timeline_name[32];
+};
+
+struct drm_writeback_job {
+       /**
+        * @cleanup_work:
+        *
+        * Used to allow drm_writeback_signal_completion to defer dropping the
+        * framebuffer reference to a workqueue
+        */
+       struct work_struct cleanup_work;
+
+       /**
+        * @list_entry:
+        *
+        * List item for the writeback connector's @job_queue
+        */
+       struct list_head list_entry;
+
+       /**
+        * @fb:
+        *
+        * Framebuffer to be written to by the writeback connector. Do not set
+        * directly, use drm_atomic_set_writeback_fb_for_connector()
+        */
+       struct drm_framebuffer *fb;
+
+       /**
+        * @out_fence:
+        *
+        * Fence which will signal once the writeback has completed
+        */
+       struct dma_fence *out_fence;
+};
+
+static inline struct drm_writeback_connector *
+drm_connector_to_writeback(struct drm_connector *connector)
+{
+       return container_of(connector, struct drm_writeback_connector, base);
+}
+
+int drm_writeback_connector_init(struct drm_device *dev,
+                                struct drm_writeback_connector *wb_connector,
+                                const struct drm_connector_funcs *con_funcs,
+                                const struct drm_encoder_helper_funcs *enc_helper_funcs,
+                                const u32 *formats, int n_formats);
+
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+                            struct drm_writeback_job *job);
+
+void drm_writeback_cleanup_job(struct drm_writeback_job *job);
+
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+                               int status);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector);
+#endif
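Putting the new API together, a hedged sketch of a driver registering a
writeback connector and signalling completion from its write-done interrupt
(the mydrv_* funcs tables and format list are assumptions, not from this
patch):

        static const u32 mydrv_wb_formats[] = { DRM_FORMAT_XRGB8888 };

        /* during modeset initialization */
        ret = drm_writeback_connector_init(drm, &mydrv->wb_connector,
                                           &mydrv_wb_connector_funcs,
                                           &mydrv_wb_encoder_helper_funcs,
                                           mydrv_wb_formats,
                                           ARRAY_SIZE(mydrv_wb_formats));

        /* from the write-complete interrupt handler */
        drm_writeback_signal_completion(&mydrv->wb_connector, 0);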
index dec655894d0806a8060fe6aef3551a2785ba1ca3..21c648b0b2a16c6dce65d61a75ced0823999ed95 100644 (file)
@@ -27,6 +27,8 @@
 #include <drm/spsc_queue.h>
 #include <linux/dma-fence.h>
 
+#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
@@ -43,18 +45,37 @@ enum drm_sched_priority {
 };
 
 /**
- * drm_sched_entity - A wrapper around a job queue (typically attached
- * to the DRM file_priv).
+ * struct drm_sched_entity - A wrapper around a job queue (typically
+ * attached to the DRM file_priv).
+ *
+ * @list: used to append this struct to the list of entities in the
+ *        runqueue.
+ * @rq: runqueue to which this entity belongs.
+ * @rq_lock: lock to modify the runqueue to which this entity belongs.
+ * @job_queue: the list of jobs of this entity.
+ * @fence_seq: a linearly increasing seqno incremented with each
+ *             new &drm_sched_fence which is part of the entity.
+ * @fence_context: a unique context for all the fences which belong
+ *                 to this entity.
+ *                 The &drm_sched_fence.scheduled uses the
+ *                 fence_context but &drm_sched_fence.finished uses
+ *                 fence_context + 1.
+ * @dependency: the dependency fence of the job which is on the top
+ *              of the job queue.
+ * @cb: callback for the dependency fence above.
+ * @guilty: points to ctx's guilty.
+ * @fini_status: contains the exit status in case the process was signalled.
+ * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
  * scheduling policy.
-*/
+ */
 struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
        spinlock_t                      rq_lock;
-       struct drm_gpu_scheduler        *sched;
 
        struct spsc_queue               job_queue;
 
@@ -63,47 +84,98 @@ struct drm_sched_entity {
 
        struct dma_fence                *dependency;
        struct dma_fence_cb             cb;
-       atomic_t                        *guilty; /* points to ctx's guilty */
-       int            fini_status;
-       struct dma_fence    *last_scheduled;
+       atomic_t                        *guilty;
+       struct dma_fence                *last_scheduled;
+       struct task_struct              *last_user;
 };
 
 /**
+ * struct drm_sched_rq - queue of entities to be scheduled.
+ *
+ * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs.
+ * @entities: list of the entities to be scheduled.
+ * @current_entity: the entity which is to be scheduled.
+ *
  * Run queue is a set of entities scheduling command submissions for
  * one specific ring. It implements the scheduling policy that selects
  * the next entity to emit commands from.
-*/
+ */
 struct drm_sched_rq {
        spinlock_t                      lock;
+       struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
 };
 
+/**
+ * struct drm_sched_fence - fences corresponding to the scheduling of a job.
+ */
 struct drm_sched_fence {
+        /**
+         * @scheduled: this fence is what will be signaled by the scheduler
+         * when the job is scheduled.
+         */
        struct dma_fence                scheduled;
 
-       /* This fence is what will be signaled by the scheduler when
-        * the job is completed.
-        *
-        * When setting up an out fence for the job, you should use
-        * this, since it's available immediately upon
-        * drm_sched_job_init(), and the fence returned by the driver
-        * from run_job() won't be created until the dependencies have
-        * resolved.
-        */
+        /**
+         * @finished: this fence is what will be signaled by the scheduler
+         * when the job is completed.
+         *
+         * When setting up an out fence for the job, you should use
+         * this, since it's available immediately upon
+         * drm_sched_job_init(), and the fence returned by the driver
+         * from run_job() won't be created until the dependencies have
+         * resolved.
+         */
        struct dma_fence                finished;
 
+        /**
+         * @cb: the callback for the parent fence below.
+         */
        struct dma_fence_cb             cb;
+        /**
+         * @parent: the fence returned by &drm_sched_backend_ops.run_job
+         * when scheduling the job on hardware. We signal the
+         * &drm_sched_fence.finished fence once parent is signalled.
+         */
        struct dma_fence                *parent;
+        /**
+         * @sched: the scheduler instance to which the job owning this
+         * struct belongs.
+         */
        struct drm_gpu_scheduler        *sched;
+        /**
+         * @lock: the lock used by the scheduled and the finished fences.
+         */
        spinlock_t                      lock;
+        /**
+         * @owner: job owner for debugging
+         */
        void                            *owner;
 };
 
 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 
 /**
- * drm_sched_job - A job to be run by an entity.
+ * struct drm_sched_job - A job to be run by an entity.
+ *
+ * @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @sched: the scheduler instance on which this job is scheduled.
+ * @s_fence: contains the fences for the scheduling of the job.
+ * @finish_cb: the callback for the finished fence.
+ * @finish_work: schedules the function @drm_sched_job_finish once the job has
+ *               finished to remove the job from the
+ *               @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
+ *            interval is over.
+ * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @karma: increment on every hang caused by this job. If this exceeds the hang
+ *         limit of the scheduler then the job is marked guilty and will not
+ *         be scheduled further.
+ * @s_priority: the priority of the job.
+ * @entity: the entity to which this job belongs.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -130,38 +202,64 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 }
 
 /**
+ * struct drm_sched_backend_ops
+ *
  * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
+ * these functions should be implemented in driver side.
+ */
 struct drm_sched_backend_ops {
-       /* Called when the scheduler is considering scheduling this
-        * job next, to get another struct dma_fence for this job to
+       /**
+         * @dependency: Called when the scheduler is considering scheduling
+         * this job next, to get another struct dma_fence for this job to
         * block on.  Once it returns NULL, run_job() may be called.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);
 
-       /* Called to execute the job once all of the dependencies have
-        * been resolved.  This may be called multiple times, if
+       /**
+         * @run_job: Called to execute the job once all of the dependencies
+         * have been resolved.  This may be called multiple times, if
         * timedout_job() has happened and drm_sched_job_recovery()
         * decides to try it again.
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
 
-       /* Called when a job has taken too long to execute, to trigger
-        * GPU recovery.
+       /**
+         * @timedout_job: Called when a job has taken too long to execute,
+         * to trigger GPU recovery.
         */
        void (*timedout_job)(struct drm_sched_job *sched_job);
 
-       /* Called once the job's finished fence has been signaled and
-        * it's time to clean it up.
+       /**
+         * @free_job: Called once the job's finished fence has been signaled
+         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
 };
 
 /**
- * One scheduler is implemented for each hardware ring
-*/
+ * struct drm_gpu_scheduler
+ *
+ * @ops: backend operations provided by the driver.
+ * @hw_submission_limit: the max size of the hardware queue.
+ * @timeout: the time after which a job is removed from the scheduler.
+ * @name: name of the ring for which this scheduler is being used.
+ * @sched_rq: priority wise array of run queues.
+ * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
+ *                  is ready to be scheduled.
+ * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ *                 waits on this wait queue until all the scheduled jobs are
+ *                 finished.
+ * @hw_rq_count: the number of jobs currently in the hardware queue.
+ * @job_id_count: used to assign a unique id to each job.
+ * @thread: the kthread on which the scheduler runs.
+ * @ring_mirror_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the ring_mirror_list.
+ * @hang_limit: once the number of hangs caused by a job crosses this limit,
+ *              the job is marked guilty and will not be considered for
+ *              scheduling further.
+ *
+ * One scheduler is implemented for each hardware ring.
+ */
 struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;
        uint32_t                        hw_submission_limit;
@@ -184,16 +282,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-                         struct drm_sched_entity *entity,
-                         struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                         struct drm_sched_rq **rq_list,
+                         unsigned int num_rq_list,
                          atomic_t *guilty);
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-                          struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -204,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-                      struct drm_gpu_scheduler *sched,
                       struct drm_sched_entity *entity,
                       void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
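With the reworked prototypes above, an entity now carries its run queues
directly and a job no longer names the scheduler at init time. A hedged
sketch of the resulting submission flow (error handling trimmed, the ctx and
job names are hypothetical):

        struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

        /* one-time setup, e.g. at context creation */
        drm_sched_entity_init(&ctx->entity, &rq, 1, NULL);

        /* per-job submission */
        drm_sched_job_init(&job->base, &ctx->entity, ctx);
        drm_sched_entity_push_job(&job->base, &ctx->entity);

        /* teardown: flushes remaining jobs, then tears the entity down */
        drm_sched_entity_destroy(&ctx->entity);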
index c9e5a6621b954c26823978b4b735119457117260..c44703f471b37d4dfa110b8a0b9368fc3c43d6b3 100644 (file)
@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
 #define    I845_TSEG_SIZE_512K (2 << 1)
 #define    I845_TSEG_SIZE_1M   (3 << 1)
 
-#define INTEL_BSM 0x5c
+#define INTEL_BSM              0x5c
+#define INTEL_GEN11_BSM_DW0    0xc0
+#define INTEL_GEN11_BSM_DW1    0xc4
 #define   INTEL_BSM_MASK       (-(1u << 20))
 
 #endif                         /* _I915_DRM_H_ */
index bab70ff6e78bdda55d8708a6f3f4beeba11b28ab..fbf5cfc9b352f7a005071909479624e46253f516 100644 (file)
 #define INTEL_KBL_GT2_IDS(info)        \
        INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
        INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
-       INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
        INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
        INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
        INTEL_VGA_DEVICE(0x5912, info), /* DT  GT2 */ \
 #define INTEL_KBL_GT4_IDS(info) \
        INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
+/* AML/KBL Y GT2 */
+#define INTEL_AML_GT2_IDS(info) \
+       INTEL_VGA_DEVICE(0x591C, info),  /* ULX GT2 */ \
+       INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+
 #define INTEL_KBL_IDS(info) \
        INTEL_KBL_GT1_IDS(info), \
        INTEL_KBL_GT2_IDS(info), \
        INTEL_KBL_GT3_IDS(info), \
-       INTEL_KBL_GT4_IDS(info)
+       INTEL_KBL_GT4_IDS(info), \
+       INTEL_AML_GT2_IDS(info)
 
 /* CFL S */
 #define INTEL_CFL_S_GT1_IDS(info) \
        INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
        INTEL_VGA_DEVICE(0x3E94, info)  /* Halo GT2 */
 
-/* CFL U GT1 */
-#define INTEL_CFL_U_GT1_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA1, info), \
-       INTEL_VGA_DEVICE(0x3EA4, info)
-
 /* CFL U GT2 */
 #define INTEL_CFL_U_GT2_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA0, info), \
-       INTEL_VGA_DEVICE(0x3EA3, info), \
        INTEL_VGA_DEVICE(0x3EA9, info)
 
 /* CFL U GT3 */
 #define INTEL_CFL_U_GT3_IDS(info) \
-       INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x3EA8, info)  /* ULT GT3 */
 
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(info) \
+       INTEL_VGA_DEVICE(0x3EA1, info)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(info) \
+       INTEL_VGA_DEVICE(0x3EA0, info)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(info) \
+       INTEL_VGA_DEVICE(0x3EA2, info), \
+       INTEL_VGA_DEVICE(0x3EA3, info), \
+       INTEL_VGA_DEVICE(0x3EA4, info)
+
 #define INTEL_CFL_IDS(info)       \
        INTEL_CFL_S_GT1_IDS(info), \
        INTEL_CFL_S_GT2_IDS(info), \
        INTEL_CFL_H_GT2_IDS(info), \
-       INTEL_CFL_U_GT1_IDS(info), \
        INTEL_CFL_U_GT2_IDS(info), \
-       INTEL_CFL_U_GT3_IDS(info)
+       INTEL_CFL_U_GT3_IDS(info), \
+       INTEL_WHL_U_GT1_IDS(info), \
+       INTEL_WHL_U_GT2_IDS(info), \
+       INTEL_WHL_U_GT3_IDS(info)
 
 /* CNL */
 #define INTEL_CNL_IDS(info) \
index 56e4a916b5e85192711a77e045ab5a9ab6e2ff4a..fe9827d0ca8a5c9c5d1c7245dc6d49c4c2b238e7 100644 (file)
 
 /**
  * struct tinydrm_device - tinydrm device
- * @drm: DRM device
- * @pipe: Display pipe structure
- * @dirty_lock: Serializes framebuffer flushing
- * @fb_funcs: Framebuffer functions used when creating framebuffers
  */
 struct tinydrm_device {
+       /**
+        * @drm: DRM device
+        */
        struct drm_device *drm;
+
+       /**
+        * @pipe: Display pipe structure
+        */
        struct drm_simple_display_pipe pipe;
+
+       /**
+        * @dirty_lock: Serializes framebuffer flushing
+        */
        struct mutex dirty_lock;
+
+       /**
+        * @fb_funcs: Framebuffer functions used when creating framebuffers
+        */
        const struct drm_framebuffer_funcs *fb_funcs;
+
+       /**
+        * @fb_dirty: Framebuffer dirty callback
+        */
        int (*fb_dirty)(struct drm_framebuffer *framebuffer,
                        struct drm_file *file_priv, unsigned flags,
                        unsigned color, struct drm_clip_rect *clips,
index c67977aa1a0eb6d3f83441869c3689e95d64508e..a01ba2032f0ef7042653e08f7563c89518c475ed 100644 (file)
@@ -283,18 +283,30 @@ struct ttm_operation_ctx {
 /* when serving page fault or suspend, allow alloc anyway */
 #define TTM_OPT_FLAG_FORCE_ALLOC               0x2
 
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+       kref_get(&bo->kref);
+}
+
 /**
  * ttm_bo_reference - reference a struct ttm_buffer_object
  *
  * @bo: The buffer object.
  *
  * Returns a refcounted pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_get instead.
  */
 
 static inline struct ttm_buffer_object *
 ttm_bo_reference(struct ttm_buffer_object *bo)
 {
-       kref_get(&bo->kref);
+       ttm_bo_get(bo);
        return bo;
 }
 
@@ -345,12 +357,23 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    struct ttm_operation_ctx *ctx);
 
+/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
 /**
  * ttm_bo_unref
  *
  * @bo: The buffer object.
  *
  * Unreference and clear a pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_put instead.
  */
 void ttm_bo_unref(struct ttm_buffer_object **bo);
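The get/put pair above follows the kernel-wide kref naming convention and is
a one-to-one replacement for the deprecated calls. A hedged before/after
sketch:

        /* deprecated style */
        ttm_bo_reference(bo);
        ttm_bo_unref(&bo);      /* also clears the pointer */

        /* preferred style */
        ttm_bo_get(bo);
        ttm_bo_put(bo);
        bo = NULL;              /* clear manually where callers expect it */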
 
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644 (file)
index 0000000..7c492b4
--- /dev/null
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Huang Rui <ray.huang@amd.com>
+ */
+
+#ifndef TTM_SET_MEMORY
+#define TTM_SET_MEMORY
+
+#include <linux/mm.h>
+
+#ifdef CONFIG_X86
+
+#include <asm/set_memory.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+       return set_pages_array_wb(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+       return set_pages_array_wc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+       return set_pages_array_uc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+       return set_pages_wb(page, numpages);
+}
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+       unsigned long addr = (unsigned long)page_address(page);
+
+       return set_memory_wc(addr, numpages);
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+       return set_pages_uc(page, numpages);
+}
+
+#else /* for CONFIG_X86 */
+
+#if IS_ENABLED(CONFIG_AGP)
+
+#include <asm/agp.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               unmap_page_from_agp(pages[i]);
+       return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+       return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+       int i;
+
+       for (i = 0; i < addrinarray; i++)
+               map_page_into_agp(pages[i]);
+       return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+       int i;
+
+       for (i = 0; i < numpages; i++)
+               unmap_page_from_agp(page++);
+       return 0;
+}
+
+#else /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+       return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+       return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+       return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+       return 0;
+}
+
+#endif /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+       return 0;
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+       return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
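These wrappers give TTM a single caching-attribute API across x86 (real
set_memory_*/set_pages_* calls), AGP (map/unmap into the aperture) and
everything else (no-ops). A hedged sketch of the intended call pattern in a
page-pool style allocator:

        /* make freshly allocated pages write-combined for the device */
        if (ttm_set_pages_array_wc(pages, npages))
                goto out_free;

        /* ... pages are handed to the device here ... */

        /* restore write-back caching before returning pages to the
         * page allocator */
        ttm_set_pages_array_wb(pages, npages);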
index 9564597cbfac59aa1837d4de80a2f1c18d64cd7f..0aa1d9c3e0b968479af93c764b438f9ae35bece3 100644 (file)
 #define IMX6UL_CLK_CSI_PODF            222
 #define IMX6UL_CLK_PLL3_120M           223
 #define IMX6UL_CLK_KPP                 224
-#define IMX6UL_CLK_CKO1_SEL            225
-#define IMX6UL_CLK_CKO1_PODF           226
-#define IMX6UL_CLK_CKO1                        227
-#define IMX6UL_CLK_CKO2_SEL            228
-#define IMX6UL_CLK_CKO2_PODF           229
-#define IMX6UL_CLK_CKO2                        230
-#define IMX6UL_CLK_CKO                 231
-
-/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED          232
-#define IMX6ULL_CLK_ESAI_PODF          233
-#define IMX6ULL_CLK_ESAI_EXTAL         234
-#define IMX6ULL_CLK_ESAI_MEM           235
-#define IMX6ULL_CLK_ESAI_IPG           236
-#define IMX6ULL_CLK_DCP_CLK            237
-#define IMX6ULL_CLK_EPDC_PRE_SEL       238
-#define IMX6ULL_CLK_EPDC_SEL           239
-#define IMX6ULL_CLK_EPDC_PODF          240
-#define IMX6ULL_CLK_EPDC_ACLK          241
-#define IMX6ULL_CLK_EPDC_PIX           242
-#define IMX6ULL_CLK_ESAI_SEL           243
+#define IMX6ULL_CLK_ESAI_PRED          225
+#define IMX6ULL_CLK_ESAI_PODF          226
+#define IMX6ULL_CLK_ESAI_EXTAL         227
+#define IMX6ULL_CLK_ESAI_MEM           228
+#define IMX6ULL_CLK_ESAI_IPG           229
+#define IMX6ULL_CLK_DCP_CLK            230
+#define IMX6ULL_CLK_EPDC_PRE_SEL       231
+#define IMX6ULL_CLK_EPDC_SEL           232
+#define IMX6ULL_CLK_EPDC_PODF          233
+#define IMX6ULL_CLK_EPDC_ACLK          234
+#define IMX6ULL_CLK_EPDC_PIX           235
+#define IMX6ULL_CLK_ESAI_SEL           236
+#define IMX6UL_CLK_CKO1_SEL            237
+#define IMX6UL_CLK_CKO1_PODF           238
+#define IMX6UL_CLK_CKO1                        239
+#define IMX6UL_CLK_CKO2_SEL            240
+#define IMX6UL_CLK_CKO2_PODF           241
+#define IMX6UL_CLK_CKO2                        242
+#define IMX6UL_CLK_CKO                 243
 #define IMX6UL_CLK_END                 244
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h
new file mode 100644 (file)
index 0000000..25164d7
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/* Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+
+#define CLK_TCON_TOP_TV0       0
+#define CLK_TCON_TOP_TV1       1
+#define CLK_TCON_TOP_DSI       2
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */
index 4b35a66383f983f5594f3b71885145d6b1b101ef..e54f40974eb04ca516987ac3df89b0997b5ca0dd 100644 (file)
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
 int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name);
 
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level);
+
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644 (file)
index 0000000..4cc4020
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/kernel.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+       return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+       int i;
+
+       if (in == 0)
+               return "z";
+
+       out[5] = '\0';
+       for (i = 5; i--; ) {
+               out[i] = '!' + in % 85;
+               in /= 85;
+       }
+
+       return out;
+}
+
+#endif
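
As a usage sketch (not taken from this commit; the helper and the seq_file plumbing are illustrative), a driver could stream a binary buffer out one 32-bit word at a time, with the "z" shorthand for all-zero words handled inside ascii85_encode():

	#include <linux/ascii85.h>
	#include <linux/seq_file.h>

	/* Hypothetical helper: 'm' and 'buf' are supplied by the caller. */
	static void example_dump_ascii85(struct seq_file *m, const u32 *buf, long words)
	{
		char out[ASCII85_BUFSZ];	/* 5 digits + NUL */
		long i;

		for (i = 0; i < words; i++)
			seq_puts(m, ascii85_encode(buf[i], out));
	}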
index 0c27515d2cf6db3683da2341a700283f82a99645..8124815eb1218b5653572fc4a04f5d4d734e3469 100644 (file)
@@ -214,6 +214,7 @@ struct atmphy_ops {
 struct atm_skb_data {
        struct atm_vcc  *vcc;           /* ATM VCC */
        unsigned long   atm_options;    /* ATM layer options */
+       unsigned int    acct_truesize;  /* truesize accounted to vcc */
 };
 
 #define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+       /*
+        * Because ATM skbs may not belong to a sock (and we don't
+        * necessarily want to), skb->truesize may be adjusted,
+        * escaping the hack in pskb_expand_head() which avoids
+        * doing so for some cases. So stash the value of truesize
+        * at the time we accounted it, and atm_pop_raw() can use
+        * that value later, in case it changes.
+        */
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       ATM_SKB(skb)->acct_truesize = skb->truesize;
+       ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
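
A minimal transmit-path sketch (the driver function is hypothetical) of where the new helper slots in; the matching pop handler later consumes ATM_SKB(skb)->acct_truesize rather than the possibly-changed skb->truesize:

	static int example_vcc_send(struct atm_vcc *vcc, struct sk_buff *skb)
	{
		atm_account_tx(vcc, skb);	/* charge vcc and stash truesize now */
		/* ... hand skb to hardware; even if pskb_expand_head() later
		 * adjusts skb->truesize, acct_truesize keeps the charged value. */
		return 0;
	}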
index 0bd432a4d7bd00ce376292720edd104d617c80c2..24251762c20c94edd238cfca1c1f55f0269d4e80 100644 (file)
@@ -22,7 +22,6 @@ struct dentry;
  */
 enum wb_state {
        WB_registered,          /* bdi_register() was done */
-       WB_shutting_down,       /* wb_shutdown() in progress */
        WB_writeback_running,   /* Writeback is in progress */
        WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
        WB_start_all,           /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
+       struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
 #else
        struct bdi_writeback_congested *wb_congested;
 #endif
index e3147eb74222b868a014f498f1186a7a6c661804..ca3f2c2edd8573ac89e20447a12aff26bb599377 100644 (file)
@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if request state was successfully set to complete. If
+ * successful, the caller is responsible for seeing this request is ended, as
+ * blk_mq_complete_request will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+       return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+                       MQ_RQ_IN_FLIGHT;
+}
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
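
A timeout-handler sketch (names illustrative, not from this commit) of the intended use: only the context that wins the cmpxchg may end the request, so a racing normal completion and a timeout cannot both finish it:

	static enum blk_eh_timer_return example_timeout(struct request *rq, bool reserved)
	{
		if (blk_mq_mark_complete(rq))
			blk_mq_end_request(rq, BLK_STS_TIMEOUT);
		return BLK_EH_DONE;
	}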
index 9154570edf2963628f873d7404930450735ff41a..79226ca8f80f2db7f813cf63973c61288c1b78ab 100644 (file)
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
        if (!q->limits.chunk_sectors)
                return q->limits.max_sectors;
 
-       return q->limits.chunk_sectors -
-                       (offset & (q->limits.chunk_sectors - 1));
+       return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+                       (offset & (q->limits.chunk_sectors - 1))));
 }
 
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
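
A brief worked example of the clamp (values illustrative): with chunk_sectors = 128 and max_sectors = 64, an I/O at offset 0 previously got back 128 - (0 & 127) = 128 sectors, exceeding the queue's max_sectors; the clamped version returns min(64, 128) = 64.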
index 975fb4cf1bb743ccff5fae92e82df582533c0ff2..d50c2f0a655ae3f95271d5f8de40f8eabc917c65 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BPF_CGROUP_H
 #define _BPF_CGROUP_H
 
+#include <linux/errno.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/bpf.h>
 
@@ -188,12 +189,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                                                              \
        __ret;                                                                \
 })
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr);
 #else
 
+struct bpf_prog;
 struct cgroup_bpf {};
 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype,
+                                        struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                                       union bpf_attr __user *uattr)
+{
+       return -EINVAL;
+}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
index 995c3b1e59bfa82ef3ad0504b090ab28a898f016..8827e797ff97d0973ddf1d4217a885cee9bb63ee 100644 (file)
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+                                          struct sk_buff *skb,
+                                          struct bpf_prog *xdp_prog)
+{
+       return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -684,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog);
 #else
 static inline struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -702,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
 {
        return -EOPNOTSUPP;
 }
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                                     struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
 #endif
 
 #if defined(CONFIG_XDP_SOCKETS)
index 5f8a4283092d0a6960fd663a33832221a9615353..9d9ff755ec2972cf6e46d1905e1a5caae9dd5ae6 100644 (file)
@@ -5,11 +5,12 @@
 #include <uapi/linux/bpf.h>
 
 #ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int lirc_prog_detach(const union bpf_attr *attr);
 int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
 #else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+                                  struct bpf_prog *prog)
 {
        return -EINVAL;
 }
index 687b1760bb9f87755f50fe575d30988259248e19..f02cee0225d40afcbb40e8bfc15d49e4a252ecb1 100644 (file)
@@ -5,10 +5,10 @@
 #include <uapi/linux/bpfilter.h>
 
 struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
                            unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
-                           int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+                           int __user *optlen);
 extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
                                       char __user *optval,
                                       unsigned int optlen, bool is_set);
index b1a5562b3215b71302422b7a727bbb2cf499d8f3..c68acc47da57b6a7bef7b8ef84a9c897d4b83ce6 100644 (file)
@@ -72,6 +72,9 @@
  */
 #ifndef COMPAT_SYSCALL_DEFINEx
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)                                   \
+       __diag_push();                                                          \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                              \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));       \
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))        \
                __attribute__((alias(__stringify(__se_compat_sys##name))));     \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));  \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))   \
        {                                                                       \
-               return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               __MAP(x,__SC_TEST,__VA_ARGS__);                                 \
+               return ret;                                                     \
        }                                                                       \
+       __diag_pop();                                                           \
        static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* COMPAT_SYSCALL_DEFINEx */
 
index f1a7492a5cc8cc59813734d1b258dbaf04bf76c8..573f5a7d42d4fc9d1cbeecd6deb019d8d6b4d983 100644 (file)
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
+/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline  __attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
 /*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
  * -Wunused-function.  This turns out to avoid the need for complex #ifdef
  * directives.  Suppress the warning in clang as well by using "unused"
  * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||               \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline          __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__  __attribute__((always_inline,unused)) notrace
-#define __inline __inline      __attribute__((always_inline,unused)) notrace
+#define inline \
+       inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline          __attribute__((unused)) notrace
-#define __inline__ __inline__  __attribute__((unused)) notrace
-#define __inline __inline      __attribute__((unused)) notrace
+#define inline inline          __attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline        inline __attribute__((always_inline))
 #define  noinline      __attribute__((noinline))
 
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+       __diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore      ignored
+#define __diag_GCC_warn                warning
+#define __diag_GCC_error       error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s)         #s
+#define __diag_str(s)          __diag_str1(s)
+#define __diag(s)              _Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s)                __diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
index 6b79a9bba9a7630eb0b3a8fe35251d41717a2da0..a8ba6b04152c13c9ca2960898cd6ea4e89d37957 100644 (file)
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push()  __diag(push)
+#define __diag_pop()   __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+       __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+       __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+       __diag_ ## compiler(version, error, option)
+
 #endif /* __LINUX_COMPILER_TYPES_H */
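
These no-op fallbacks let the pattern used elsewhere in this series compile on any compiler; the syscall wrapper macros bracket only the offending definitions:

	__diag_push();
	__diag_ignore(GCC, 8, "-Wattribute-alias",
		      "Type aliasing is used to sanitize syscall arguments");
	/* ... declarations whose attribute(alias) target has a differing type ... */
	__diag_pop();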
index dfd6b0e9785517dc1474aed8b14d685b3ebaa656..f59f3dbca65cb02d257ca0f1cf69f508200d508e 100644 (file)
@@ -21,6 +21,7 @@ struct console_font_op;
 struct console_font;
 struct module;
 struct tty_struct;
+struct notifier_block;
 
 /*
  * this is what the terminal answers to an ESC-Z or csi0c query.
@@ -220,4 +221,8 @@ static inline bool vgacon_text_force(void) { return false; }
 
 extern void console_init(void);
 
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
 #endif /* _LINUX_CONSOLE_H */
index 3855e3800f483e07cc4c16e68f6a1f2780de1b3e..deb0f663252fc55e39546c7d3107e96dfb3f03ae 100644 (file)
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size, pfn_t pfn);
index e6c0448ebcc7f3f9c10b0b4ccdfb593d256a3b88..31c865d1842e88671d7f29534587311c479fb3e7 100644 (file)
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
 
 static inline void delayacct_blkio_end(struct task_struct *p)
 {
-       if (current->delays)
+       if (p->delays)
                __delayacct_blkio_end(p);
        delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
index 085db2fee2d71b603bee462dc29c7cccb4010866..58725f890b5b6185cda07d48591d9f1162f78c3d 100644 (file)
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
 
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
  *             space, users may not block until the subsequent unmap call.
  *             This callback must not sleep.
  * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
  *               This callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
  * @unmap: [optional] unmaps a page from the buffer.
  * @vmap: [optional] creates a virtual mapping for the buffer into kernel
  *       address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
         * @attach:
         *
         * This is called from dma_buf_attach() to make sure that a given
-        * &device can access the provided &dma_buf. Exporters which support
-        * buffer objects in special locations like VRAM or device-specific
-        * carveout areas should check whether the buffer could be move to
-        * system memory (or directly accessed by the provided device), and
-        * otherwise need to fail the attach operation.
+        * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+        * which support buffer objects in special locations like VRAM or
+        * device-specific carveout areas should check whether the buffer could
+        * be moved to system memory (or directly accessed by the provided
+        * device), and otherwise need to fail the attach operation.
         *
         * The exporter should also in general check whether the current
         * allocation fulfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
         * to signal that backing storage is already allocated and incompatible
         * with the requirements of requesting device.
         */
-       int (*attach)(struct dma_buf *, struct device *,
-                     struct dma_buf_attachment *);
+       int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
 
        /**
         * @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
         * to be restarted.
         */
        int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
-       void *(*map_atomic)(struct dma_buf *, unsigned long);
-       void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
        void *(*map)(struct dma_buf *, unsigned long);
        void (*unmap)(struct dma_buf *, unsigned long, void *);
 
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
                           enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
 void *dma_buf_kmap(struct dma_buf *, unsigned long);
 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
 
index b67bf6ac907d8f324494efaf1d441b0ee7955a13..3c5a4cb3eb953174c688c4b965ba09d87925fdb3 100644 (file)
@@ -48,7 +48,7 @@
  *   CMA should not be used by the device drivers directly. It is
  *   only a helper framework for dma-mapping subsystem.
  *
- *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ *   For more information, see kernel-docs in kernel/dma/contiguous.c
  */
 
 #ifdef __KERNEL__
index eb9b05aa5aea14772cfad2dfdec2d4e5359dcec8..02dba8cd033d8e8d9ce9d673a2bd14e91fd87ea2 100644 (file)
@@ -166,7 +166,8 @@ struct dma_fence_ops {
         * released when the fence is signalled (through e.g. the interrupt
         * handler).
         *
-        * This callback is mandatory.
+        * This callback is optional. If this callback is not present, then the
+        * driver must always have signaling enabled.
         */
        bool (*enable_signaling)(struct dma_fence *fence);
 
@@ -190,11 +191,14 @@ struct dma_fence_ops {
        /**
         * @wait:
         *
-        * Custom wait implementation, or dma_fence_default_wait.
+        * Custom wait implementation, defaults to dma_fence_default_wait() if
+        * not set.
         *
-        * Must not be NULL, set to dma_fence_default_wait for default implementation.
-        * the dma_fence_default_wait implementation should work for any fence, as long
-        * as enable_signaling works correctly.
+        * The dma_fence_default_wait implementation should work for any fence, as long
+        * as @enable_signaling works correctly. This hook allows drivers to
+        * have an optimized version for the case where a process context is
+        * already available, e.g. if @enable_signaling for the general case
+        * needs to set up a worker thread.
         *
         * Must return -ERESTARTSYS if the wait is intr = true and the wait was
         * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -202,7 +206,7 @@ struct dma_fence_ops {
         * which should be treated as if the fence is signaled. For example a hardware
         * lockup could be reported like that.
         *
-        * This callback is mandatory.
+        * This callback is optional.
         */
        signed long (*wait)(struct dma_fence *fence,
                            bool intr, signed long timeout);
@@ -217,17 +221,6 @@ struct dma_fence_ops {
         */
        void (*release)(struct dma_fence *fence);
 
-       /**
-        * @fill_driver_data:
-        *
-        * Callback to fill in free-form debug info.
-        *
-        * Returns amount of bytes filled, or negative error on failure.
-        *
-        * This callback is optional.
-        */
-       int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
-
        /**
         * @fence_value_str:
         *
@@ -242,8 +235,9 @@ struct dma_fence_ops {
         * @timeline_value_str:
         *
         * Fills in the current value of the timeline as a string, like the
-        * sequence number. This should match what @fill_driver_data prints for
-        * the most recently signalled fence (assuming no delayed signalling).
+        * sequence number. Note that the specific fence passed to this function
+        * should not matter, drivers should only use it to look up the
+        * corresponding timeline structures.
         */
        void (*timeline_value_str)(struct dma_fence *fence,
                                   char *str, int size);
index 7094718b653b7b4ce1ebdc4d8ca37734b4284438..ffcc7724ca210097f70cca236d8e249d09541e64 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/fcntl.h>
 #include <linux/wait.h>
+#include <linux/err.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
index 45fc0f5000d8899ead3592cbdaa813d726e2c2af..c73dd7396886751938a0e2e1355d2aa28797ad87 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -469,15 +470,16 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-       unsigned int pages;
-       u8 image[];
+       u32 pages;
+       /* Some arches need word alignment for their instructions */
+       u8 image[] __aligned(4);
 };
 
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
-                               locked:1,       /* Program image locked? */
+                               undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -671,50 +673,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-       fp->locked = 1;
-       WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-       if (fp->locked) {
-               WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-               /* In case set_memory_rw() fails, we want to be the first
-                * to crash here instead of some random place later on.
-                */
-               fp->locked = 0;
-       }
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+       fp->undo_set_mem = 1;
+       set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+       if (fp->undo_set_mem)
+               set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_rw((unsigned long)hdr, hdr->pages);
 }
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -786,6 +765,21 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+                                unsigned int pktlen)
+{
+       unsigned int len;
+
+       if (unlikely(!(fwd->flags & IFF_UP)))
+               return -ENETDOWN;
+
+       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+       if (pktlen > len)
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
@@ -961,6 +955,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 }
 #endif /* CONFIG_BPF_JIT */
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
 #define BPF_ANC                BIT(15)
 
 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
index 5c91108846db20894ab70dafe43b7922fe08fb1f..805bf22898cf23aefe2dc013bb1ee7e96b5b7685 100644 (file)
@@ -1720,8 +1720,6 @@ struct file_operations {
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -2422,6 +2420,7 @@ extern struct file *filp_open(const char *, int, umode_t);
 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
                                   const char *, int, umode_t);
 extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern struct file *filp_clone_open(struct file *);
 extern int filp_close(struct file *, fl_owner_t id);
 
 extern struct filename *getname_flags(const char __user *, int, int *);
index 3efa3b861d44cae46670532c9db208d8630099a9..941b11811f85915bd70a730bbc338288d995493b 100644 (file)
@@ -16,6 +16,7 @@
 #define __FSL_GUTS_H__
 
 #include <linux/types.h>
+#include <linux/io.h>
 
 /**
  * Global Utility Registers.
index 8154f4920fcb9de96a24ec7b85d9b92f56968122..ebb77674be90cfff4466667c7bb62c121db5a235 100644 (file)
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
  */
 int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
 
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
 {
        return 0;
 }
-static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_free_init_mem(void) { }
 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
index 41a3d5775394fed48e7b880317eaf6c1944c2817..773bcb1d4044ed2d83d4a1504f951951fa639d94 100644 (file)
@@ -511,6 +511,7 @@ struct hid_output_fifo {
 #define HID_STAT_ADDED         BIT(0)
 #define HID_STAT_PARSED                BIT(1)
 #define HID_STAT_DUP_DETECTED  BIT(2)
+#define HID_STAT_REPROBED      BIT(3)
 
 struct hid_input {
        struct list_head list;
@@ -579,7 +580,7 @@ struct hid_device {                                                 /* device report descriptor */
        bool battery_avoid_query;
 #endif
 
-       unsigned int status;                                            /* see STAT flags above */
+       unsigned long status;                                           /* see STAT flags above */
        unsigned claimed;                                               /* Claimed by hidinput, hiddev? */
        unsigned quirks;                                                /* Various quirks the device can pull on us */
        bool io_started;                                                /* If IO has started */
index 7843b98e1c6ea7802dcea3f8b5a944d2355398d5..c20c7e197d0731e58b0f68b87531299080e8421a 100644 (file)
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
 
 static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
 {
-       return -1;
+       return -EINVAL;
 }
 
 static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
                                   struct bridge_vlan_info *p_vinfo)
 {
-       return -1;
+       return -EINVAL;
 }
 #endif
 
index f8231854b5d60316310fc5d8e57eea8625fe3078..119f53941c124c22452bf615f9ccca5a9130bb87 100644 (file)
@@ -109,6 +109,8 @@ struct ip_mc_list {
 extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
 extern int igmp_rcv(struct sk_buff *);
 extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+                               unsigned int mode);
 extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
 extern void ip_mc_drop_socket(struct sock *sk);
 extern int ip_mc_source(int add, int omode, struct sock *sk,
index 767467d886de4d53f5f5b862614b3f1644a5ecfa..67c75372b6915289e6d0876ac21368c89eb3896a 100644 (file)
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
        char __user *user_buffer);
 size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
 
 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
index d7188de4db968c14c5db1a44fca6421aec22d041..3f4bf60b0bb55c4d9d2708593d2439aec269f9c9 100644 (file)
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
        return axis == ABS_MT_SLOT || input_is_mt_value(axis);
 }
 
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
 
 void input_mt_report_finger_count(struct input_dev *dev, int count);
index 1df940196ab2bd0987a177930be126f0828fd733..ef169d67df9217a8bf9d1dad19be7920ae0352f2 100644 (file)
 #define ecap_srs(e)            ((e >> 31) & 0x1)
 #define ecap_ers(e)            ((e >> 30) & 0x1)
 #define ecap_prs(e)            ((e >> 29) & 0x1)
+#define ecap_broken_pasid(e)   ((e >> 28) & 0x1)
 #define ecap_dis(e)            ((e >> 27) & 0x1)
 #define ecap_nest(e)           ((e >> 26) & 0x1)
 #define ecap_mts(e)            ((e >> 25) & 0x1)
index 4bd2f34947f4a7647a485fe2e8092c1fd055f630..201de12a9957171003757967bb69161c3d060575 100644 (file)
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:      Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:       One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:       Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
index 25b33b66453773cb01509725fa68664c555ffd3f..dd1e40ddac7d8235e31aeb96fe460c77a70ac681 100644 (file)
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
        return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-       return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
index d231232385349146faf64c42cd05a73bddd0fcca..941dc0a5a877998e46d11541bdb491655a5f34af 100644 (file)
@@ -666,7 +666,7 @@ do {                                                                        \
  * your code. (Extra memory is used for special buffers that are
  * allocated when trace_printk() is used.)
  *
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
  * The trace_puts() will suffice. But how can we take advantage of
  * using trace_puts() when trace_printk() has only one argument?
index 2803264c512f8f6bf80dffc462c4a7ab079ce5f3..c1961761311dbfd5968d6ed64ea91ca3c7d25b0e 100644 (file)
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;
index 8b8946dd63b9d4df3d08c5051604fce0fc147be1..32f247cb5e9ea0c107970d31f135c903dfd04c55 100644 (file)
@@ -210,6 +210,7 @@ enum {
        ATA_FLAG_SLAVE_POSS     = (1 << 0), /* host supports slave dev */
                                            /* (doesn't imply presence) */
        ATA_FLAG_SATA           = (1 << 1),
+       ATA_FLAG_NO_LPM         = (1 << 2), /* host not happy with LPM */
        ATA_FLAG_NO_LOG_PAGE    = (1 << 5), /* do not issue log page read */
        ATA_FLAG_NO_ATAPI       = (1 << 6), /* No ATAPI support */
        ATA_FLAG_PIO_DMA        = (1 << 7), /* PIO cmds via DMA */
@@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag)
        return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
 }
 
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn)            \
+       for ((tag) = 0; (tag) < (max_tag) &&                    \
+            ({ qc = fn((ap), (tag)); 1; }); (tag)++)           \
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag)                                       \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag)                                   \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag)                     \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
 /*
  * device helpers
  */
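
A usage sketch (the loop body is hypothetical): ata_qc_from_tag() can return NULL for inactive tags, so callers of the non-raw iterators still check qc before touching it:

	struct ata_queued_cmd *qc;
	unsigned int tag;

	ata_qc_for_each_with_internal(ap, qc, tag) {
		if (!qc)
			continue;	/* tag not in use */
		/* ... inspect or abort qc ... */
	}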
index 4f5f8c21e2830bd3c7de20bd509b360f27e66816..1eb6f244588dae1efa08a2c9dcb8e46460592bcc 100644 (file)
@@ -27,6 +27,8 @@
  */
 #define MARVELL_PHY_ID_88E6390         0x01410f90
 
+#define MARVELL_PHY_FAMILY_ID(id)      ((id) >> 4)
+
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE     0x00000001
 #define MARVELL_PHY_M1118_DNS323_LEDS          0x00000002
index 31ca3e28b0ebe98369a1582430230a2f68c6baae..a6ddefc60517899167b55b53b0007ba3e3b9ed80 100644 (file)
@@ -38,6 +38,7 @@ struct memory_block {
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
 unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
 
 /* These states are exposed to userspace as text strings in sysfs */
 #define        MEM_ONLINE              (1<<0) /* exposed to userspace */
index 80cbb7fdce4a1a9afea00cc7dbfbe16249871b2d..83957920653a0adeb08a90211f937e6227cb32a8 100644 (file)
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
        struct mlx5_frag_buf    frag_buf;
        u32                     sz_m1;
        u32                     frag_sz_m1;
+       u32                     strides_offset;
        u8                      log_sz;
        u8                      log_stride;
        u8                      log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
        return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
-                                struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+                                       u32 strides_offset,
+                                       struct mlx5_frag_buf_ctrl *fbc)
 {
        fbc->log_stride = log_stride;
        fbc->log_sz     = log_sz;
        fbc->sz_m1      = (1 << fbc->log_sz) - 1;
        fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
        fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+       fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+                                struct mlx5_frag_buf_ctrl *fbc)
+{
+       mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
 }
 
 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
                                          u32 ix)
 {
-       unsigned int frag = (ix >> fbc->log_frag_strides);
+       unsigned int frag;
+
+       ix  += fbc->strides_offset;
+       frag = ix >> fbc->log_frag_strides;
 
        return fbc->frag_buf.frags[frag].buf +
                ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
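
A worked example of the offset handling (values illustrative): with log_stride = 6 (64-byte strides) and PAGE_SHIFT = 12, log_frag_strides = 6 and frag_sz_m1 = 63. For strides_offset = 128, a lookup of ix = 0 first becomes ix = 128, giving frag = 128 >> 6 = 2 and an in-fragment offset of (63 & 128) << 6 = 0; that is, logical index 0 now starts two page fragments into the buffer.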
index d3c9db492b30065750726992ba1001c48153232b..fab5121ffb8f5de2b5f39b6a0a7e43cca4b047e0 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/mlx5/driver.h>
 
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
 enum {
        SRIOV_NONE,
        SRIOV_LEGACY,
index 27134c4fcb76eb5140ff4828066e73e11d671cd9..ac281f5ec9b8077ba859f33eaf61e3f03ecdeb3d 100644 (file)
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         vnic_env_queue_counters[0x1];
        u8         ets[0x1];
        u8         nic_flow_table[0x1];
-       u8         eswitch_flow_table[0x1];
+       u8         eswitch_manager[0x1];
        u8         device_memory[0x1];
        u8         mcam_reg[0x1];
        u8         pcam_reg[0x1];
index a0fbb9ffe3805276a16c485564de77047898a18e..7ba6d356d18fbb27bedc6e5847a7a634a6a71516 100644 (file)
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  * mmap() functions).
  */
 
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
 
 #ifndef CONFIG_MMU
 extern struct rb_root nommu_region_tree;
@@ -450,6 +452,20 @@ struct vm_operations_struct {
                                          unsigned long addr);
 };
 
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+       static const struct vm_operations_struct dummy_vm_ops = {};
+
+       vma->vm_mm = mm;
+       vma->vm_ops = &dummy_vm_ops;
+       INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+       vma->vm_ops = NULL;
+}
+
 struct mmu_gather;
 struct inode;
 
@@ -2132,7 +2148,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state);
 #endif
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 void zero_resv_unavail(void);
 #else
 static inline void zero_resv_unavail(void) {}
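
A sketch of the replacement pattern (error handling trimmed; the surrounding code is hypothetical): callers that previously allocated from the exported vm_area_cachep and hand-initialized now go through the helpers:

	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (!vma)
		return -ENOMEM;
	vma_set_anonymous(vma);		/* anonymous mapping: vm_ops stays NULL */
	/* ... set vm_start/vm_end/vm_flags, then insert into mm;
	 * on failure paths, undo with vm_area_free(vma). */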
index 2014bd19f28eff41ae37b80eba324644c537e291..96a71a648eed991530489ecea56b89d8755b395c 100644 (file)
@@ -501,6 +501,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_SKU,
        DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
index 08b6eb964dd6865af3e1a7079a54b1e99f77e077..6554d3ba4396b3df49acac934ad16eeb71a695f4 100644 (file)
@@ -147,7 +147,6 @@ struct proto_ops {
        int             (*getname)   (struct socket *sock,
                                      struct sockaddr *addr,
                                      int peer);
-       __poll_t        (*poll_mask) (struct socket *sock, __poll_t events);
        __poll_t        (*poll)      (struct file *file, struct socket *sock,
                                      struct poll_table_struct *wait);
        int             (*ioctl)     (struct socket *sock, unsigned int cmd,
index 3ec9850c7936f01c0f7564dbe519e95ce0849639..3d0cc0b5cec2d7514dbebf32effab9b1e6388c3c 100644 (file)
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       if (PTR_ERR(pp) != -EINPROGRESS) {
+               NAPI_GRO_CB(skb)->flush |= flush;
+               skb_gro_remcsum_cleanup(skb, grc);
+               skb->remcsum_offload = 0;
+       }
+}
 #else
 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 {
        NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, grc);
+       skb->remcsum_offload = 0;
+}
 #endif
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
index 9dee3c23895d82fae05025961fe83d15b23d45b7..712eed156d0912f1aecc97de222597f1d7cc5dc9 100644 (file)
@@ -1438,6 +1438,8 @@ enum {
        NFS_IOHDR_EOF,
        NFS_IOHDR_REDO,
        NFS_IOHDR_STAT,
+       NFS_IOHDR_RESEND_PNFS,
+       NFS_IOHDR_RESEND_MDS,
 };
 
 struct nfs_io_completion;
index 340029b2fb382cc15888d72fde0bf5a069a6467c..e04ab6265566dc3337e972cce5801a70a9ba58bf 100644 (file)
@@ -261,6 +261,9 @@ enum pci_bus_speed {
        PCI_SPEED_UNKNOWN               = 0xff,
 };
 
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
+
 struct pci_cap_saved_data {
        u16             cap_nr;
        bool            cap_extended;
@@ -1240,6 +1243,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
 unsigned long pci_address_to_pio(phys_addr_t addr);
 phys_addr_t pci_pio_to_address(unsigned long pio);
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+                          phys_addr_t phys_addr);
 void pci_unmap_iospace(struct resource *res);
 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
                                      resource_size_t offset,
index 9206a4fef9ac151905a825700c6ae7477d7cbd88..cb8d84090cfb7adb478d156727279aa48686d816 100644 (file)
@@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
 int of_genpd_parse_idle_states(struct device_node *dn,
                               struct genpd_power_state **states, int *n);
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                               struct device_node *opp_node);
+                               struct device_node *np);
 
 int genpd_dev_pm_attach(struct device *dev);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
 
 static inline unsigned int
 of_genpd_opp_to_performance_state(struct device *dev,
-                                 struct device_node *opp_node)
+                                 struct device_node *np)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline int genpd_dev_pm_attach(struct device *dev)
index fdf86b4cbc71bacca2795107532fb75e3855c0c9..7e0fdcf905d2e77b355c94a7381446927452723c 100644 (file)
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
        pt->_key   = ~(__poll_t)0; /* all events enabled */
 }
 
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
 {
-       return file->f_op->get_poll_head && file->f_op->poll_mask;
+       return file->f_op->poll;
 }
 
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
 {
-       return file->f_op->poll || file_has_poll_mask(file);
+       if (unlikely(!file->f_op->poll))
+               return DEFAULT_POLLMASK;
+       return file->f_op->poll(file, pt);
 }
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
 struct poll_table_entry {
        struct file *filp;
        __poll_t key;
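
An illustrative caller (not from this commit): vfs_poll() folds the NULL check into one place and returns DEFAULT_POLLMASK for files without a ->poll method, so one-shot queries need no special casing:

	poll_table pt;
	__poll_t mask;

	init_poll_funcptr(&pt, NULL);	/* NULL qproc: query only, no waiting */
	mask = vfs_poll(file, &pt);
	if (mask & EPOLLIN)
		/* readable without blocking */;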
index 4193c41e383a897273605aac39f331b46512691a..a685da2c4522b5583ec405d0c0ff49da6aa718c9 100644 (file)
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+                                                      spinlock_t *lock,
+                                                      unsigned long *flags);
 #endif /* _LINUX_REFCOUNT_H */
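
A sketch of the intended pattern for the new variant (the object layout is hypothetical), mirroring the existing refcount_dec_and_lock() idiom but usable where the lock must be taken with interrupts saved:

	unsigned long flags;

	if (refcount_dec_and_lock_irqsave(&obj->refs, &obj->lock, &flags)) {
		list_del(&obj->node);
		spin_unlock_irqrestore(&obj->lock, flags);
		kfree(obj);
	}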
index b72ebdff0b77619ceba8d05a72c5db1f66772f8d..003d09ab308d99681a56e01b0f3bd7f3c8cce4a0 100644 (file)
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
index 64125443f8a638e787adfbaebab4755f5d63852a..5ef5c7c412a75b5f24e276563d278be8ced3b212 100644 (file)
@@ -354,6 +354,8 @@ struct rmi_driver_data {
        struct mutex irq_mutex;
        struct input_dev *input;
 
+       struct irq_domain *irqdomain;
+
        u8 pdt_props;
 
        u8 num_rx_electrodes;
index 51f52020ad5fdd44ab4fdfa6ad2e0063c4780947..093aa57120b0cf1f40c2a75f28612331c6e6f6e0 100644 (file)
@@ -9,9 +9,6 @@
 #include <asm/io.h>
 
 struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-       unsigned long   sg_magic;
-#endif
        unsigned long   page_link;
        unsigned int    offset;
        unsigned int    length;
@@ -64,7 +61,6 @@ struct sg_table {
  *
  */
 
-#define SG_MAGIC       0x87654321
 #define SG_CHAIN       0x01UL
 #define SG_END         0x02UL
 
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~SG_END;
 }
 
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
 static inline void sg_init_marker(struct scatterlist *sgl,
                                  unsigned int nents)
 {
-#ifdef CONFIG_DEBUG_SG
-       unsigned int i;
-
-       for (i = 0; i < nents; i++)
-               sgl[i].sg_magic = SG_MAGIC;
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 87bf02d93a279a9b98df452c7ad78a0b54adc1db..43731fe51c972ad6c3d6cb277ac940ec5a939023 100644 (file)
@@ -118,7 +118,7 @@ struct task_group;
  * the comment with set_special_state().
  */
 #define is_special_task_state(state)                           \
-       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 
 #define __set_current_state(state_value)                       \
        do {                                                    \
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
                set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
        if (current->rseq)
-               __rseq_handle_notify_resume(regs);
+               __rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
        preempt_disable();
        __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
        preempt_enable();
-       rseq_handle_notify_resume(regs);
+       rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1833,7 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
@@ -1847,7 +1847,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
                t->rseq_len = current->rseq_len;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
-               rseq_preempt(t);
        }
 }
 
@@ -1864,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
index 5be31eb7b26647ceb8d9f34298db36ebe6b4da20..108ede99e5335033526f754ba640f8fd0205f457 100644 (file)
@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);
 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
 
 extern void free_task(struct task_struct *tsk);
 
index c8688595499421d9f051366d4a85e5553751768e..610a201126ee031166798baaf8ecae74fe478c4d 100644 (file)
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @xmit_more: More SKBs are pending for this queue
+ *     @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
  *     @ndisc_nodetype: router type (from link layer)
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -735,7 +736,7 @@ struct sk_buff {
                                peeked:1,
                                head_frag:1,
                                xmit_more:1,
-                               __unused:1; /* one bit hole */
+                               pfmemalloc:1;
 
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
@@ -754,31 +755,30 @@ struct sk_buff {
 
        __u8                    __pkt_type_offset[0];
        __u8                    pkt_type:3;
-       __u8                    pfmemalloc:1;
        __u8                    ignore_df:1;
-
        __u8                    nf_trace:1;
        __u8                    ip_summed:2;
        __u8                    ooo_okay:1;
+
        __u8                    l4_hash:1;
        __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
-
        __u8                    no_fcs:1;
        /* Indicates the inner headers are valid in the skbuff. */
        __u8                    encapsulation:1;
        __u8                    encap_hdr_csum:1;
        __u8                    csum_valid:1;
+
        __u8                    csum_complete_sw:1;
        __u8                    csum_level:2;
        __u8                    csum_not_inet:1;
-
        __u8                    dst_pending_confirm:1;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
 #endif
        __u8                    ipvs_property:1;
+
        __u8                    inner_protocol_type:1;
        __u8                    remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
@@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
                                  int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
                           struct iov_iter *to, int size);
 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
index 09fa2c6f0e68e69567b8b918cdda3f13f5ddaefa..3a1a1dbc6f49479f61f4c1a6588e6a672f0b0663 100644 (file)
@@ -155,8 +155,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SYSFS
 #define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
 #else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
 static inline void sysfs_slab_release(struct kmem_cache *s)
 {
 }
index 1e8a46435838456ae57af232664ce9686a4ac5c0..fd57888d4942e10166440da41d449a52fc8e1730 100644 (file)
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                       unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+               __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
 int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                           size_t max_size, unsigned int cpu_mult,
                           gfp_t gfp);
index 73810808cdf266e5cdcfc1e0c6b3af126a0bf4b1..5c1a0933768ee3202f360164d3defd1f3a81f5de 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _LINUX_SYSCALLS_H
 #define _LINUX_SYSCALLS_H
 
+struct __aio_sigset;
 struct epoll_event;
 struct iattr;
 struct inode;
@@ -231,6 +232,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
  */
 #ifndef __SYSCALL_DEFINEx
 #define __SYSCALL_DEFINEx(x, name, ...)                                        \
+       __diag_push();                                                  \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                      \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))       \
                __attribute__((alias(__stringify(__se_sys##name))));    \
        ALLOW_ERROR_INJECTION(sys##name, ERRNO);                        \
@@ -243,6 +247,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
                __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));       \
                return ret;                                             \
        }                                                               \
+       __diag_pop();                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* __SYSCALL_DEFINEx */
 
index 6c5f2074e14f36d1368e1723394d4da9ef0cf3ae..6f8b68cd460f8c2b0aff758848a5de5a3ad65d6c 100644 (file)
@@ -75,7 +75,7 @@ struct uio_device {
         struct fasync_struct    *async_queue;
         wait_queue_head_t       wait;
         struct uio_info         *info;
-       spinlock_t              info_lock;
+       struct mutex            info_lock;
         struct kobject          *map_dir;
         struct kobject          *portio_dir;
 };
index 39fda195bf788001448398275cf24545c20731c3..3af7c0e03be5444d92fd2b80d49bc916c139550d 100644 (file)
@@ -6,8 +6,10 @@
  *
  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  *
- * Wound/wait implementation:
+ * Wait/Die implementation:
  *  Copyright (C) 2013 Canonical Ltd.
+ * Choice of algorithm:
+ *  Copyright (C) 2018 VMware Inc.
  *
  * This file contains the main data structure and API definitions.
  */
@@ -23,14 +25,17 @@ struct ww_class {
        struct lock_class_key mutex_key;
        const char *acquire_name;
        const char *mutex_name;
+       unsigned int is_wait_die;
 };
 
 struct ww_acquire_ctx {
        struct task_struct *task;
        unsigned long stamp;
-       unsigned acquired;
+       unsigned int acquired;
+       unsigned short wounded;
+       unsigned short is_wait_die;
 #ifdef CONFIG_DEBUG_MUTEXES
-       unsigned done_acquire;
+       unsigned int done_acquire;
        struct ww_class *ww_class;
        struct ww_mutex *contending_lock;
 #endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
        struct lockdep_map dep_map;
 #endif
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-       unsigned deadlock_inject_interval;
-       unsigned deadlock_inject_countdown;
+       unsigned int deadlock_inject_interval;
+       unsigned int deadlock_inject_countdown;
 #endif
 };
 
@@ -58,17 +63,21 @@ struct ww_mutex {
 # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
 #endif
 
-#define __WW_CLASS_INITIALIZER(ww_class) \
+#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die)     \
                { .stamp = ATOMIC_LONG_INIT(0) \
                , .acquire_name = #ww_class "_acquire" \
-               , .mutex_name = #ww_class "_mutex" }
+               , .mutex_name = #ww_class "_mutex" \
+               , .is_wait_die = _is_wait_die }
 
 #define __WW_MUTEX_INITIALIZER(lockname, class) \
                { .base =  __MUTEX_INITIALIZER(lockname.base) \
                __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
 
+#define DEFINE_WD_CLASS(classname) \
+       struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
+
 #define DEFINE_WW_CLASS(classname) \
-       struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+       struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
 
 #define DEFINE_WW_MUTEX(mutexname, ww_class) \
        struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
  *
  * Context-based w/w mutex acquiring can be done in any order whatsoever within
  * a given lock class. Deadlocks will be detected and handled with the
- * wait/wound logic.
+ * wait/die logic.
  *
  * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
  * result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
        ctx->task = current;
        ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
        ctx->acquired = 0;
+       ctx->wounded = false;
+       ctx->is_wait_die = ww_class->is_wait_die;
 #ifdef CONFIG_DEBUG_MUTEXES
        ctx->ww_class = ww_class;
        ctx->done_acquire = 0;
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
  * Lock the w/w mutex exclusively for this task.
  *
  * Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
  * will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
  * same lock with the same context twice is also detected and signalled by
  * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
  *
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
  * the given context and then wait for this contending lock to be available by
  * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
  * lock and proceed with trying to acquire further w/w mutexes (e.g. when
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
  * Lock the w/w mutex exclusively for this task.
  *
  * Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
  * will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
  * same lock with the same context twice is also detected and signalled by
  * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
  * signal arrives while waiting for the lock then this function returns -EINTR.
  *
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
  * the given context and then wait for this contending lock to be available by
  * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
  * not acquire this lock and proceed with trying to acquire further w/w mutexes
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
  * @lock: the mutex to be acquired
  * @ctx: w/w acquire context
  *
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
  * will sleep until the lock becomes available.
  *
  * The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  * @lock: the mutex to be acquired
  * @ctx: w/w acquire context
  *
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
  * will sleep until the lock becomes available and returns 0 when the lock has
  * been acquired. If a signal arrives while waiting for the lock then this
  * function returns -EINTR.
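
To make the backoff contract above concrete, a hedged sketch for two mutexes (lock_pair() is hypothetical; ww_acquire_init() beforehand, and ww_acquire_done(), the unlocks and ww_acquire_fini() afterwards, are assumed to happen in the caller):

    static void lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                          struct ww_acquire_ctx *ctx)
    {
            int ret;

            /* The first lock taken in a context has nothing to back
             * off from, so it simply waits instead of dying.
             */
            ww_mutex_lock(a, ctx);

            ret = ww_mutex_lock(b, ctx);
            while (ret == -EDEADLK) {
                    /* Die case: release everything held, sleep on the
                     * contended mutex, then retry the remaining lock
                     * with the contended one already held.
                     */
                    ww_mutex_unlock(a);
                    ww_mutex_lock_slow(b, ctx);
                    swap(a, b);     /* "a" is now the mutex we hold */
                    ret = ww_mutex_lock(b, ctx);
            }
    }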
index 53ce8176c31306deaf9c2be5743546abe4d27b53..ec9d6bc658559c55b64ac3c1d23b4e1166cc4b04 100644 (file)
@@ -271,7 +271,7 @@ int  bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                     int flags);
 int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
index 5fbfe61f41c67f19713bf0e307ae0612428d68a6..1beb3ead038561d84c618757919871393f8c80c4 100644 (file)
@@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 /**
  * cfg80211_rx_control_port - notification about a received control port frame
  * @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skbuff with the control port frame.  It is assumed that the skbuff
+ *     is 802.3 formatted (with 802.3 header).  The skb can be non-linear.
+ *     This function does not take ownership of the skb, so the caller is
+ *     responsible for any cleanup.  The caller must also ensure that
+ *     skb->protocol is set appropriately.
  * @unencrypted: Whether the frame was received unencrypted
  *
  * This function is used to inform userspace about a received control port
@@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
  * Return: %true if the frame was passed to userspace
  */
 bool cfg80211_rx_control_port(struct net_device *dev,
-                             const u8 *buf, size_t len,
-                             const u8 *addr, u16 proto, bool unencrypted);
+                             struct sk_buff *skb, bool unencrypted);
 
 /**
  * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
index 5cba71d2dc44b9ea2366725ff68c9f668f639345..3d4930528db0d6f8bcdeaa7c141e2a800cbf0118 100644 (file)
@@ -170,6 +170,7 @@ struct fib6_info {
                                        unused:3;
 
        struct fib6_nh                  fib6_nh;
+       struct rcu_head                 rcu;
 };
 
 struct rt6_info {
@@ -273,17 +274,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
 }
 
 struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
 
 static inline void fib6_info_hold(struct fib6_info *f6i)
 {
        atomic_inc(&f6i->fib6_ref);
 }
 
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+       return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
        if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
-               fib6_info_destroy(f6i);
+               call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
 }
 
 enum fib6_walk_state {
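
A hedged sketch of why the _safe variant exists: an RCU reader can race with the final fib6_info_release(), so bumping a refcount that has already reached zero must be refused. Everything here except the two helpers is hypothetical:

    struct fib6_info *f6i;

    rcu_read_lock();
    f6i = my_rcu_lookup(net, daddr);        /* some RCU-protected lookup */
    if (f6i && !fib6_info_hold_safe(f6i))
            f6i = NULL;                     /* refcount already hit zero */
    rcu_read_unlock();

    /* f6i, if non-NULL, is now safely referenced; the final release
     * frees through call_rcu() rather than immediately.
     */
    if (f6i)
            fib6_info_release(f6i);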
index 59656fc580df7e0301e0c9282af9358a255b863f..7b9c82de11cc9388b070992af610e5fd14b66333 100644 (file)
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
                (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
 }
 
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+       return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+              RTF_GATEWAY;
+}
+
 void ip6_route_input(struct sk_buff *skb);
 struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
index 16475c269749a72f3c487e102e50cabff797317e..8f73be4945037c6d0997ec8ab7c3e9da3980a6e4 100644 (file)
@@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
 struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
-                                         struct ipv6_opt_hdr __user *newopt,
-                                         int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
-                       struct ipv6_txoptions *opt,
-                       int newtype,
-                       struct ipv6_opt_hdr *newopt,
-                       int newoptlen);
+                                         struct ipv6_opt_hdr *newopt);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);
 
@@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
         * to minimize the possibility that any useful information to an
         * attacker is leaked. Only lower 20 bits are relevant.
         */
-       rol32(hash, 16);
+       hash = rol32(hash, 16);
 
        flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
 
@@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void);
 
 int ipv6_sock_mc_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+                         const struct in6_addr *addr, unsigned int mode);
 int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
 #endif /* _NET_IPV6_H */
index b0eaeb02d46d14ceb87f6e62d4765959c8383a66..f4c21b5a1242baac0415b3dde8fbc30524690ee7 100644 (file)
@@ -153,6 +153,8 @@ struct iucv_sock_list {
        atomic_t          autobind_name;
 };
 
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
index 47e35cce3b648d696b127ed7bd643036128795f6..a71264d75d7f98d28f92dfd861ffe6e0d39c0198 100644 (file)
@@ -128,6 +128,7 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
+       struct ctl_table_header *nf_frag_frags_hdr;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
index 08c005ce56e9ce3642804333062f0fc24006f02c..dc417ef0a0c5092208226cab3ab5e79ee283a18e 100644 (file)
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
  *     @portid: netlink portID of the original message
  *     @seq: netlink sequence number
  *     @family: protocol family
+ *     @level: depth of the chains
  *     @report: notify via unicast netlink message
  */
 struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
        u32                             portid;
        u32                             seq;
        u8                              family;
+       u8                              level;
        bool                            report;
 };
 
@@ -865,7 +867,6 @@ enum nft_chain_flags {
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
- *     @level: length of longest path to this chain
  *     @flags: bitmask of enum nft_chain_flags
  *     @name: name of the chain
  */
@@ -878,7 +879,6 @@ struct nft_chain {
        struct nft_table                *table;
        u64                             handle;
        u32                             use;
-       u16                             level;
        u8                              flags:6,
                                        genmask:2;
        char                            *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
        u32                             genmask:2,
                                        use:30;
        u64                             handle;
-       char                            *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
        /* runtime data below here */
        struct nf_hook_ops              *ops ____cacheline_aligned;
        struct nf_flowtable             data;
index e0c0c2558ec48adfb27629c2180f9b04efb67bcf..a05134507e7bc806d9afd9ff7c86b95e5df084eb 100644 (file)
@@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
 extern struct static_key_false nft_counters_enabled;
 extern struct static_key_false nft_trace_enabled;
 
+extern struct nft_set_type nft_set_rhash_type;
+extern struct nft_set_type nft_set_hash_type;
+extern struct nft_set_type nft_set_hash_fast_type;
+extern struct nft_set_type nft_set_rbtree_type;
+extern struct nft_set_type nft_set_bitmap_type;
+
 #endif /* _NET_NF_TABLES_CORE_H */
index 9754a50ecde9c44162cc60e387d48cb034c6e6d4..4cc64c8446eb94f1c122cf15d4bf74c7e3f2275d 100644 (file)
@@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
  * belonging to established connections going through that one.
  */
 struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
                      const u8 protocol,
                      const __be32 saddr, const __be32 daddr,
                      const __be16 sport, const __be16 dport,
@@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                            struct sock *sk);
 
 struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
                      const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
                      const __be16 sport, const __be16 dport,
index c978a31b0f846210b4c2a369af960d5349b5395a..762ac9931b6251152b6ee0e5780df0f7b073f3e6 100644 (file)
@@ -109,7 +109,6 @@ struct netns_ipv6 {
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct netns_nf_frag {
-       struct netns_sysctl_ipv6 sysctl;
        struct netns_frags      frags;
 };
 #endif
index a3c1a2c47cd4bfd868004548cdf1ef7a361fa4c6..20b059574e600e64838b0bdecfaf6a76e6629d4a 100644 (file)
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 }
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+       return false;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
        return NULL;
index 30b3e2fe240a88e3396a8b3664fd879c93fd30bf..8c2caa370e0f683ea764bc0d72da6dfa93699673 100644 (file)
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
 void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+               poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
index 9470fd7e4350ea9546b43a504ebe12f6362dda18..32d2454c04793021c0dc87bca7f1802b49c5249b 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/tc_act/tc_csum.h>
 
 struct tcf_csum_params {
-       int action;
        u32 update_flags;
        struct rcu_head rcu;
 };
index efef0b4b1b2bddc76095bcd4d02ebaaa3b2beb56..46b8c7f1c8d5273791df55eeb6345807d8812e96 100644 (file)
@@ -18,7 +18,6 @@
 struct tcf_tunnel_key_params {
        struct rcu_head         rcu;
        int                     tcft_action;
-       int                     action;
        struct metadata_dst     *tcft_enc_metadata;
 };
 
index 0448e7c5d2b4062f8ceecb5b38882385a1be7ead..cd3ecda9386a680e009ca261ea535feaa1349d74 100644 (file)
@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
 {
@@ -388,7 +389,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+                     struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
 int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -538,6 +540,7 @@ void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -827,12 +830,21 @@ struct tcp_skb_cb {
 
 #define TCP_SKB_CB(__skb)      ((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+       TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
 
 #if IS_ENABLED(CONFIG_IPV6)
 /* This is the variant of inet6_iif() that must be used by TCP,
  * as TCP moves IP6CB into a different location in skb->cb[]
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
+{
+       return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
 {
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 
@@ -907,8 +919,6 @@ enum tcp_ca_event {
        CA_EVENT_LOSS,          /* loss timeout */
        CA_EVENT_ECN_NO_CE,     /* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE,     /* received CE marked IP packet */
-       CA_EVENT_DELAYED_ACK,   /* Delayed ack is sent */
-       CA_EVENT_NON_DELAYED_ACK,
 };
 
 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
index 7f84ea3e217cf5e3f78698ee63bc9dced179caed..70c273777fe9fe27b2ef1ba7c2c80970da8ea5c4 100644 (file)
@@ -109,7 +109,8 @@ struct tls_sw_context_rx {
 
        struct strparser strp;
        void (*saved_data_ready)(struct sock *sk);
-       __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+       unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+                               struct poll_table_struct *wait);
        struct sk_buff *recv_pkt;
        u8 control;
        bool decrypted;
@@ -224,7 +225,8 @@ void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                   int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                           struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags);
index b1ea8b0f5e6a8ce82602e593acd583170b4a6e73..81afdacd4fff04bd05335da85a7a06b1996282f8 100644 (file)
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int __udp_disconnect(struct sock *sk, int flags);
 int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
 struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features,
                                       bool is_ipv6);
index 9fe472f2ac950c8f3f042cca547cb8f7ce820a97..7161856bcf9c7f572943f6a78676df2aa458a5f7 100644 (file)
@@ -60,6 +60,10 @@ struct xdp_sock {
        bool zc;
        /* Protects multiple processes in the control path */
        struct mutex mutex;
+       /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+        * in the SKB destructor callback.
+        */
+       spinlock_t tx_completion_lock;
        u64 rx_dropped;
 };
 
index 4c6241bc203931dcc6b74de5be72349e741cb6be..6c003995347a3904cda6e57814c50bcf6c0733a7 100644 (file)
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
  *
  * Users can examine the cq structure to determine the actual CQ size.
  */
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+       __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
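
Callers are unchanged by the wrapper; a hedged usage sketch (handler and context names are made up) showing that the macro transparently records the calling module via KBUILD_MODNAME:

    struct ib_cq_init_attr attr = { .cqe = 256, .comp_vector = 0 };
    struct ib_cq *cq;

    /* Expands to __ib_create_cq(..., KBUILD_MODNAME). */
    cq = ib_create_cq(device, my_comp_handler, NULL, my_context, &attr);
    if (IS_ERR(cq))
            return PTR_ERR(cq);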
 
 /**
  * ib_resize_cq - Modifies the capacity of the CQ.
index 78b4dd89fcb4d8b53a1f706de3f888d90462daa9..1ceec56de0157671c6a42af07322ffb7fb8e1731 100644 (file)
@@ -72,6 +72,29 @@ extern "C" {
 #define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
 #define DRM_IOCTL_AMDGPU_SCHED         DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
 
+/**
+ * DOC: memory domains
+ *
+ * %AMDGPU_GEM_DOMAIN_CPU      System memory that is not GPU accessible.
+ * Memory in this pool could be swapped out to disk if there is pressure.
+ *
+ * %AMDGPU_GEM_DOMAIN_GTT      GPU accessible system memory, mapped into the
+ * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
+ * pages of system memory, allowing the GPU to access system memory in a
+ * linearized fashion.
+ *
+ * %AMDGPU_GEM_DOMAIN_VRAM     Local video memory. For APUs, it is memory
+ * carved out by the BIOS.
+ *
+ * %AMDGPU_GEM_DOMAIN_GDS      Global on-chip data storage used to share data
+ * across shader threads.
+ *
+ * %AMDGPU_GEM_DOMAIN_GWS      Global wave sync, used to synchronize the
+ * execution of all the waves on a device.
+ *
+ * %AMDGPU_GEM_DOMAIN_OA       Ordered append, used by 3D or Compute engines
+ * for appending data.
+ */
 #define AMDGPU_GEM_DOMAIN_CPU          0x1
 #define AMDGPU_GEM_DOMAIN_GTT          0x2
 #define AMDGPU_GEM_DOMAIN_VRAM         0x4
@@ -483,7 +506,8 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_HW_IP_UVD_ENC      5
 #define AMDGPU_HW_IP_VCN_DEC      6
 #define AMDGPU_HW_IP_VCN_ENC      7
-#define AMDGPU_HW_IP_NUM          8
+#define AMDGPU_HW_IP_VCN_JPEG     8
+#define AMDGPU_HW_IP_NUM          9
 
 #define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
 
@@ -492,6 +516,7 @@ struct drm_amdgpu_gem_va {
 #define AMDGPU_CHUNK_ID_DEPENDENCIES   0x03
 #define AMDGPU_CHUNK_ID_SYNCOBJ_IN      0x04
 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT     0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES      0x06
 
 struct drm_amdgpu_cs_chunk {
        __u32           chunk_id;
index 9c660e1688abe1cd6bf0e22bf709515e8a463e0d..300f336633f28ea20493570f80a73e30d87cd087 100644 (file)
@@ -687,6 +687,15 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
 
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on the
+ * client also supporting DRM_CLIENT_CAP_ATOMIC.
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS    5
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index e04613d30a134f696f9fa52dc439b57a87ac8f8c..721ab7e54d96d95afbd895b298729c0e1e172580 100644 (file)
@@ -183,6 +183,7 @@ extern "C" {
 #define DRM_FORMAT_MOD_VENDOR_QCOM    0x05
 #define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
 #define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
+#define DRM_FORMAT_MOD_VENDOR_ARM     0x08
 /* add more to the end as needed */
 
 #define DRM_FORMAT_RESERVED          ((1ULL << 56) - 1)
@@ -298,6 +299,19 @@ extern "C" {
  */
 #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE      fourcc_mod_code(SAMSUNG, 1)
 
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * The entire pixel data buffer is aligned to 4k bytes.
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+
 /* Vivante framebuffer modifiers */
 
 /*
@@ -384,6 +398,23 @@ extern "C" {
 #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
        fourcc_mod_code(NVIDIA, 0x15)
 
+/*
+ * Some Broadcom modifiers take parameters, for example the number of
+ * vertical lines in the image. Reserve the lower 32 bits for modifier
+ * type, and the next 24 bits for parameters. Top 8 bits are the
+ * vendor code.
+ */
+#define __fourcc_mod_broadcom_param_shift 8
+#define __fourcc_mod_broadcom_param_bits 48
+#define fourcc_mod_broadcom_code(val, params) \
+       fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
+#define fourcc_mod_broadcom_param(m) \
+       ((int)(((m) >> __fourcc_mod_broadcom_param_shift) &     \
+              ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
+#define fourcc_mod_broadcom_mod(m) \
+       ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) <<    \
+                __fourcc_mod_broadcom_param_shift))
+
 /*
  * Broadcom VC4 "T" format
  *
@@ -405,6 +436,151 @@ extern "C" {
  */
 #define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
 
+/*
+ * Broadcom SAND format
+ *
+ * This is the native format that the H.264 codec block uses.  For VC4
+ * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
+ *
+ * The image can be considered to be split into columns, and the
+ * columns are placed consecutively into memory.  The width of those
+ * columns can be either 32, 64, 128, or 256 pixels, but in practice
+ * only 128 pixel columns are used.
+ *
+ * The pitch between the start of each column is set to optimally
+ * switch between SDRAM banks. This is passed as the number of lines
+ * of column width in the modifier (we can't use the stride value due
+ * to various core checks that look at it, so you should set the
+ * stride to width*cpp).
+ *
+ * Note that the column height for this format modifier is the same
+ * for all of the planes, assuming that each column contains both Y
+ * and UV.  Some SAND-using hardware stores UV in a separate tiled
+ * image from Y to reduce the column height, which is not supported
+ * with these modifiers.
+ */
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(2, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(3, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(4, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
+       fourcc_mod_broadcom_code(5, v)
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
+       DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
+       DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
+       DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
+       DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
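
A hedged worked example of the parameterized encoding (the 96-line column height is illustrative):

    /* A SAND128 buffer whose column pitch is 96 lines. */
    __u64 mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

    /* fourcc_mod_broadcom_mod(mod)   == DRM_FORMAT_MOD_BROADCOM_SAND128 */
    /* fourcc_mod_broadcom_param(mod) == 96, the column pitch in lines   */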
+
+/* Broadcom UIF format
+ *
+ * This is the common format for the current Broadcom multimedia
+ * blocks, including V3D 3.x and newer, newer video codecs, and
+ * displays.
+ *
+ * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
+ * and macroblocks (4x4 UIF blocks).  Those 4x4 UIF block groups are
+ * stored in columns, with padding between the columns to ensure that
+ * moving from one column to the next doesn't hit the same SDRAM page
+ * bank.
+ *
+ * To calculate the padding, it is assumed that each hardware block
+ * and the software driving it knows the platform's SDRAM page size,
+ * number of banks, and XOR address, and that it's identical between
+ * all blocks using the format.  This tiling modifier will use XOR as
+ * necessary to reduce the padding.  If a hardware block can't do XOR,
+ * the assumption is that a no-XOR tiling modifier will be created.
+ */
+#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
+
+/*
+ * Arm Framebuffer Compression (AFBC) modifiers
+ *
+ * AFBC is a proprietary lossless image compression protocol and format.
+ * It provides fine-grained random access and minimizes the amount of data
+ * transferred between IP blocks.
+ *
+ * AFBC has several features which may be supported and/or used, which are
+ * represented using bits in the modifier. Not all combinations are valid,
+ * and different devices or use-cases may support different combinations.
+ */
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode)   fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * AFBC superblock size
+ *
+ * Indicates the superblock size(s) used for the AFBC buffer. The buffer
+ * size (in pixels) must be aligned to a multiple of the superblock size.
+ * The four least significant bits (LSBs) are reserved for the block size.
+ */
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK      0xf
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16     (1ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8      (2ULL)
+
+/*
+ * AFBC lossless colorspace transform
+ *
+ * Indicates that the buffer makes use of the AFBC lossless colorspace
+ * transform.
+ */
+#define AFBC_FORMAT_MOD_YTR     (1ULL <<  4)
+
+/*
+ * AFBC block-split
+ *
+ * Indicates that the payload of each superblock is split. The second
+ * half of the payload is positioned at a predefined offset from the start
+ * of the superblock payload.
+ */
+#define AFBC_FORMAT_MOD_SPLIT   (1ULL <<  5)
+
+/*
+ * AFBC sparse layout
+ *
+ * This flag indicates that the payload of each superblock must be stored at a
+ * predefined position relative to the other superblocks in the same AFBC
+ * buffer. This order is the same order used by the header buffer. In this mode
+ * each superblock is given the same amount of space as an uncompressed
+ * superblock of the particular format would require, rounding up to the next
+ * multiple of 128 bytes in size.
+ */
+#define AFBC_FORMAT_MOD_SPARSE  (1ULL <<  6)
+
+/*
+ * AFBC copy-block restrict
+ *
+ * Buffers with this flag must obey the copy-block restriction. The restriction
+ * is such that there are no copy-blocks referring across the border of 8x8
+ * blocks. For the subsampled data the 8x8 limitation is also subsampled.
+ */
+#define AFBC_FORMAT_MOD_CBR     (1ULL <<  7)
+
+/*
+ * AFBC tiled layout
+ *
+ * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
+ * superblocks inside a tile are stored together in memory. 8x8 tiles are used
+ * for pixel formats up to and including 32 bpp while 4x4 tiles are used for
+ * larger bpp formats. The tiles are laid out in scan-line order.
+ * When the tiled layout is used, the buffer size (in pixels) must be aligned
+ * to the tile size.
+ */
+#define AFBC_FORMAT_MOD_TILED   (1ULL <<  8)
+
+/*
+ * AFBC solid color blocks
+ *
+ * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
+ * can be reduced if a whole superblock is a single color.
+ */
+#define AFBC_FORMAT_MOD_SC      (1ULL <<  9)
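
For illustration, a buffer using 16x16 superblocks together with the lossless colorspace transform and the sparse layout would advertise a modifier composed as follows (a hedged sketch; whether a given device supports this combination is not implied):

    __u64 modifier = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
                                             AFBC_FORMAT_MOD_YTR |
                                             AFBC_FORMAT_MOD_SPARSE);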
+
 #if defined(__cplusplus)
 }
 #endif
index 4b3a1bb58e68053bd880aa373fe5385730baaade..8d67243952f4f104f8b2ca745b4d94aa2565cbcc 100644 (file)
@@ -96,6 +96,13 @@ extern "C" {
 #define DRM_MODE_PICTURE_ASPECT_64_27          3
 #define DRM_MODE_PICTURE_ASPECT_256_135                4
 
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA          0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS         1
+#define DRM_MODE_CONTENT_TYPE_PHOTO            2
+#define DRM_MODE_CONTENT_TYPE_CINEMA           3
+#define DRM_MODE_CONTENT_TYPE_GAME             4
+
 /* Aspect ratio flag bitmask (4 bits 22:19) */
 #define DRM_MODE_FLAG_PIC_AR_MASK              (0x0F<<19)
 #define  DRM_MODE_FLAG_PIC_AR_NONE \
@@ -344,6 +351,7 @@ enum drm_mode_subconnector {
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
 #define DRM_MODE_CONNECTOR_DSI         16
 #define DRM_MODE_CONNECTOR_DPI         17
+#define DRM_MODE_CONNECTOR_WRITEBACK   18
 
 struct drm_mode_get_connector {
 
index 0bc784f5e0dbe15bc8d1f92c7941ff57b5008072..399f58317cff34d7505a76a3f285a18a7fa71869 100644 (file)
@@ -40,6 +40,7 @@ extern "C" {
 
 #define DRM_VMW_GET_PARAM            0
 #define DRM_VMW_ALLOC_DMABUF         1
+#define DRM_VMW_ALLOC_BO             1
 #define DRM_VMW_UNREF_DMABUF         2
 #define DRM_VMW_HANDLE_CLOSE         2
 #define DRM_VMW_CURSOR_BYPASS        3
@@ -68,6 +69,8 @@ extern "C" {
 #define DRM_VMW_GB_SURFACE_REF       24
 #define DRM_VMW_SYNCCPU              25
 #define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
+#define DRM_VMW_GB_SURFACE_REF_EXT      28
 
 /*************************************************************************/
 /**
@@ -79,6 +82,9 @@ extern "C" {
  *
  * DRM_VMW_PARAM_OVERLAY_IOCTL:
  * Does the driver support the overlay ioctl.
+ *
+ * DRM_VMW_PARAM_SM4_1
+ * SM4_1 support is enabled.
  */
 
 #define DRM_VMW_PARAM_NUM_STREAMS      0
@@ -94,6 +100,8 @@ extern "C" {
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
 #define DRM_VMW_PARAM_SCREEN_TARGET    11
 #define DRM_VMW_PARAM_DX               12
+#define DRM_VMW_PARAM_HW_CAPS2         13
+#define DRM_VMW_PARAM_SM4_1            14
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -356,9 +364,9 @@ struct drm_vmw_fence_rep {
 
 /*************************************************************************/
 /**
- * DRM_VMW_ALLOC_DMABUF
+ * DRM_VMW_ALLOC_BO
  *
- * Allocate a DMA buffer that is visible also to the host.
+ * Allocate a buffer object that is visible also to the host.
  * NOTE: The buffer is
  * identified by a handle and an offset, which are private to the guest, but
  * useable in the command stream. The guest kernel may translate these
@@ -366,27 +374,28 @@ struct drm_vmw_fence_rep {
  * be zero at all times, or it may disappear from the interface before it is
  * fixed.
  *
- * The DMA buffer may stay user-space mapped in the guest at all times,
+ * The buffer object may stay user-space mapped in the guest at all times,
  * and is thus suitable for sub-allocation.
  *
- * DMA buffers are mapped using the mmap() syscall on the drm device.
+ * Buffer objects are mapped using the mmap() syscall on the drm device.
  */
 
 /**
- * struct drm_vmw_alloc_dmabuf_req
+ * struct drm_vmw_alloc_bo_req
  *
  * @size: Required minimum size of the buffer.
  *
- * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Input data to the DRM_VMW_ALLOC_BO Ioctl.
  */
 
-struct drm_vmw_alloc_dmabuf_req {
+struct drm_vmw_alloc_bo_req {
        __u32 size;
        __u32 pad64;
 };
+#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
 
 /**
- * struct drm_vmw_dmabuf_rep
+ * struct drm_vmw_bo_rep
  *
  * @map_handle: Offset to use in the mmap() call used to map the buffer.
  * @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +404,32 @@ struct drm_vmw_alloc_dmabuf_req {
  * @cur_gmr_offset: Offset to use in the command stream when this buffer is
  * referenced. See note above.
  *
- * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Output data from the DRM_VMW_ALLOC_BO Ioctl.
  */
 
-struct drm_vmw_dmabuf_rep {
+struct drm_vmw_bo_rep {
        __u64 map_handle;
        __u32 handle;
        __u32 cur_gmr_id;
        __u32 cur_gmr_offset;
        __u32 pad64;
 };
+#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
 
 /**
- * union drm_vmw_dmabuf_arg
+ * union drm_vmw_alloc_bo_arg
  *
  * @req: Input data as described above.
  * @rep: Output data as described above.
  *
- * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Argument to the DRM_VMW_ALLOC_BO Ioctl.
  */
 
-union drm_vmw_alloc_dmabuf_arg {
-       struct drm_vmw_alloc_dmabuf_req req;
-       struct drm_vmw_dmabuf_rep rep;
-};
-
-/*************************************************************************/
-/**
- * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
- *
- */
-
-/**
- * struct drm_vmw_unref_dmabuf_arg
- *
- * @handle: Handle indicating what buffer to free. Obtained from the
- * DRM_VMW_ALLOC_DMABUF Ioctl.
- *
- * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
- */
-
-struct drm_vmw_unref_dmabuf_arg {
-       __u32 handle;
-       __u32 pad64;
+union drm_vmw_alloc_bo_arg {
+       struct drm_vmw_alloc_bo_req req;
+       struct drm_vmw_bo_rep rep;
 };
+#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
 
 /*************************************************************************/
 /**
@@ -1103,9 +1094,8 @@ union drm_vmw_extended_context_arg {
  * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
  * underlying resource.
  *
- * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
- * The ioctl arguments therefore need to be identical in layout.
- *
+ * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
+ * Ioctl.
  */
 
 /**
@@ -1119,7 +1109,107 @@ struct drm_vmw_handle_close_arg {
        __u32 handle;
        __u32 pad64;
 };
+#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ *
+ * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
+ * parameter and 64 bit svga flag.
+ */
+
+/**
+ * enum drm_vmw_surface_version
+ *
+ * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
+ * svga3d surface flags split into 2, upper half and lower half.
+ */
+enum drm_vmw_surface_version {
+       drm_vmw_gb_surface_v1
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_ext_req
+ *
+ * @base: Surface create parameters.
+ * @version: Version of surface create ioctl.
+ * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
+ * @multisample_pattern: Multisampling pattern when msaa is supported.
+ * @quality_level: Precision settings for each sample.
+ * @must_be_zero: Reserved for future usage.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+struct drm_vmw_gb_surface_create_ext_req {
+       struct drm_vmw_gb_surface_create_req base;
+       enum drm_vmw_surface_version version;
+       uint32_t svga3d_flags_upper_32_bits;
+       SVGA3dMSPattern multisample_pattern;
+       SVGA3dMSQualityLevel quality_level;
+       uint64_t must_be_zero;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_ext_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+union drm_vmw_gb_surface_create_ext_arg {
+       struct drm_vmw_gb_surface_create_rep rep;
+       struct drm_vmw_gb_surface_create_ext_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
 
+/**
+ * struct drm_vmw_gb_surface_ref_ext_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ *        above at "struct drm_vmw_gb_surface_create_ext_req"
+ * @crep: Additional data output when the surface was created, as described
+ *        above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
+ */
+struct drm_vmw_gb_surface_ref_ext_rep {
+       struct drm_vmw_gb_surface_create_ext_req creq;
+       struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_ext_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at
+ *       "struct drm_vmw_gb_surface_ref_ext_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+union drm_vmw_gb_surface_reference_ext_arg {
+       struct drm_vmw_gb_surface_ref_ext_rep rep;
+       struct drm_vmw_surface_arg req;
+};
 
 #if defined(__cplusplus)
 }
index d00221345c1988ff59de79f47401903d560c55e0..d4593a6062ef00d436bc237c9209082cba62805d 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <linux/types.h>
 #include <linux/fs.h>
-#include <linux/signal.h>
 #include <asm/byteorder.h>
 
 typedef __kernel_ulong_t aio_context_t;
@@ -39,8 +38,10 @@ enum {
        IOCB_CMD_PWRITE = 1,
        IOCB_CMD_FSYNC = 2,
        IOCB_CMD_FDSYNC = 3,
-       /* 4 was the experimental IOCB_CMD_PREADX */
-       IOCB_CMD_POLL = 5,
+       /* These two are experimental.
+        * IOCB_CMD_PREADX = 4,
+        * IOCB_CMD_POLL = 5,
+        */
        IOCB_CMD_NOOP = 6,
        IOCB_CMD_PREADV = 7,
        IOCB_CMD_PWRITEV = 8,
@@ -108,10 +109,5 @@ struct iocb {
 #undef IFBIG
 #undef IFLITTLE
 
-struct __aio_sigset {
-       const sigset_t __user   *sigmask;
-       size_t          sigsetsize;
-};
-
 #endif /* __LINUX__AIO_ABI_H */
 
index 59b19b6a40d73ea6575f8810a6f4345a931c5a01..b7db3261c62d124760e98d9c851c1b01e64bdb03 100644 (file)
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
  *             **struct sk_buff** tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *             *     packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
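
A hedged sketch of the new return convention from an XDP program (assumes the usual linux/bpf.h plus libbpf bpf_helpers.h setup; the forwarding policy itself is illustrative):

    SEC("xdp")
    int xdp_fwd(struct xdp_md *ctx)
    {
            struct bpf_fib_lookup params = {};
            int rc;

            params.family  = AF_INET;
            params.ifindex = ctx->ingress_ifindex;
            /* ... fill tot_len, tos and the addresses from the packet ... */

            rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
            if (rc < 0)
                    return XDP_ABORTED;     /* invalid input argument */
            if (rc == BPF_FIB_LKUP_RET_SUCCESS)
                    /* params.ifindex now holds the egress device */
                    return bpf_redirect(params.ifindex, 0);
            return XDP_PASS;                /* let the full stack assist */
    }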
index 0b5ddbe135a47aa7f39b40ca44e665e5757014de..972265f328717b8286edc2fc93c9d2a54ed394f8 100644 (file)
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)  (((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)    (((VAL  & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL)      ((VAL)  & 0x0000ffff)
+#define BTF_INT_BITS(VAL)      ((VAL)  & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED (1 << 0)
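
A hedged worked decode under the corrected mask, taking a signed 64-bit integer as the example value:

    __u32 val = 0x01000040;  /* BTF_INT_SIGNED in bits 24-27, width 64 */

    /* BTF_INT_ENCODING(val) == BTF_INT_SIGNED */
    /* BTF_INT_OFFSET(val)   == 0              */
    /* BTF_INT_BITS(val)     == 64; with the old 0xffff mask, stray
     * bits 8-15 would have leaked into the reported width.
     */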
index 4ca65b56084f94526435a58a8663d58054c924f4..7363f18e65a553e12f4d1cc13844dfbf2bbe6f17 100644 (file)
@@ -226,7 +226,7 @@ enum tunable_id {
        ETHTOOL_TX_COPYBREAK,
        ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
        /*
-        * Add your fresh new tubale attribute above and remember to update
+        * Add your fresh new tunable attribute above and remember to update
         * tunable_strings[] in net/core/ethtool.c
         */
        __ETHTOOL_TUNABLE_COUNT,
index b4f5073dbac25b0d71f39f44b4ddfca86736929f..01674b56e14f2f47c053b8104490a89e6a5052c9 100644 (file)
@@ -76,6 +76,12 @@ struct kfd_ioctl_update_queue_args {
        __u32 queue_priority;   /* to KFD */
 };
 
+struct kfd_ioctl_set_cu_mask_args {
+       __u32 queue_id;         /* to KFD */
+       __u32 num_cu_mask;              /* to KFD */
+       __u64 cu_mask_ptr;              /* to KFD */
+};
+
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
 #define KFD_IOC_CACHE_POLICY_COHERENT 0
 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -189,6 +195,15 @@ struct kfd_ioctl_dbg_wave_control_args {
 
 #define KFD_SIGNAL_EVENT_LIMIT                 4096
 
+/* For kfd_event_data.hw_exception_data.reset_type. */
+#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET       0
+#define KFD_HW_EXCEPTION_PER_ENGINE_RESET      1
+
+/* For kfd_event_data.hw_exception_data.reset_cause. */
+#define KFD_HW_EXCEPTION_GPU_HANG      0
+#define KFD_HW_EXCEPTION_ECC           1
+
+
 struct kfd_ioctl_create_event_args {
        __u64 event_page_offset;        /* from KFD */
        __u32 event_trigger_data;       /* from KFD - signal events only */
@@ -219,7 +234,7 @@ struct kfd_memory_exception_failure {
        __u32 NotPresent;       /* Page not present or supervisor privilege */
        __u32 ReadOnly; /* Write access to a read-only page */
        __u32 NoExecute;        /* Execute access to a page marked NX */
-       __u32 pad;
+       __u32 imprecise;        /* Can't determine the exact fault address */
 };
 
 /* memory exception data*/
@@ -230,10 +245,19 @@ struct kfd_hsa_memory_exception_data {
        __u32 pad;
 };
 
-/* Event data*/
+/* hw exception data */
+struct kfd_hsa_hw_exception_data {
+       uint32_t reset_type;
+       uint32_t reset_cause;
+       uint32_t memory_lost;
+       uint32_t gpu_id;
+};
+
+/* Event data */
 struct kfd_event_data {
        union {
                struct kfd_hsa_memory_exception_data memory_exception_data;
+               struct kfd_hsa_hw_exception_data hw_exception_data;
        };                              /* From KFD */
        __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
@@ -448,7 +472,10 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
 #define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU       \
                AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
 
+#define AMDKFD_IOC_SET_CU_MASK         \
+               AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
+
 #define AMDKFD_COMMAND_START           0x01
-#define AMDKFD_COMMAND_END             0x1A
+#define AMDKFD_COMMAND_END             0x1B
 
 #endif
index 85a3fb65e40a6f3941337c7fad17e7fdff3b33d0..20d6cc91435df90f08741c478ab29ea85efa7167 100644 (file)
@@ -53,6 +53,9 @@ enum {
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT        (1 << 0) /* delete the nbd device on
                                                    disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+                                               *  close by last opener.
+                                               */
 
 /* userspace doesn't need the nbd_device structure */
 
index d620fa43756cab2685428861f31c27d9a59b2a39..9a402fdb60e97bc92591312ebc7071de54fa900c 100644 (file)
  * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <linux/types_32_64.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
 
 enum rseq_cpu_id_state {
        RSEQ_CPU_ID_UNINITIALIZED               = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
        __u32 version;
        /* enum rseq_cs_flags */
        __u32 flags;
-       LINUX_FIELD_u32_u64(start_ip);
+       __u64 start_ip;
        /* Offset from start_ip. */
-       LINUX_FIELD_u32_u64(post_commit_offset);
-       LINUX_FIELD_u32_u64(abort_ip);
+       __u64 post_commit_offset;
+       __u64 abort_ip;
 } __attribute__((aligned(4 * sizeof(__u64))));
 
 /*
@@ -67,28 +62,30 @@ struct rseq_cs {
 struct rseq {
        /*
         * Restartable sequences cpu_id_start field. Updated by the
-        * kernel, and read by user-space with single-copy atomicity
-        * semantics. Aligned on 32-bit. Always contains a value in the
-        * range of possible CPUs, although the value may not be the
-        * actual current CPU (e.g. if rseq is not initialized). This
-        * CPU number value should always be compared against the value
-        * of the cpu_id field before performing a rseq commit or
-        * returning a value read from a data structure indexed using
-        * the cpu_id_start value.
+        * kernel. Read by user-space with single-copy atomicity
+        * semantics. This field should only be read by the thread which
+        * registered this data structure. Aligned on 32-bit. Always
+        * contains a value in the range of possible CPUs, although the
+        * value may not be the actual current CPU (e.g. if rseq is not
+        * initialized). This CPU number value should always be compared
+        * against the value of the cpu_id field before performing a rseq
+        * commit or returning a value read from a data structure indexed
+        * using the cpu_id_start value.
         */
        __u32 cpu_id_start;
        /*
-        * Restartable sequences cpu_id field. Updated by the kernel,
-        * and read by user-space with single-copy atomicity semantics.
-        * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
-        * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
-        * former means "rseq uninitialized", and latter means "rseq
-        * initialization failed". This value is meant to be read within
-        * rseq critical sections and compared with the cpu_id_start
-        * value previously read, before performing the commit instruction,
-        * or read and compared with the cpu_id_start value before returning
-        * a value loaded from a data structure indexed using the
-        * cpu_id_start value.
+        * Restartable sequences cpu_id field. Updated by the kernel.
+        * Read by user-space with single-copy atomicity semantics. This
+        * field should only be read by the thread which registered this
+        * data structure. Aligned on 32-bit. Values
+        * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
+        * have a special semantic: the former means "rseq uninitialized",
+        * and the latter means "rseq initialization failed". This value is
+        * meant to be read within rseq critical sections and compared
+        * with the cpu_id_start value previously read, before performing
+        * the commit instruction, or read and compared with the
+        * cpu_id_start value before returning a value loaded from a data
+        * structure indexed using the cpu_id_start value.
         */
        __u32 cpu_id;
        /*
@@ -105,27 +102,44 @@ struct rseq {
         * targeted by the rseq_cs. Also needs to be set to NULL by user-space
         * before reclaiming memory that contains the targeted struct rseq_cs.
         *
-        * Read and set by the kernel with single-copy atomicity semantics.
-        * Set by user-space with single-copy atomicity semantics. Aligned
-        * on 64-bit.
+        * Read and set by the kernel. Set by user-space with single-copy
+        * atomicity semantics. This field should only be updated by the
+        * thread which registered this data structure. Aligned on 64-bit.
         */
-       LINUX_FIELD_u32_u64(rseq_cs);
+       union {
+               __u64 ptr64;
+#ifdef __LP64__
+               __u64 ptr;
+#else
+               struct {
+#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
+                       __u32 padding;          /* Initialized to zero. */
+                       __u32 ptr32;
+#else /* LITTLE */
+                       __u32 ptr32;
+                       __u32 padding;          /* Initialized to zero. */
+#endif /* ENDIAN */
+               } ptr;
+#endif
+       } rseq_cs;
+
        /*
-        * - RSEQ_DISABLE flag:
+        * Restartable sequences flags field.
+        *
+        * This field should only be updated by the thread which
+        * registered this data structure. Read by the kernel.
+        * Mainly used for single-stepping through rseq critical sections
+        * with debuggers.
         *
-        * Fallback fast-track flag for single-stepping.
-        * Set by user-space if lack of progress is detected.
-        * Cleared by user-space after rseq finish.
-        * Read by the kernel.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on preemption for this thread.
+        *     Inhibit instruction sequence block restart on preemption
+        *     for this thread.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on signal delivery for this thread.
+        *     Inhibit instruction sequence block restart on signal
+        *     delivery for this thread.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on migration for this thread.
+        *     Inhibit instruction sequence block restart on migration for
+        *     this thread.
         */
        __u32 flags;
 } __attribute__((aligned(4 * sizeof(__u64))));
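
With LINUX_FIELD_u32_u64() gone, userspace now initializes the rseq_cs union explicitly.
A minimal sketch of the store under the new layout (the aligned store is assumed to have
the single-copy atomicity the comments above require):

        /* Sketch: publish a critical-section descriptor to the kernel. */
        static inline void rseq_set_cs(volatile struct rseq *rs,
                                       struct rseq_cs *cs)
        {
        #ifdef __LP64__
                rs->rseq_cs.ptr = (__u64)(uintptr_t)cs;
        #else
                /* 32-bit: write the low half; padding stays zero. */
                rs->rseq_cs.ptr.ptr32 = (__u32)(uintptr_t)cs;
        #endif
        }
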
index 6e299349b15876d3302cc784576dd84cff6f1d66..b7b57967d90f09cd428d90e12b0035e3ecbcfc67 100644 (file)
@@ -44,6 +44,7 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
 #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
 
 struct tcmu_mailbox {
        __u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
        __u16 cmd_id;
        __u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN   0x2
        __u8 uflags;
 
 } __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
                        __u8 scsi_status;
                        __u8 __pad1;
                        __u16 __pad2;
-                       __u32 __pad3;
+                       __u32 read_len;
                        char sense_buffer[TCMU_SENSE_BUFFERSIZE];
                } rsp;
        };
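
A userspace handler that understands the new capability consults read_len instead of
assuming the full requested length was produced. A hedged sketch using the fields above
(mailbox setup and ring handling elided):

        /* Sketch: effective payload length for a completed READ. */
        static size_t tcmu_read_len(struct tcmu_mailbox *mb,
                                    struct tcmu_cmd_entry *ent,
                                    size_t requested)
        {
                if ((mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN) &&
                    (ent->hdr.uflags & TCMU_UFLAG_READ_LEN))
                        return ent->rsp.read_len;
                return requested;
        }
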
index 29eb659aa77a183e36082599866fb512908d1197..e3f6ed8a7064f9276ca2b57ed5ecff3364786e9d 100644 (file)
@@ -127,6 +127,10 @@ enum {
 
 #define TCP_CM_INQ             TCP_INQ
 
+#define TCP_REPAIR_ON          1
+#define TCP_REPAIR_OFF         0
+#define TCP_REPAIR_OFF_NO_WP   -1      /* Turn off without window probes */
+
 struct tcp_repair_opt {
        __u32   opt_code;
        __u32   opt_val;
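
A checkpoint/restore tool can now leave repair mode without the kernel emitting window
probes. Sketch (fd is a TCP socket previously put into repair mode; constants come from
linux/tcp.h):

        /* Sketch: exit repair mode, suppressing window probes. */
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <linux/tcp.h>

        static int tcp_repair_off_no_wp(int fd)
        {
                int val = TCP_REPAIR_OFF_NO_WP;

                return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR,
                                  &val, sizeof(val));
        }
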
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644 (file)
index 0a87ace..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_TYPES_32_64_H
-#define _UAPI_LINUX_TYPES_32_64_H
-
-/*
- * linux/types_32_64.h
- *
- * Integer type declaration for pointers across 32-bit and 64-bit systems.
- *
- * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <asm/byteorder.h>
-
-#ifdef __BYTE_ORDER
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-#  define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#else
-# ifdef __BIG_ENDIAN
-#  define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#endif
-
-#ifdef __LP64__
-# define LINUX_FIELD_u32_u64(field)                    __u64 field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)    field = (intptr_t)v
-#else
-# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
-#  define LINUX_FIELD_u32_u64(field)   __u32 field ## _padding, field
-#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)   \
-       field ## _padding = 0, field = (intptr_t)v
-# else
-#  define LINUX_FIELD_u32_u64(field)   __u32 field, field ## _padding
-#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)   \
-       field = (intptr_t)v, field ## _padding = 0
-# endif
-#endif
-
-#endif /* _UAPI_LINUX_TYPES_32_64_H */
index 19aa65a35546160fa9b556374ed800e8448352e3..49a53ef8da963597210953fa120207d8d1bea035 100644 (file)
@@ -38,6 +38,9 @@ enum {
 
        MIPI_DSI_DCS_READ                               = 0x06,
 
+       MIPI_DSI_DCS_COMPRESSION_MODE                   = 0x07,
+       MIPI_DSI_PPS_LONG_WRITE                         = 0x0A,
+
        MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE         = 0x37,
 
        MIPI_DSI_END_OF_TRANSMISSION                    = 0x08,
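
No generic helper transmits a PPS at this point, so a DSI host or panel driver would build
the message itself. A hedged sketch; the transfer path is host-specific, shown here via the
mipi_dsi_host_ops transfer hook:

        /* Sketch: send a 128-byte DSC picture parameter set. */
        struct mipi_dsi_msg msg = {
                .channel = dsi->channel,
                .type    = MIPI_DSI_PPS_LONG_WRITE,
                .tx_buf  = pps_table,
                .tx_len  = 128,
        };
        ret = host->ops->transfer(host, &msg);
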
index 9d4340c907d17d0c2ecacdd6762e36d7c9d6def5..1e1d9bd0bd3788711d8722e7ec9e1a15661d7c3b 100644 (file)
@@ -25,12 +25,16 @@ extern bool xen_pvh;
 #define xen_hvm_domain()       (xen_domain_type == XEN_HVM_DOMAIN)
 #define xen_pvh_domain()       (xen_pvh)
 
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
 #ifdef CONFIG_XEN_DOM0
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 
 #define xen_initial_domain()   (xen_domain() && \
-                                xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+                                (xen_start_flags & SIF_INITDOMAIN))
 #else  /* !CONFIG_XEN_DOM0 */
 #define xen_initial_domain()   (0)
 #endif /* CONFIG_XEN_DOM0 */
index 5a52f07259a2aab4ad5993801a6d15b5dfe5d4a3..041f3a022122d559b8588c8c24c8db37756464de 100644 (file)
@@ -1051,10 +1051,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION
        depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        depends on EXPERT
        help
-         Select this if the architecture wants to do dead code and
-         data elimination with the linker by compiling with
-         -ffunction-sections -fdata-sections, and linking with
-         --gc-sections.
+         Enable this if you want to do dead code and data elimination with
+         the linker by compiling with -ffunction-sections -fdata-sections,
+         and linking with --gc-sections.
 
          This can reduce on disk and in-memory size of the kernel
          code and static data, particularly for small configs and
@@ -1719,10 +1718,6 @@ source "arch/Kconfig"
 
 endmenu                # General setup
 
-config HAVE_GENERIC_DMA_COHERENT
-       bool
-       default n
-
 config RT_MUTEXES
        bool
 
index 5af1943ad782b415a3dd331161e9b2ecccf89210..76e95e4f3aa284f6ded3962b3055233ea533add8 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2118,7 +2118,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
        }
 
        do {
-               queue.status = -EINTR;
+               WRITE_ONCE(queue.status, -EINTR);
                queue.sleeper = current;
 
                __set_current_state(TASK_INTERRUPTIBLE);
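
The WRITE_ONCE() pairs with a lockless read of queue.status on the wakeup side; without
the one-shot accessors the compiler may tear the store or reorder it against the sleeper
setup. The reader end, roughly:

        /* Sketch: lockless fast path paired with the WRITE_ONCE() above. */
        error = READ_ONCE(queue.status);
        if (error != -EINTR)
                goto out_free;  /* woken up with the operation's result */
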
index d2001624fe7a31b788508e5da97924173bf2e33e..04bc07c2b42a9dfef399caea56a12b072f0ad028 100644 (file)
@@ -41,6 +41,7 @@ obj-y += printk/
 obj-y += irq/
 obj-y += rcu/
 obj-y += livepatch/
+obj-y += dma/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
index 2d49d18b793abaf60379c4050e4148a77bae732f..9704934252b3f5eb0a8816ba2cc076f5674785f7 100644 (file)
@@ -450,7 +450,7 @@ static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
  */
 static bool btf_type_int_is_regular(const struct btf_type *t)
 {
-       u16 nr_bits, nr_bytes;
+       u8 nr_bits, nr_bytes;
        u32 int_data;
 
        int_data = btf_type_int(t);
@@ -991,38 +991,38 @@ static void btf_int_bits_seq_show(const struct btf *btf,
                                  void *data, u8 bits_offset,
                                  struct seq_file *m)
 {
+       u16 left_shift_bits, right_shift_bits;
        u32 int_data = btf_type_int(t);
-       u16 nr_bits = BTF_INT_BITS(int_data);
-       u16 total_bits_offset;
-       u16 nr_copy_bytes;
-       u16 nr_copy_bits;
-       u8 nr_upper_bits;
-       union {
-               u64 u64_num;
-               u8  u8_nums[8];
-       } print_num;
+       u8 nr_bits = BTF_INT_BITS(int_data);
+       u8 total_bits_offset;
+       u8 nr_copy_bytes;
+       u8 nr_copy_bits;
+       u64 print_num;
 
+       /*
+        * bits_offset is at most 7.
+        * BTF_INT_OFFSET() cannot exceed 64 bits.
+        */
        total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
        data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
        bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
        nr_copy_bits = nr_bits + bits_offset;
        nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
 
-       print_num.u64_num = 0;
-       memcpy(&print_num.u64_num, data, nr_copy_bytes);
+       print_num = 0;
+       memcpy(&print_num, data, nr_copy_bytes);
 
-       /* Ditch the higher order bits */
-       nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
-       if (nr_upper_bits) {
-               /* We need to mask out some bits of the upper byte. */
-               u8 mask = (1 << nr_upper_bits) - 1;
-
-               print_num.u8_nums[nr_copy_bytes - 1] &= mask;
-       }
+#ifdef __BIG_ENDIAN_BITFIELD
+       left_shift_bits = bits_offset;
+#else
+       left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+#endif
+       right_shift_bits = BITS_PER_U64 - nr_bits;
 
-       print_num.u64_num >>= bits_offset;
+       print_num <<= left_shift_bits;
+       print_num >>= right_shift_bits;
 
-       seq_printf(m, "0x%llx", print_num.u64_num);
+       seq_printf(m, "0x%llx", print_num);
 }
 
 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -1032,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
        u32 int_data = btf_type_int(t);
        u8 encoding = BTF_INT_ENCODING(int_data);
        bool sign = encoding & BTF_INT_SIGNED;
-       u32 nr_bits = BTF_INT_BITS(int_data);
+       u8 nr_bits = BTF_INT_BITS(int_data);
 
        if (bits_offset || BTF_INT_OFFSET(int_data) ||
            BITS_PER_BYTE_MASKED(nr_bits)) {
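
The two shifts replace the old mask-and-copy dance: shift the copied window left until the
field's most significant bit sits at bit 63, then shift right by 64 - nr_bits so both
neighbors fall away. A standalone sketch of the little-endian case:

        /* Sketch: pull an unsigned bit-field out of a 64-bit window,
         * little-endian bit numbering assumed.
         */
        static __u64 extract_bits(__u64 window, __u8 bit_off, __u8 nr_bits)
        {
                window <<= 64 - (bit_off + nr_bits);    /* drop high neighbors */
                window >>= 64 - nr_bits;                /* drop low neighbors  */
                return window;
        }
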
index f7c00bd6f8e49ca9cc4e6ee323b01f718aebd9ec..3d83ee7df381b1def956b5e645376451d797440e 100644 (file)
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
        return ret;
 }
 
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+                               attr->attach_flags);
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       struct bpf_prog *prog;
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               prog = NULL;
+
+       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+       if (prog)
+               bpf_prog_put(prog);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->query.target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_query(cgrp, attr, uattr);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  * @sk: The socket sending or receiving traffic
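
These wrappers move the cgroup fd handling out of syscall.c so the attach path can dispatch
by program type. Userspace is unaffected; attachment still goes through BPF_PROG_ATTACH,
e.g. via libbpf (the cgroup path and flags here are illustrative):

        /* Sketch: attach an ingress filter to a cgroup. */
        int cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY);
        int err = bpf_prog_attach(prog_fd, cg_fd,
                                  BPF_CGROUP_INET_INGRESS, BPF_F_ALLOW_MULTI);
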
index 9f1493705f4043066033dd44ec6deb95e7418287..1e5625d46414cc68efe372b2c6a8dab266a24dd6 100644 (file)
@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
        return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+       int i;
+
+       for (i = 0; i < fp->aux->func_cnt; i++)
+               bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+       bpf_prog_kallsyms_del_subprogs(fp);
+       bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
@@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
        return 0;
 }
 
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+       fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
@@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+       /* In case of BPF to BPF calls, the verifier did all the prep
+        * work with regard to JITing, etc.
+        */
+       if (fp->bpf_func)
+               goto finalize;
 
-       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-       fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+       bpf_prog_select_func(fp);
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
@@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
                if (*err)
                        return fp;
        }
+
+finalize:
        bpf_prog_lock_ro(fp);
 
        /* The tail call compatibility check can only be done at
index a7cc7b3494a90f582886485668562ccfef5f5ffd..d361fc1e3bf35fd54d485e72c2e258171e3394d5 100644 (file)
@@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 {
        struct net_device *dev = dst->dev;
        struct xdp_frame *xdpf;
+       int err;
 
        if (!dev->netdev_ops->ndo_xdp_xmit)
                return -EOPNOTSUPP;
 
+       err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+       if (unlikely(err))
+               return err;
+
        xdpf = convert_to_xdp_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;
@@ -345,6 +350,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return bq_enqueue(dst, xdpf, dev_rx);
 }
 
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog)
+{
+       int err;
+
+       err = xdp_ok_fwd_dev(dst->dev, skb->len);
+       if (unlikely(err))
+               return err;
+       skb->dev = dst->dev;
+       generic_xdp_tx(skb, xdp_prog);
+
+       return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
index 3ca2198a6d22d9ab67c61e964811046cf9c9e512..513d9dfcf4ee136dd5e6733789a996612272376d 100644 (file)
@@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                 * old element will be freed immediately.
                                 * Otherwise return an error
                                 */
-                               atomic_dec(&htab->count);
-                               return ERR_PTR(-E2BIG);
+                               l_new = ERR_PTR(-E2BIG);
+                               goto dec_count;
                        }
                l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
                                     htab->map.numa_node);
-               if (!l_new)
-                       return ERR_PTR(-ENOMEM);
+               if (!l_new) {
+                       l_new = ERR_PTR(-ENOMEM);
+                       goto dec_count;
+               }
        }
 
        memcpy(l_new->key, key, key_size);
@@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                                  GFP_ATOMIC | __GFP_NOWARN);
                        if (!pptr) {
                                kfree(l_new);
-                               return ERR_PTR(-ENOMEM);
+                               l_new = ERR_PTR(-ENOMEM);
+                               goto dec_count;
                        }
                }
 
@@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 
        l_new->hash = hash;
        return l_new;
+dec_count:
+       atomic_dec(&htab->count);
+       return l_new;
 }
 
 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
index 52a91d816c0eb9a1f9fe96fd77b3ffefd6145149..98fb7938beea9dd18a255ad77ebe797b01660dea 100644 (file)
@@ -72,6 +72,7 @@ struct bpf_htab {
        u32 n_buckets;
        u32 elem_size;
        struct bpf_sock_progs progs;
+       struct rcu_head rcu;
 };
 
 struct htab_elem {
@@ -89,8 +90,8 @@ enum smap_psock_state {
 struct smap_psock_map_entry {
        struct list_head list;
        struct sock **entry;
-       struct htab_elem *hash_link;
-       struct bpf_htab *htab;
+       struct htab_elem __rcu *hash_link;
+       struct bpf_htab __rcu *htab;
 };
 
 struct smap_psock {
@@ -120,6 +121,7 @@ struct smap_psock {
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;
+       spinlock_t maps_lock;
 
        /* Back reference used when sock callback trigger sockmap operations */
        struct sock *sock;
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 {
@@ -161,7 +164,42 @@ out:
        return !empty;
 }
 
-static struct proto tcp_bpf_proto;
+enum {
+       SOCKMAP_IPV4,
+       SOCKMAP_IPV6,
+       SOCKMAP_NUM_PROTS,
+};
+
+enum {
+       SOCKMAP_BASE,
+       SOCKMAP_TX,
+       SOCKMAP_NUM_CONFIGS,
+};
+
+static struct proto *saved_tcpv6_prot __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
+                        struct proto *base)
+{
+       prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].close                = bpf_tcp_close;
+       prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
+       prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
+
+       prot[SOCKMAP_TX]                        = prot[SOCKMAP_BASE];
+       prot[SOCKMAP_TX].sendmsg                = bpf_tcp_sendmsg;
+       prot[SOCKMAP_TX].sendpage               = bpf_tcp_sendpage;
+}
+
+static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
+{
+       int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
+       int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
+
+       sk->sk_prot = &bpf_tcp_prots[family][conf];
+}
+
 static int bpf_tcp_init(struct sock *sk)
 {
        struct smap_psock *psock;
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk)
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
-       if (psock->bpf_tx_msg) {
-               tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
-               tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
-               tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
-               tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+       /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
+       if (sk->sk_family == AF_INET6 &&
+           unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+               spin_lock_bh(&tcpv6_prot_lock);
+               if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                       build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
+                       smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+               }
+               spin_unlock_bh(&tcpv6_prot_lock);
        }
-
-       sk->sk_prot = &tcp_bpf_proto;
+       update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
 }
@@ -219,24 +260,64 @@ out:
        rcu_read_unlock();
 }
 
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
+                                        u32 hash, void *key, u32 key_size)
+{
+       struct htab_elem *l;
+
+       hlist_for_each_entry_rcu(l, head, hash_node) {
+               if (l->hash == hash && !memcmp(&l->key, key, key_size))
+                       return l;
+       }
+
+       return NULL;
+}
+
+static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &__select_bucket(htab, hash)->head;
+}
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
        atomic_dec(&htab->count);
        kfree_rcu(l, rcu);
 }
 
+static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
+                                                 struct smap_psock *psock)
+{
+       struct smap_psock_map_entry *e;
+
+       spin_lock_bh(&psock->maps_lock);
+       e = list_first_entry_or_null(&psock->maps,
+                                    struct smap_psock_map_entry,
+                                    list);
+       if (e)
+               list_del(&e->list);
+       spin_unlock_bh(&psock->maps_lock);
+       return e;
+}
+
 static void bpf_tcp_close(struct sock *sk, long timeout)
 {
        void (*close_fun)(struct sock *sk, long timeout);
-       struct smap_psock_map_entry *e, *tmp;
+       struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;
        struct sock *osk;
 
+       lock_sock(sk);
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
+               release_sock(sk);
                return sk->sk_prot->close(sk, timeout);
        }
 
@@ -247,7 +328,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
         */
        close_fun = psock->save_close;
 
-       write_lock_bh(&sk->sk_callback_lock);
        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
@@ -260,21 +340,40 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(md);
        }
 
-       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+       e = psock_map_pop(sk, psock);
+       while (e) {
                if (e->entry) {
                        osk = cmpxchg(e->entry, sk, NULL);
                        if (osk == sk) {
-                               list_del(&e->list);
                                smap_release_sock(psock, sk);
                        }
                } else {
-                       hlist_del_rcu(&e->hash_link->hash_node);
-                       smap_release_sock(psock, e->hash_link->sk);
-                       free_htab_elem(e->htab, e->hash_link);
+                       struct htab_elem *link = rcu_dereference(e->hash_link);
+                       struct bpf_htab *htab = rcu_dereference(e->htab);
+                       struct hlist_head *head;
+                       struct htab_elem *l;
+                       struct bucket *b;
+
+                       b = __select_bucket(htab, link->hash);
+                       head = &b->head;
+                       raw_spin_lock_bh(&b->lock);
+                       l = lookup_elem_raw(head,
+                                           link->hash, link->key,
+                                           htab->map.key_size);
+                       /* If another thread deleted this object, skip deletion.
+                        * The refcnt on psock may or may not be zero.
+                        */
+                       if (l) {
+                               hlist_del_rcu(&link->hash_node);
+                               smap_release_sock(psock, link->sk);
+                               free_htab_elem(htab, link);
+                       }
+                       raw_spin_unlock_bh(&b->lock);
                }
+               e = psock_map_pop(sk, psock);
        }
-       write_unlock_bh(&sk->sk_callback_lock);
        rcu_read_unlock();
+       release_sock(sk);
        close_fun(sk, timeout);
 }
 
@@ -472,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        while (sg[i].length) {
                free += sg[i].length;
                sk_mem_uncharge(sk, sg[i].length);
-               put_page(sg_page(&sg[i]));
+               if (!md->skb)
+                       put_page(sg_page(&sg[i]));
                sg[i].length = 0;
                sg[i].page_link = 0;
                sg[i].offset = 0;
@@ -481,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
                if (i == MAX_SKB_FRAGS)
                        i = 0;
        }
+       if (md->skb)
+               consume_skb(md->skb);
 
        return free;
 }
@@ -1111,8 +1213,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
 
 static int bpf_tcp_ulp_register(void)
 {
-       tcp_bpf_proto = tcp_prot;
-       tcp_bpf_proto.close = bpf_tcp_close;
+       build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
        /* Once BPF TX ULP is registered it is never unregistered. It
         * will be in the ULP list for the lifetime of the system.
         * Duplicate registrations are not a problem.
@@ -1135,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
        skb->sk = psock->sock;
-       bpf_compute_data_pointers(skb);
+       bpf_compute_data_end_sk_skb(skb);
        preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        preempt_enable();
@@ -1357,7 +1458,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
                tcp_cleanup_ulp(sock);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
                clear_bit(SMAP_TX_RUNNING, &psock->state);
                rcu_assign_sk_user_data(sock, NULL);
                call_rcu_sched(&psock->rcu, smap_destroy_psock);
@@ -1388,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
         * any socket yet.
         */
        skb->sk = psock->sock;
-       bpf_compute_data_pointers(skb);
+       bpf_compute_data_end_sk_skb(skb);
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        skb->sk = NULL;
        rcu_read_unlock();
@@ -1508,6 +1611,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node)
        INIT_LIST_HEAD(&psock->maps);
        INIT_LIST_HEAD(&psock->ingress);
        refcount_set(&psock->refcnt, 1);
+       spin_lock_init(&psock->maps_lock);
 
        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
@@ -1564,18 +1668,32 @@ free_stab:
        return ERR_PTR(err);
 }
 
-static void smap_list_remove(struct smap_psock *psock,
-                            struct sock **entry,
-                            struct htab_elem *hash_link)
+static void smap_list_map_remove(struct smap_psock *psock,
+                                struct sock **entry)
 {
        struct smap_psock_map_entry *e, *tmp;
 
+       spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-               if (e->entry == entry || e->hash_link == hash_link) {
+               if (e->entry == entry)
+                       list_del(&e->list);
+       }
+       spin_unlock_bh(&psock->maps_lock);
+}
+
+static void smap_list_hash_remove(struct smap_psock *psock,
+                                 struct htab_elem *hash_link)
+{
+       struct smap_psock_map_entry *e, *tmp;
+
+       spin_lock_bh(&psock->maps_lock);
+       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+               struct htab_elem *c = rcu_dereference(e->hash_link);
+
+               if (c == hash_link)
                        list_del(&e->list);
-                       break;
-               }
        }
+       spin_unlock_bh(&psock->maps_lock);
 }
 
 static void sock_map_free(struct bpf_map *map)
@@ -1601,7 +1719,6 @@ static void sock_map_free(struct bpf_map *map)
                if (!sock)
                        continue;
 
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -1609,10 +1726,9 @@ static void sock_map_free(struct bpf_map *map)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, &stab->sock_map[i], NULL);
+                       smap_list_map_remove(psock, &stab->sock_map[i]);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
 
@@ -1661,17 +1777,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
        if (!sock)
                return -EINVAL;
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
        if (!psock)
                goto out;
 
        if (psock->bpf_parse)
                smap_stop_sock(psock, sock);
-       smap_list_remove(psock, &stab->sock_map[k], NULL);
+       smap_list_map_remove(psock, &stab->sock_map[k]);
        smap_release_sock(psock, sock);
 out:
-       write_unlock_bh(&sock->sk_callback_lock);
        return 0;
 }
 
@@ -1752,7 +1866,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                }
        }
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
 
        /* 2. Do not allow inheriting programs if psock exists and has
@@ -1789,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
                if (!e) {
                        err = -ENOMEM;
-                       goto out_progs;
+                       goto out_free;
                }
        }
 
@@ -1809,7 +1922,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                if (err)
                        goto out_free;
                smap_init_progs(psock, verdict, parse);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_start_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
        }
 
        /* 4. Place psock in sockmap for use and stop any programs on
@@ -1819,9 +1934,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         */
        if (map_link) {
                e->entry = map_link;
+               spin_lock_bh(&psock->maps_lock);
                list_add_tail(&e->list, &psock->maps);
+               spin_unlock_bh(&psock->maps_lock);
        }
-       write_unlock_bh(&sock->sk_callback_lock);
        return err;
 out_free:
        smap_release_sock(psock, sock);
@@ -1832,7 +1948,6 @@ out_progs:
        }
        if (tx_msg)
                bpf_prog_put(tx_msg);
-       write_unlock_bh(&sock->sk_callback_lock);
        kfree(e);
        return err;
 }
@@ -1869,10 +1984,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (osock) {
                struct smap_psock *opsock = smap_psock_sk(osock);
 
-               write_lock_bh(&osock->sk_callback_lock);
-               smap_list_remove(opsock, &stab->sock_map[i], NULL);
+               smap_list_map_remove(opsock, &stab->sock_map[i]);
                smap_release_sock(opsock, osock);
-               write_unlock_bh(&osock->sk_callback_lock);
        }
 out:
        return err;
@@ -1915,6 +2028,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
        return 0;
 }
 
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog)
+{
+       int ufd = attr->target_fd;
+       struct bpf_map *map;
+       struct fd f;
+       int err;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       err = sock_map_prog(map, prog, attr->attach_type);
+       fdput(f);
+       return err;
+}
+
 static void *sock_map_lookup(struct bpf_map *map, void *key)
 {
        return NULL;
@@ -1944,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EOPNOTSUPP;
        }
 
+       lock_sock(skops.sk);
+       preempt_disable();
+       rcu_read_lock();
        err = sock_map_ctx_update_elem(&skops, map, key, flags);
+       rcu_read_unlock();
+       preempt_enable();
+       release_sock(skops.sk);
        fput(socket->file);
        return err;
 }
@@ -2043,14 +2180,13 @@ free_htab:
        return ERR_PTR(err);
 }
 
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+static void __bpf_htab_free(struct rcu_head *rcu)
 {
-       return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
+       struct bpf_htab *htab;
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
-       return &__select_bucket(htab, hash)->head;
+       htab = container_of(rcu, struct bpf_htab, rcu);
+       bpf_map_area_free(htab->buckets);
+       kfree(htab);
 }
 
 static void sock_hash_free(struct bpf_map *map)
@@ -2069,16 +2205,18 @@ static void sock_hash_free(struct bpf_map *map)
         */
        rcu_read_lock();
        for (i = 0; i < htab->n_buckets; i++) {
-               struct hlist_head *head = select_bucket(htab, i);
+               struct bucket *b = __select_bucket(htab, i);
+               struct hlist_head *head;
                struct hlist_node *n;
                struct htab_elem *l;
 
+               raw_spin_lock_bh(&b->lock);
+               head = &b->head;
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        struct sock *sock = l->sk;
                        struct smap_psock *psock;
 
                        hlist_del_rcu(&l->hash_node);
-                       write_lock_bh(&sock->sk_callback_lock);
                        psock = smap_psock_sk(sock);
                        /* This check handles a racing sock event that can get
                         * the sk_callback_lock before this case but after xchg
@@ -2086,16 +2224,15 @@ static void sock_hash_free(struct bpf_map *map)
                         * (psock) to be null and queued for garbage collection.
                         */
                        if (likely(psock)) {
-                               smap_list_remove(psock, NULL, l);
+                               smap_list_hash_remove(psock, l);
                                smap_release_sock(psock, sock);
                        }
-                       write_unlock_bh(&sock->sk_callback_lock);
-                       kfree(l);
+                       free_htab_elem(htab, l);
                }
+               raw_spin_unlock_bh(&b->lock);
        }
        rcu_read_unlock();
-       bpf_map_area_free(htab->buckets);
-       kfree(htab);
+       call_rcu(&htab->rcu, __bpf_htab_free);
 }
 
 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
@@ -2122,19 +2259,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        return l_new;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
-                                        u32 hash, void *key, u32 key_size)
-{
-       struct htab_elem *l;
-
-       hlist_for_each_entry_rcu(l, head, hash_node) {
-               if (l->hash == hash && !memcmp(&l->key, key, key_size))
-                       return l;
-       }
-
-       return NULL;
-}
-
 static inline u32 htab_map_hash(const void *key, u32 key_len)
 {
        return jhash(key, key_len, 0);
@@ -2230,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (err)
                goto err;
 
-       /* bpf_map_update_elem() can be called in_irq() */
+       /* psock is valid here because otherwise the *ctx_update_elem call
+        * above would have returned an error. It is safe to skip the check.
+        */
+       psock = smap_psock_sk(sock);
        raw_spin_lock_bh(&b->lock);
        l_old = lookup_elem_raw(head, hash, key, key_size);
        if (l_old && map_flags == BPF_NOEXIST) {
@@ -2248,15 +2375,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                goto bucket_err;
        }
 
-       psock = smap_psock_sk(sock);
-       if (unlikely(!psock)) {
-               err = -EINVAL;
-               goto bucket_err;
-       }
-
-       e->hash_link = l_new;
-       e->htab = container_of(map, struct bpf_htab, map);
+       rcu_assign_pointer(e->hash_link, l_new);
+       rcu_assign_pointer(e->htab,
+                          container_of(map, struct bpf_htab, map));
+       spin_lock_bh(&psock->maps_lock);
        list_add_tail(&e->list, &psock->maps);
+       spin_unlock_bh(&psock->maps_lock);
 
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
@@ -2266,19 +2390,17 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                psock = smap_psock_sk(l_old->sk);
 
                hlist_del_rcu(&l_old->hash_node);
-               smap_list_remove(psock, NULL, l_old);
+               smap_list_hash_remove(psock, l_old);
                smap_release_sock(psock, l_old->sk);
                free_htab_elem(htab, l_old);
        }
        raw_spin_unlock_bh(&b->lock);
        return 0;
 bucket_err:
+       smap_release_sock(psock, sock);
        raw_spin_unlock_bh(&b->lock);
 err:
        kfree(e);
-       psock = smap_psock_sk(sock);
-       if (psock)
-               smap_release_sock(psock, sock);
        return err;
 }
 
@@ -2300,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       lock_sock(skops.sk);
+       preempt_disable();
+       rcu_read_lock();
        err = sock_hash_ctx_update_elem(&skops, map, key, flags);
+       rcu_read_unlock();
+       preempt_enable();
+       release_sock(skops.sk);
        fput(socket->file);
        return err;
 }
@@ -2326,7 +2454,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                struct smap_psock *psock;
 
                hlist_del_rcu(&l->hash_node);
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -2334,10 +2461,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, NULL, l);
+                       smap_list_hash_remove(psock, l);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
                free_htab_elem(htab, l);
                ret = 0;
        }
@@ -2359,10 +2485,8 @@ struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
        b = __select_bucket(htab, hash);
        head = &b->head;
 
-       raw_spin_lock_bh(&b->lock);
        l = lookup_elem_raw(head, hash, key, key_size);
        sk = l ? l->sk : NULL;
-       raw_spin_unlock_bh(&b->lock);
        return sk;
 }
 
@@ -2383,6 +2507,7 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_get_next_key = sock_hash_get_next_key,
        .map_update_elem = sock_hash_update_elem,
        .map_delete_elem = sock_hash_delete_elem,
+       .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
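
With the locking pushed into the update path (and the matching syscall change below), a
sockmap insert from userspace is an ordinary map update whose value is the socket's fd.
Sketch via libbpf:

        /* Sketch: place a connected TCP socket into slot 0 of a sockmap. */
        __u32 key = 0, value = sock_fd;
        int err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
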
index 0fa20624707f23b15d3200c1302765a119ee9fc1..a31a1ba0f8eada88e03dc4a73ad9c0305cc70198 100644 (file)
@@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr)
        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_update_elem(map, key, value, attr->flags);
                goto out;
-       } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+       } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
+                  map->map_type == BPF_MAP_TYPE_SOCKHASH ||
+                  map->map_type == BPF_MAP_TYPE_SOCKMAP) {
                err = map->ops->map_update_elem(map, key, value, attr->flags);
                goto out;
        }
@@ -1034,14 +1036,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
-               int i;
-
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-
-               for (i = 0; i < prog->aux->func_cnt; i++)
-                       bpf_prog_kallsyms_del(prog->aux->func[i]);
-               bpf_prog_kallsyms_del(prog);
+               bpf_prog_kallsyms_del_all(prog);
 
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
@@ -1358,9 +1355,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err < 0)
                goto free_used_maps;
 
-       /* eBPF program is ready to be JITed */
-       if (!prog->bpf_func)
-               prog = bpf_prog_select_runtime(prog, &err);
+       prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
@@ -1384,6 +1379,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        return err;
 
 free_used_maps:
+       bpf_prog_kallsyms_del_subprogs(prog);
        free_used_maps(prog->aux);
 free_prog:
        bpf_prog_uncharge_memlock(prog);
@@ -1489,8 +1485,6 @@ out_free_tp:
        return err;
 }
 
-#ifdef CONFIG_CGROUP_BPF
-
 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
                                             enum bpf_attach_type attach_type)
 {
@@ -1505,40 +1499,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr,
-                              int type, bool attach)
-{
-       struct bpf_prog *prog = NULL;
-       int ufd = attr->target_fd;
-       struct bpf_map *map;
-       struct fd f;
-       int err;
-
-       f = fdget(ufd);
-       map = __bpf_map_get(f);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-
-       if (attach) {
-               prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
-               if (IS_ERR(prog)) {
-                       fdput(f);
-                       return PTR_ERR(prog);
-               }
-       }
-
-       err = sock_map_prog(map, prog, attr->attach_type);
-       if (err) {
-               fdput(f);
-               if (prog)
-                       bpf_prog_put(prog);
-               return err;
-       }
-
-       fdput(f);
-       return 0;
-}
-
 #define BPF_F_ATTACH_MASK \
        (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
 
@@ -1546,7 +1506,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
-       struct cgroup *cgrp;
        int ret;
 
        if (!capable(CAP_NET_ADMIN))
@@ -1583,12 +1542,15 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
+               ptype = BPF_PROG_TYPE_SK_MSG;
+               break;
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
+               ptype = BPF_PROG_TYPE_SK_SKB;
+               break;
        case BPF_LIRC_MODE2:
-               return lirc_prog_attach(attr);
+               ptype = BPF_PROG_TYPE_LIRC_MODE2;
+               break;
        default:
                return -EINVAL;
        }
@@ -1602,18 +1564,20 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp)) {
-               bpf_prog_put(prog);
-               return PTR_ERR(cgrp);
+       switch (ptype) {
+       case BPF_PROG_TYPE_SK_SKB:
+       case BPF_PROG_TYPE_SK_MSG:
+               ret = sockmap_get_from_fd(attr, ptype, prog);
+               break;
+       case BPF_PROG_TYPE_LIRC_MODE2:
+               ret = lirc_prog_attach(attr, prog);
+               break;
+       default:
+               ret = cgroup_bpf_prog_attach(attr, ptype, prog);
        }
 
-       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
-                               attr->attach_flags);
        if (ret)
                bpf_prog_put(prog);
-       cgroup_put(cgrp);
-
        return ret;
 }
 
@@ -1622,9 +1586,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
-       struct bpf_prog *prog;
-       struct cgroup *cgrp;
-       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -1657,29 +1618,17 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
        case BPF_LIRC_MODE2:
                return lirc_prog_detach(attr);
        default:
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-
-       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
-       if (IS_ERR(prog))
-               prog = NULL;
-
-       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
-       if (prog)
-               bpf_prog_put(prog);
-       cgroup_put(cgrp);
-       return ret;
+       return cgroup_bpf_prog_detach(attr, ptype);
 }
 
 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -1687,9 +1636,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 static int bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
 {
-       struct cgroup *cgrp;
-       int ret;
-
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
        if (CHECK_ATTR(BPF_PROG_QUERY))
@@ -1717,14 +1663,9 @@ static int bpf_prog_query(const union bpf_attr *attr,
        default:
                return -EINVAL;
        }
-       cgrp = cgroup_get_from_fd(attr->query.target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-       ret = cgroup_bpf_query(cgrp, attr, uattr);
-       cgroup_put(cgrp);
-       return ret;
+
+       return cgroup_bpf_prog_query(attr, uattr);
 }
-#endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
 
@@ -2371,7 +2312,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
-#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
@@ -2381,7 +2321,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_QUERY:
                err = bpf_prog_query(&attr, uattr);
                break;
-#endif
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
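
For non-cgroup targets the same syscall now simply routes by type; attaching a stream
verdict program to a sockmap passes the map fd where a cgroup fd would otherwise go.
Sketch via libbpf:

        /* Sketch: the target of BPF_PROG_ATTACH is the sockmap fd here. */
        int err = bpf_prog_attach(prog_fd, map_fd,
                                  BPF_SK_SKB_STREAM_VERDICT, 0);
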
index 9e2bf834f13a21090b566862a853e7567ee03ac9..63aaac52a26553fb29529790cf0350f6dafa504b 100644 (file)
@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                if (insn->code != (BPF_JMP | BPF_CALL) ||
                    insn->src_reg != BPF_PSEUDO_CALL)
                        continue;
+               /* Upon error here we cannot fall back to the interpreter but
+                * need a hard reject of the program. Thus -EFAULT is
+                * propagated in any case.
+                */
                subprog = find_subprog(env, i + insn->imm + 1);
                if (subprog < 0) {
                        WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 
        func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
        if (!func)
-               return -ENOMEM;
+               goto out_undo_insn;
 
        for (i = 0; i < env->subprog_cnt; i++) {
                subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                tmp = bpf_int_jit_compile(func[i]);
                if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
                        verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
-                       err = -EFAULT;
+                       err = -ENOTSUPP;
                        goto out_free;
                }
                cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
                if (func[i])
                        bpf_jit_free(func[i]);
        kfree(func);
+out_undo_insn:
        /* cleanup main prog to be interpreted */
        prog->jit_requested = 0;
        for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
                err = jit_subprogs(env);
                if (err == 0)
                        return 0;
+               if (err == -EFAULT)
+                       return err;
        }
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
        for (i = 0; i < prog->len; i++, insn++) {
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
new file mode 100644 (file)
index 0000000..9bd5430
--- /dev/null
@@ -0,0 +1,50 @@
+
+config HAS_DMA
+       bool
+       depends on !NO_DMA
+       default y
+
+config NEED_SG_DMA_LENGTH
+       bool
+
+config NEED_DMA_MAP_STATE
+       bool
+
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config HAVE_GENERIC_DMA_COHERENT
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+       bool
+       select NEED_DMA_MAP_STATE
+
+config DMA_DIRECT_OPS
+       bool
+       depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+       bool
+       depends on HAS_DMA
+       select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_VIRT_OPS
+       bool
+       depends on HAS_DMA
+
+config SWIOTLB
+       bool
+       select DMA_DIRECT_OPS
+       select NEED_DMA_MAP_STATE
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
new file mode 100644 (file)
index 0000000..6de44e4
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_HAS_DMA)                  += mapping.o
+obj-$(CONFIG_DMA_CMA)                  += contiguous.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DIRECT_OPS)           += direct.o
+obj-$(CONFIG_DMA_NONCOHERENT_OPS)      += noncoherent.o
+obj-$(CONFIG_DMA_VIRT_OPS)             += virt.o
+obj-$(CONFIG_DMA_API_DEBUG)            += debug.o
+obj-$(CONFIG_SWIOTLB)                  += swiotlb.o
+
similarity index 100%
rename from lib/dma-debug.c
rename to kernel/dma/debug.c
similarity index 100%
rename from lib/dma-direct.c
rename to kernel/dma/direct.c
similarity index 99%
rename from drivers/base/dma-mapping.c
rename to kernel/dma/mapping.c
index f831a582209c63b7412a6a4276ed7f1205371610..d2a92ddaac4d14c8683433856672fddbace7a4c9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
+ * arch-independent dma-mapping routines
  *
  * Copyright (c) 2006  SUSE Linux Products GmbH
  * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
similarity index 99%
rename from lib/swiotlb.c
rename to kernel/dma/swiotlb.c
index 04b68d9dfface72cd56d1f48ec0a58c68d2c2032..904541055792bf227faaf49734cd737142fd4111 100644 (file)
@@ -1085,3 +1085,4 @@ const struct dma_map_ops swiotlb_dma_ops = {
        .unmap_page             = swiotlb_unmap_page,
        .dma_supported          = dma_direct_supported,
 };
+EXPORT_SYMBOL(swiotlb_dma_ops);
similarity index 98%
rename from lib/dma-virt.c
rename to kernel/dma/virt.c
index 8e61a02ef9ca06cb2aabfaef7484a44f31610ef1..631ddec4b60a8b94576b1c3a36db71a818bff5d8 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- *     lib/dma-virt.c
- *
  * DMA operations that map to virtual addresses without flushing memory.
  */
 #include <linux/export.h>
index 80cca2b30c4fe02c1baca59b08c0cae158a7368e..8f0434a9951af00bce3f009c21ddbd3c5cd61e01 100644 (file)
@@ -6482,7 +6482,7 @@ void perf_prepare_sample(struct perf_event_header *header,
                data->phys_addr = perf_virt_to_phys(data->addr);
 }
 
-static void __always_inline
+static __always_inline void
 __perf_event_output(struct perf_event *event,
                    struct perf_sample_data *data,
                    struct pt_regs *regs,
index 045a37e9ddee3255fac6ab34b80682f4e2e968e1..5d3cf407e37469a7b1cafab8c4af303d074bbdf8 100644 (file)
@@ -103,7 +103,7 @@ out:
        preempt_enable();
 }
 
-static bool __always_inline
+static __always_inline bool
 ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
@@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
                return CIRC_SPACE(tail, head, data_size) >= size;
 }
 
-static int __always_inline
+static __always_inline int
 __perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
@@ -414,7 +414,7 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 {
        if (rb->aux_overwrite)
                return false;
index 9440d61b925ca08faa3beb7d3a02fedfdd0a9b84..1b27babc4c780484b87757f6257cfaab3b5b6772 100644 (file)
@@ -303,11 +303,36 @@ struct kmem_cache *files_cachep;
 struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-struct kmem_cache *vm_area_cachep;
+static struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+       if (vma)
+               vma_init(vma, mm);
+       return vma;
+}
+
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+{
+       struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+
+       if (new) {
+               *new = *orig;
+               INIT_LIST_HEAD(&new->anon_vma_chain);
+       }
+       return new;
+}
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+       kmem_cache_free(vm_area_cachep, vma);
+}
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
        void *stack = task_stack_page(tsk);
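
Since vm_area_cachep is now static to this file, all VMA allocation funnels through the three helpers above. A hypothetical out-of-fork caller (addr and len are illustrative) would now read roughly:

	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);	/* zeroed and vma_init()-ed */
	if (!vma)
		return -ENOMEM;

	vma->vm_start = addr;
	vma->vm_end   = addr + len;
	/* set vm_flags, vm_page_prot, vm_ops as needed */

	if (insert_vm_struct(mm, vma)) {
		vm_area_free(vma);
		return -ENOMEM;
	}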
@@ -455,11 +480,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                goto fail_nomem;
                        charge = len;
                }
-               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               tmp = vm_area_dup(mpnt);
                if (!tmp)
                        goto fail_nomem;
-               *tmp = *mpnt;
-               INIT_LIST_HEAD(&tmp->anon_vma_chain);
                retval = vma_dup_policy(mpnt, tmp);
                if (retval)
                        goto fail_nomem_policy;
@@ -539,7 +562,7 @@ fail_uprobe_end:
 fail_nomem_anon_vma_fork:
        mpol_put(vma_policy(tmp));
 fail_nomem_policy:
-       kmem_cache_free(vm_area_cachep, tmp);
+       vm_area_free(tmp);
 fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
index 4dadeb3d666621239a7273f7651847fa7099dacf..6f636136cccc05993e20034e92effc0c0fc3e7e2 100644 (file)
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
        BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
        BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
        BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+       BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
index 481951bf091d49fbe4378bb21504b6482e11919f..486dedbd9af58bfe9edf467ab107bcc66a16d831 100644 (file)
@@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task)
 static void __kthread_parkme(struct kthread *self)
 {
        for (;;) {
-               set_current_state(TASK_PARKED);
+               /*
+                * TASK_PARKED is a special state; we must serialize against
+                * possible pending wakeups to avoid store-store collisions on
+                * task->state.
+                *
+                * Such a collision might possibly result in the task state
+                * changing from TASK_PARKED and us failing the
+                * wait_task_inactive() in kthread_park().
+                */
+               set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;
+
+               complete_all(&self->parked);
                schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -191,11 +202,6 @@ void kthread_parkme(void)
 }
 EXPORT_SYMBOL_GPL(kthread_parkme);
 
-void kthread_park_complete(struct task_struct *k)
-{
-       complete_all(&to_kthread(k)->parked);
-}
-
 static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
@@ -319,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
+               char name[TASK_COMM_LEN];
 
-               vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+               /*
+                * task is already visible to other tasks, so updating
+                * COMM must be protected.
+                */
+               vsnprintf(name, sizeof(name), namefmt, args);
+               set_task_comm(task, name);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
@@ -461,6 +473,9 @@ void kthread_unpark(struct task_struct *k)
 
        reinit_completion(&kthread->parked);
        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       /*
+        * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
+        */
        wake_up_state(k, TASK_PARKED);
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -487,7 +502,16 @@ int kthread_park(struct task_struct *k)
        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
+               /*
+                * Wait for __kthread_parkme() to complete(); this means we
+                * _will_ have TASK_PARKED and are about to call schedule().
+                */
                wait_for_completion(&kthread->parked);
+               /*
+                * Now wait for that schedule() to complete and the task to
+                * get scheduled out.
+                */
+               WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }
 
        return 0;
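
Taken together, parking is now a two-sided handshake. A rough timeline of the ordering it guarantees (assuming no spurious wakeups; flag and completion names as above):

/* kthread_park() caller                 parked thread, __kthread_parkme()
 *
 * set_bit(KTHREAD_SHOULD_PARK)
 * wake_up_process(k)
 *                                       set_special_state(TASK_PARKED)
 *                                       test_bit(KTHREAD_SHOULD_PARK) -> set
 *                                       complete_all(&self->parked)
 * wait_for_completion(&kthread->parked)
 *                                       schedule()      // really asleep now
 * wait_task_inactive(k, TASK_PARKED)    // observes the schedule() above
 */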
index edcac5de7ebcdb489113800c941274d8887f9b56..5fa4d3138bf106cd87f822636652c144e846f3aa 100644 (file)
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
        if (unlikely(!debug_locks))
                return;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
                break;
        }
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
index 8402b3349dca40a53a82a34900165c1054ae2fff..c28224347d6905b30301df5bda3f88724297b051 100644 (file)
@@ -365,7 +365,7 @@ static struct lock_torture_ops mutex_lock_ops = {
 };
 
 #include <linux/ww_mutex.h>
-static DEFINE_WW_CLASS(torture_ww_class);
+static DEFINE_WD_CLASS(torture_ww_class);
 static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
 static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
 static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
index f44f658ae629b1f705945f8949ce56abb1f4113b..1a81a1257b3f3aca67e00bbe9a3a0b4e465b0e4f 100644 (file)
@@ -173,6 +173,21 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
        return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
 }
 
+/*
+ * Add @waiter to a given location in the lock wait_list and set the
+ * FLAG_WAITERS flag if it's the first waiter.
+ */
+static void __sched
+__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+                  struct list_head *list)
+{
+       debug_mutex_add_waiter(lock, waiter, current);
+
+       list_add_tail(&waiter->list, list);
+       if (__mutex_waiter_is_first(lock, waiter))
+               __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+}
+
 /*
  * Give up ownership to a specific task, when @task = NULL, this is equivalent
  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -244,6 +259,22 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
+/*
+ * Wait-Die:
+ *   The newer transactions are killed when:
+ *     It (the new transaction) makes a request for a lock being held
+ *     by an older transaction.
+ *
+ * Wound-Wait:
+ *   The newer transactions are wounded when:
+ *     An older transaction makes a request for a lock being held by
+ *     the newer transaction.
+ */
+
+/*
+ * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
+ * it.
+ */
 static __always_inline void
 ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
 {
@@ -282,26 +313,108 @@ ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
 #endif
        ww_ctx->acquired++;
+       ww->ctx = ww_ctx;
 }
 
+/*
+ * Determine if context @a is 'after' context @b. IOW, @a is a younger
+ * transaction than @b and, depending on the algorithm, either needs to wait for
+ * @b or die.
+ */
 static inline bool __sched
 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
 {
-       return a->stamp - b->stamp <= LONG_MAX &&
-              (a->stamp != b->stamp || a > b);
+
+       return (signed long)(a->stamp - b->stamp) > 0;
+}
+
+/*
+ * Wait-Die; wake a younger waiter context (when locks held) such that it can
+ * die.
+ *
+ * Among waiters with context, only the first one can have other locks acquired
+ * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
+ * __ww_mutex_check_kill() wake any but the earliest context.
+ */
+static bool __sched
+__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
+              struct ww_acquire_ctx *ww_ctx)
+{
+       if (!ww_ctx->is_wait_die)
+               return false;
+
+       if (waiter->ww_ctx->acquired > 0 &&
+                       __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
+               debug_mutex_wake_waiter(lock, waiter);
+               wake_up_process(waiter->task);
+       }
+
+       return true;
+}
+
+/*
+ * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
+ *
+ * Wound the lock holder if there are waiters with older transactions than
+ * the lock holder's. Even though multiple waiters may wound the lock holder,
+ * it's sufficient that only one does.
+ */
+static bool __ww_mutex_wound(struct mutex *lock,
+                            struct ww_acquire_ctx *ww_ctx,
+                            struct ww_acquire_ctx *hold_ctx)
+{
+       struct task_struct *owner = __mutex_owner(lock);
+
+       lockdep_assert_held(&lock->wait_lock);
+
+       /*
+        * Possible through __ww_mutex_add_waiter() when we race with
+        * ww_mutex_set_context_fastpath(). In that case we'll get here again
+        * through __ww_mutex_check_waiters().
+        */
+       if (!hold_ctx)
+               return false;
+
+       /*
+        * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
+        * it cannot go away because we'll have FLAG_WAITERS set and hold
+        * wait_lock.
+        */
+       if (!owner)
+               return false;
+
+       if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
+               hold_ctx->wounded = 1;
+
+               /*
+                * wake_up_process() paired with set_current_state()
+                * inserts sufficient barriers to make sure @owner either sees
+                * it's wounded in __ww_mutex_lock_check_stamp() or has a
+                * wakeup pending to re-read the wounded state.
+                */
+               if (owner != current)
+                       wake_up_process(owner);
+
+               return true;
+       }
+
+       return false;
 }
 
 /*
- * Wake up any waiters that may have to back off when the lock is held by the
- * given context.
+ * We just acquired @lock under @ww_ctx; if there are later contexts waiting
+ * behind us on the wait-list, check if they need to die, or wound us.
  *
- * Due to the invariants on the wait list, this can only affect the first
- * waiter with a context.
+ * See __ww_mutex_add_waiter() for the list-order construction; basically the
+ * list is ordered by stamp, smallest (oldest) first.
+ *
+ * This relies on never mixing wait-die/wound-wait on the same wait-list,
+ * which is currently ensured by that being a ww_class property.
  *
  * The current task must not be on the wait list.
  */
 static void __sched
-__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 {
        struct mutex_waiter *cur;
 
@@ -311,66 +424,51 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
                if (!cur->ww_ctx)
                        continue;
 
-               if (cur->ww_ctx->acquired > 0 &&
-                   __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
-                       debug_mutex_wake_waiter(lock, cur);
-                       wake_up_process(cur->task);
-               }
-
-               break;
+               if (__ww_mutex_die(lock, cur, ww_ctx) ||
+                   __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
+                       break;
        }
 }
 
 /*
- * After acquiring lock with fastpath or when we lost out in contested
- * slowpath, set ctx and wake up any waiters so they can recheck.
+ * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
+ * and wake up any waiters so they can recheck.
  */
 static __always_inline void
 ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
        ww_mutex_lock_acquired(lock, ctx);
 
-       lock->ctx = ctx;
-
        /*
         * The lock->ctx update should be visible on all cores before
-        * the atomic read is done, otherwise contended waiters might be
+        * the WAITERS check is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww_ctx == NULL
         * and keep spinning, or it will acquire wait_lock, add itself
         * to waiter list and sleep.
         */
-       smp_mb(); /* ^^^ */
+       smp_mb(); /* See comments above and below. */
 
        /*
-        * Check if lock is contended, if not there is nobody to wake up
+        * [W] ww->ctx = ctx        [W] MUTEX_FLAG_WAITERS
+        *     MB                       MB
+        * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
+        *
+        * The memory barrier above pairs with the memory barrier in
+        * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
+        * and/or !empty list.
         */
        if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
                return;
 
        /*
-        * Uh oh, we raced in fastpath, wake up everyone in this case,
-        * so they can see the new lock->ctx.
+        * Uh oh, we raced in fastpath, check if any of the waiters need to
+        * die or wound us.
         */
        spin_lock(&lock->base.wait_lock);
-       __ww_mutex_wakeup_for_backoff(&lock->base, ctx);
+       __ww_mutex_check_waiters(&lock->base, ctx);
        spin_unlock(&lock->base.wait_lock);
 }
 
-/*
- * After acquiring lock in the slowpath set ctx.
- *
- * Unlike for the fast path, the caller ensures that waiters are woken up where
- * necessary.
- *
- * Callers must hold the mutex wait_lock.
- */
-static __always_inline void
-ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-       ww_mutex_lock_acquired(lock, ctx);
-       lock->ctx = ctx;
-}
-
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 static inline
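
The rewritten __ww_ctx_stamp_after() above is the standard wraparound-safe sequence comparison: the unsigned difference is cast to signed, so the ordering survives the stamp counter wrapping. A worked example (values chosen to straddle the wrap):

/* @b was stamped just before the counter wrapped, @a just after. */
static bool stamp_after(unsigned long a, unsigned long b)
{
	return (signed long)(a - b) > 0;
}

/* stamp_after(5, ULONG_MAX - 2): a - b wraps around to 8, which is
 * positive, so @a is correctly classified as the younger transaction.
 * A naive "a > b" comparison would order these two the wrong way. */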
@@ -646,37 +744,83 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
 
+
+static __always_inline int __sched
+__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+       if (ww_ctx->acquired > 0) {
+#ifdef CONFIG_DEBUG_MUTEXES
+               struct ww_mutex *ww;
+
+               ww = container_of(lock, struct ww_mutex, base);
+               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
+               ww_ctx->contending_lock = ww;
+#endif
+               return -EDEADLK;
+       }
+
+       return 0;
+}
+
+
+/*
+ * Check the wound condition for the current lock acquire.
+ *
+ * Wound-Wait: If we're wounded, kill ourselves.
+ *
+ * Wait-Die: If we're trying to acquire a lock already held by an older
+ *           context, kill ourselves.
+ *
+ * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
+ * look at waiters before us in the wait-list.
+ */
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
-                           struct ww_acquire_ctx *ctx)
+__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
+                     struct ww_acquire_ctx *ctx)
 {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
        struct mutex_waiter *cur;
 
+       if (ctx->acquired == 0)
+               return 0;
+
+       if (!ctx->is_wait_die) {
+               if (ctx->wounded)
+                       return __ww_mutex_kill(lock, ctx);
+
+               return 0;
+       }
+
        if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
-               goto deadlock;
+               return __ww_mutex_kill(lock, ctx);
 
        /*
         * If there is a waiter in front of us that has a context, then its
-        * stamp is earlier than ours and we must back off.
+        * stamp is earlier than ours and we must kill ourselves.
         */
        cur = waiter;
        list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
-               if (cur->ww_ctx)
-                       goto deadlock;
+               if (!cur->ww_ctx)
+                       continue;
+
+               return __ww_mutex_kill(lock, ctx);
        }
 
        return 0;
-
-deadlock:
-#ifdef CONFIG_DEBUG_MUTEXES
-       DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-       ctx->contending_lock = ww;
-#endif
-       return -EDEADLK;
 }
 
+/*
+ * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
+ * first. Such that older contexts are preferred to acquire the lock over
+ * younger contexts.
+ *
+ * Waiters without context are interspersed in FIFO order.
+ *
+ * Furthermore, for Wait-Die we kill ourselves immediately when possible (there are
+ * older contexts already waiting) to avoid unnecessary waiting and for
+ * Wound-Wait ensure we wound the owning context when it is younger.
+ */
 static inline int __sched
 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
                      struct mutex *lock,
@@ -684,16 +828,21 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 {
        struct mutex_waiter *cur;
        struct list_head *pos;
+       bool is_wait_die;
 
        if (!ww_ctx) {
-               list_add_tail(&waiter->list, &lock->wait_list);
+               __mutex_add_waiter(lock, waiter, &lock->wait_list);
                return 0;
        }
 
+       is_wait_die = ww_ctx->is_wait_die;
+
        /*
         * Add the waiter before the first waiter with a higher stamp.
         * Waiters without a context are skipped to avoid starving
-        * them.
+        * them. Wait-Die waiters may die here. Wound-Wait waiters
+        * never die here, but they are sorted in stamp order and
+        * may wound the lock holder.
         */
        pos = &lock->wait_list;
        list_for_each_entry_reverse(cur, &lock->wait_list, list) {
@@ -701,16 +850,16 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
                        continue;
 
                if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
-                       /* Back off immediately if necessary. */
-                       if (ww_ctx->acquired > 0) {
-#ifdef CONFIG_DEBUG_MUTEXES
-                               struct ww_mutex *ww;
-
-                               ww = container_of(lock, struct ww_mutex, base);
-                               DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
-                               ww_ctx->contending_lock = ww;
-#endif
-                               return -EDEADLK;
+                       /*
+                        * Wait-Die: if we find an older context waiting, there
+                        * is no point in queueing behind it, as we'd have to
+                        * die the moment it would acquire the lock.
+                        */
+                       if (is_wait_die) {
+                               int ret = __ww_mutex_kill(lock, ww_ctx);
+
+                               if (ret)
+                                       return ret;
                        }
 
                        break;
@@ -718,17 +867,28 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 
                pos = &cur->list;
 
+               /* Wait-Die: ensure younger waiters die. */
+               __ww_mutex_die(lock, cur, ww_ctx);
+       }
+
+       __mutex_add_waiter(lock, waiter, pos);
+
+       /*
+        * Wound-Wait: if we're blocking on a mutex owned by a younger context,
+        * wound it so that we might proceed.
+        */
+       if (!is_wait_die) {
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+
                /*
-                * Wake up the waiter so that it gets a chance to back
-                * off.
+                * See ww_mutex_set_context_fastpath(). Orders setting
+                * MUTEX_FLAG_WAITERS vs the ww->ctx load,
+                * such that either we or the fastpath will wound @ww->ctx.
                 */
-               if (cur->ww_ctx->acquired > 0) {
-                       debug_mutex_wake_waiter(lock, cur);
-                       wake_up_process(cur->task);
-               }
+               smp_mb();
+               __ww_mutex_wound(lock, ww_ctx, ww->ctx);
        }
 
-       list_add_tail(&waiter->list, pos);
        return 0;
 }
 
@@ -751,6 +911,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        if (use_ww_ctx && ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
+
+               /*
+                * Reset the wounded flag after a kill. No other process can
+                * race and wound us here since they can't have a valid owner
+                * pointer if we don't have any locks held.
+                */
+               if (ww_ctx->acquired == 0)
+                       ww_ctx->wounded = 0;
        }
 
        preempt_disable();
@@ -772,7 +940,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         */
        if (__mutex_trylock(lock)) {
                if (use_ww_ctx && ww_ctx)
-                       __ww_mutex_wakeup_for_backoff(lock, ww_ctx);
+                       __ww_mutex_check_waiters(lock, ww_ctx);
 
                goto skip_wait;
        }
@@ -784,25 +952,26 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
        if (!use_ww_ctx) {
                /* add waiting tasks to the end of the waitqueue (FIFO): */
-               list_add_tail(&waiter.list, &lock->wait_list);
+               __mutex_add_waiter(lock, &waiter, &lock->wait_list);
+
 
 #ifdef CONFIG_DEBUG_MUTEXES
                waiter.ww_ctx = MUTEX_POISON_WW_CTX;
 #endif
        } else {
-               /* Add in stamp order, waking up waiters that must back off. */
+               /*
+                * Add in stamp order, waking up waiters that must kill
+                * themselves.
+                */
                ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
                if (ret)
-                       goto err_early_backoff;
+                       goto err_early_kill;
 
                waiter.ww_ctx = ww_ctx;
        }
 
        waiter.task = current;
 
-       if (__mutex_waiter_is_first(lock, &waiter))
-               __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
-
        set_current_state(state);
        for (;;) {
                /*
@@ -815,7 +984,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                        goto acquired;
 
                /*
-                * Check for signals and wound conditions while holding
+                * Check for signals and kill conditions while holding
                 * wait_lock. This ensures the lock cancellation is ordered
                 * against mutex_unlock() and wake-ups do not go missing.
                 */
@@ -824,8 +993,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                        goto err;
                }
 
-               if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-                       ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
+               if (use_ww_ctx && ww_ctx) {
+                       ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
                        if (ret)
                                goto err;
                }
@@ -859,6 +1028,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
        __set_current_state(TASK_RUNNING);
 
+       if (use_ww_ctx && ww_ctx) {
+               /*
+                * Wound-Wait; we stole the lock (!first_waiter), check the
+                * waiters as anyone might want to wound us.
+                */
+               if (!ww_ctx->is_wait_die &&
+                   !__mutex_waiter_is_first(lock, &waiter))
+                       __ww_mutex_check_waiters(lock, ww_ctx);
+       }
+
        mutex_remove_waiter(lock, &waiter, current);
        if (likely(list_empty(&lock->wait_list)))
                __mutex_clear_flag(lock, MUTEX_FLAGS);
@@ -870,7 +1049,7 @@ skip_wait:
        lock_acquired(&lock->dep_map, ip);
 
        if (use_ww_ctx && ww_ctx)
-               ww_mutex_set_context_slowpath(ww, ww_ctx);
+               ww_mutex_lock_acquired(ww, ww_ctx);
 
        spin_unlock(&lock->wait_lock);
        preempt_enable();
@@ -879,7 +1058,7 @@ skip_wait:
 err:
        __set_current_state(TASK_RUNNING);
        mutex_remove_waiter(lock, &waiter, current);
-err_early_backoff:
+err_early_kill:
        spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
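
None of this changes the caller-visible contract, although the class now also selects the algorithm (the DEFINE_WD_CLASS conversions in this commit keep the existing tests on Wait-Die). For reference, a sketch of the canonical acquire/backoff loop over an array of locks, in the style of the ww_mutex design documentation (my_ww_class, locks and n are illustrative):

static void lock_all(struct ww_mutex **locks, int n,
		     struct ww_acquire_ctx *ctx)
{
	int i, contended = -1;

	ww_acquire_init(ctx, &my_ww_class);
retry:
	for (i = 0; i < n; i++) {
		if (i == contended)	/* already held via lock_slow() */
			continue;
		if (ww_mutex_lock(locks[i], ctx) == -EDEADLK) {
			int j;

			/* killed (or noticed a wound): back off by
			 * dropping every lock we currently hold */
			for (j = 0; j < i; j++)
				ww_mutex_unlock(locks[j]);
			if (contended > i)
				ww_mutex_unlock(locks[contended]);
			/* sleep until the contended lock is free and take
			 * it, so the retry pass starts out holding it */
			ww_mutex_lock_slow(locks[i], ctx);
			contended = i;
			goto retry;
		}
	}
	ww_acquire_done(ctx);
}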
index bc1e507be9ff7aea311261e78002d53375f9a6d7..776308d2fa9e9468116f0174eed4b8062475a83f 100644 (file)
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
        might_sleep();
 
        __down_read(sem);
+       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_non_owner);
index 0e4cd64ad2c018cf4ad2375004b966d6de49c241..5b915b370d5a833fcf087bec26f06b8d07a59cda 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/slab.h>
 #include <linux/ww_mutex.h>
 
-static DEFINE_WW_CLASS(ww_class);
+static DEFINE_WD_CLASS(ww_class);
 struct workqueue_struct *wq;
 
 struct test_mutex {
index 5857267a4af5dab0cb0eb85ed9b61cdfcbc57dcc..38283363da06d40057ddfe71c3c8ec1a2c7a94d2 100644 (file)
@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
        unsigned long pfn, pgoff, order;
        pgprot_t pgprot = PAGE_KERNEL;
        int error, nid, is_ram;
+       struct dev_pagemap *conflict_pgmap;
 
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
                - align_start;
+       align_end = align_start + align_size - 1;
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               return ERR_PTR(-ENOMEM);
+       }
+
        is_ram = region_intersects(align_start, align_size,
                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
 
@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 
        mutex_lock(&pgmap_lock);
        error = 0;
-       align_end = align_start + align_size - 1;
 
        foreach_order_pgoff(res, order, pgoff) {
                error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
-EXPORT_SYMBOL_GPL(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_enable;
 
 /*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
        } else if (!count)
                __put_page(page);
 }
-EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
index 247808333ba430f75c0ac7382bc08d6f219685cf..3f041e7cbfc9ea5a016ec7c9cfb177280305a229 100644 (file)
@@ -2243,6 +2243,7 @@ int is_console_locked(void)
 {
        return console_locked;
 }
+EXPORT_SYMBOL(is_console_locked);
 
 /*
  * Check if we have any console that is capable of printing while cpu is
index ae306f90c51484fae6bb583733ca5e8f8b3e76be..c6242d8594dc7c0fab52de9df7f9cf01e49e5d0f 100644 (file)
@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t)
 {
        u32 cpu_id = raw_smp_processor_id();
 
-       if (__put_user(cpu_id, &t->rseq->cpu_id_start))
+       if (put_user(cpu_id, &t->rseq->cpu_id_start))
                return -EFAULT;
-       if (__put_user(cpu_id, &t->rseq->cpu_id))
+       if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        trace_rseq_update(t);
        return 0;
@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
        /*
         * Reset cpu_id_start to its initial state (0).
         */
-       if (__put_user(cpu_id_start, &t->rseq->cpu_id_start))
+       if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
                return -EFAULT;
        /*
         * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
         * in after unregistration can figure out that rseq needs to be
         * registered again.
         */
-       if (__put_user(cpu_id, &t->rseq->cpu_id))
+       if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        return 0;
 }
@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
 static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
 {
        struct rseq_cs __user *urseq_cs;
-       unsigned long ptr;
+       u64 ptr;
        u32 __user *usig;
        u32 sig;
        int ret;
 
-       ret = __get_user(ptr, &t->rseq->rseq_cs);
-       if (ret)
-               return ret;
+       if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
+               return -EFAULT;
        if (!ptr) {
                memset(rseq_cs, 0, sizeof(*rseq_cs));
                return 0;
        }
-       urseq_cs = (struct rseq_cs __user *)ptr;
+       if (ptr >= TASK_SIZE)
+               return -EINVAL;
+       urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
        if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
                return -EFAULT;
-       if (rseq_cs->version > 0)
-               return -EINVAL;
 
+       if (rseq_cs->start_ip >= TASK_SIZE ||
+           rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
+           rseq_cs->abort_ip >= TASK_SIZE ||
+           rseq_cs->version > 0)
+               return -EINVAL;
+       /* Check for overflow. */
+       if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
+               return -EINVAL;
        /* Ensure that abort_ip is not in the critical section. */
        if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
                return -EINVAL;
 
-       usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32));
+       usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
        ret = get_user(sig, usig);
        if (ret)
                return ret;
@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
                printk_ratelimited(KERN_WARNING
                        "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
                        sig, current->rseq_sig, current->pid, usig);
-               return -EPERM;
+               return -EINVAL;
        }
        return 0;
 }
@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
        int ret;
 
        /* Get thread flags. */
-       ret = __get_user(flags, &t->rseq->flags);
+       ret = get_user(flags, &t->rseq->flags);
        if (ret)
                return ret;
 
@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t)
         * of code outside of the rseq assembly block. This performs
         * a lazy clear of the rseq_cs field.
         *
-        * Set rseq_cs to NULL with single-copy atomicity.
+        * Set rseq_cs to NULL.
         */
-       return __put_user(0UL, &t->rseq->rseq_cs);
+       if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
+               return -EFAULT;
+       return 0;
 }
 
 /*
@@ -251,10 +260,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
  */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 {
        struct task_struct *t = current;
-       int ret;
+       int ret, sig;
 
        if (unlikely(t->flags & PF_EXITING))
                return;
@@ -268,7 +277,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
        return;
 
 error:
-       force_sig(SIGSEGV, t);
+       sig = ksig ? ksig->sig : 0;
+       force_sigsegv(sig, t);
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
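
Together these checks stop a (possibly 32-bit) task from handing the kernel out-of-range pointers: rseq_cs.ptr64 is read as a fixed-width u64, every code address is bounded by TASK_SIZE, and the start/offset pair is checked for wraparound before the abort_ip containment test. Condensed into one predicate (hypothetical helper, field names as above):

static bool rseq_cs_sane(u64 start_ip, u64 post_commit_offset, u64 abort_ip)
{
	if (start_ip >= TASK_SIZE ||
	    start_ip + post_commit_offset >= TASK_SIZE ||
	    abort_ip >= TASK_SIZE)
		return false;
	/* a + len < a  <=>  the addition wrapped */
	if (start_ip + post_commit_offset < start_ip)
		return false;
	/* abort handler must lie outside [start_ip, start_ip + len) */
	if (abort_ip - start_ip < post_commit_offset)
		return false;
	return true;
}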
index 78d8facba456c2fc44c7024ae1a1c8d9db6f0692..fe365c9a08e98392d17a0f54ac1c7209db5f90b2 100644 (file)
@@ -7,7 +7,6 @@
  */
 #include "sched.h"
 
-#include <linux/kthread.h>
 #include <linux/nospec.h>
 
 #include <linux/kcov.h>
@@ -2724,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev)
                membarrier_mm_sync_core_before_usermode(mm);
                mmdrop(mm);
        }
-       if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
-               switch (prev_state) {
-               case TASK_DEAD:
-                       if (prev->sched_class->task_dead)
-                               prev->sched_class->task_dead(prev);
+       if (unlikely(prev_state == TASK_DEAD)) {
+               if (prev->sched_class->task_dead)
+                       prev->sched_class->task_dead(prev);
 
-                       /*
-                        * Remove function-return probe instances associated with this
-                        * task and put them back on the free list.
-                        */
-                       kprobe_flush_task(prev);
-
-                       /* Task is done with its stack. */
-                       put_task_stack(prev);
+               /*
+                * Remove function-return probe instances associated with this
+                * task and put them back on the free list.
+                */
+               kprobe_flush_task(prev);
 
-                       put_task_struct(prev);
-                       break;
+               /* Task is done with its stack. */
+               put_task_stack(prev);
 
-               case TASK_PARKED:
-                       kthread_park_complete(prev);
-                       break;
-               }
+               put_task_struct(prev);
        }
 
        tick_nohz_task_switch();
@@ -3113,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work)
        struct tick_work *twork = container_of(dwork, struct tick_work, work);
        int cpu = twork->cpu;
        struct rq *rq = cpu_rq(cpu);
+       struct task_struct *curr;
        struct rq_flags rf;
+       u64 delta;
 
        /*
         * Handle the tick only if it appears the remote CPU is running in full
@@ -3122,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work)
         * statistics and checks timeslices in a time-independent way, regardless
         * of when exactly it is running.
         */
-       if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
-               struct task_struct *curr;
-               u64 delta;
+       if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+               goto out_requeue;
 
-               rq_lock_irq(rq, &rf);
-               update_rq_clock(rq);
-               curr = rq->curr;
-               delta = rq_clock_task(rq) - curr->se.exec_start;
+       rq_lock_irq(rq, &rf);
+       curr = rq->curr;
+       if (is_idle_task(curr))
+               goto out_unlock;
 
-               /*
-                * Make sure the next tick runs within a reasonable
-                * amount of time.
-                */
-               WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-               curr->sched_class->task_tick(rq, curr, 0);
-               rq_unlock_irq(rq, &rf);
-       }
+       update_rq_clock(rq);
+       delta = rq_clock_task(rq) - curr->se.exec_start;
+
+       /*
+        * Make sure the next tick runs within a reasonable
+        * amount of time.
+        */
+       WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+       curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+       rq_unlock_irq(rq, &rf);
 
+out_requeue:
        /*
         * Run the remote tick once per second (1Hz). This arbitrary
         * frequency is large enough to avoid overload but short enough
index 3cde46483f0aa5e57ea14502322fd90741e88ac0..c907fde01eaa65fcf784074466a17f8a64ab0203 100644 (file)
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
 
-       if (rq->rt.rt_nr_running)
+       if (rt_rq_is_runnable(&rq->rt))
                return sg_cpu->max;
 
        /*
index fbfc3f1d368a08dd9ebd7c510caf67f52d377334..10c7b51c0d1fd4be1399f802d1c82e40f60709b5 100644 (file)
@@ -2290,8 +2290,17 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
        if (task_on_rq_queued(p) && p->dl.dl_runtime)
                task_non_contending(p);
 
-       if (!task_on_rq_queued(p))
+       if (!task_on_rq_queued(p)) {
+               /*
+                * Inactive timer is armed. However, p is leaving DEADLINE and
+                * might migrate away from this rq while continuing to run on
+                * some other class. We need to remove its contribution from
+                * this rq running_bw now, or sub_rq_bw (below) will complain.
+                */
+               if (p->dl.dl_non_contending)
+                       sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);
+       }
 
        /*
         * We cannot use inactive_task_timer() to invoke sub_running_bw()
index 1866e64792a791f8737128c88ae691d7453ff117..2f0a0be4d344d7de76211b01c6883c10fbb2ba89 100644 (file)
@@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        if (!sched_feat(UTIL_EST))
                return;
 
-       /*
-        * Update root cfs_rq's estimated utilization
-        *
-        * If *p is the last task then the root cfs_rq's estimated utilization
-        * of a CPU is 0 by definition.
-        */
-       ue.enqueued = 0;
-       if (cfs_rq->nr_running) {
-               ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-               ue.enqueued -= min_t(unsigned int, ue.enqueued,
-                                    (_task_util_est(p) | UTIL_AVG_UNCHANGED));
-       }
+       /* Update root cfs_rq's estimated utilization */
+       ue.enqueued  = cfs_rq->avg.util_est.enqueued;
+       ue.enqueued -= min_t(unsigned int, ue.enqueued,
+                            (_task_util_est(p) | UTIL_AVG_UNCHANGED));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
        /*
@@ -4590,6 +4582,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
        now = sched_clock_cpu(smp_processor_id());
        cfs_b->runtime = cfs_b->quota;
        cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
+       cfs_b->expires_seq++;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4612,6 +4605,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        struct task_group *tg = cfs_rq->tg;
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
        u64 amount = 0, min_amount, expires;
+       int expires_seq;
 
        /* note: this is a positive sum as runtime_remaining <= 0 */
        min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
@@ -4628,6 +4622,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
                        cfs_b->idle = 0;
                }
        }
+       expires_seq = cfs_b->expires_seq;
        expires = cfs_b->runtime_expires;
        raw_spin_unlock(&cfs_b->lock);
 
@@ -4637,8 +4632,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * spread between our sched_clock and the one on which runtime was
         * issued.
         */
-       if ((s64)(expires - cfs_rq->runtime_expires) > 0)
+       if (cfs_rq->expires_seq != expires_seq) {
+               cfs_rq->expires_seq = expires_seq;
                cfs_rq->runtime_expires = expires;
+       }
 
        return cfs_rq->runtime_remaining > 0;
 }
@@ -4664,12 +4661,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * has not truly expired.
         *
         * Fortunately we can determine whether this is the case by checking
-        * whether the global deadline has advanced. It is valid to compare
-        * cfs_b->runtime_expires without any locks since we only care about
-        * exact equality, so a partial write will still work.
+        * whether the global deadline (cfs_b->expires_seq) has advanced.
         */
-
-       if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
+       if (cfs_rq->expires_seq == cfs_b->expires_seq) {
                /* extend local deadline, drift is bounded above by 2 ticks */
                cfs_rq->runtime_expires += TICK_NSEC;
        } else {
@@ -5202,13 +5196,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       u64 overrun;
+
        lockdep_assert_held(&cfs_b->lock);
 
-       if (!cfs_b->period_active) {
-               cfs_b->period_active = 1;
-               hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
-               hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
-       }
+       if (cfs_b->period_active)
+               return;
+
+       cfs_b->period_active = 1;
+       overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+       cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
+       cfs_b->expires_seq++;
+       hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
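
The expires_seq scheme swaps a racy timestamp equality test for an epoch counter: the global side bumps cfs_b->expires_seq each time it publishes a new runtime_expires, and a local cfs_rq adopts the new deadline only when it sees a sequence it has not seen before. The two sides, condensed from the hunks above:

/* global side (__refill_cfs_bandwidth_runtime), under cfs_b->lock */
cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
cfs_b->expires_seq++;

/* local side (assign_cfs_rq_runtime), after snapshotting both fields */
if (cfs_rq->expires_seq != expires_seq) {
	cfs_rq->expires_seq = expires_seq;	/* new period: adopt it */
	cfs_rq->runtime_expires = expires;
}

/* local expiry test (expire_cfs_rq_runtime): equal sequences mean the
 * global deadline has not advanced, so an apparent expiry is only
 * sched_clock drift and the local deadline is nudged by TICK_NSEC. */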
index 47556b0c9a95faff3e827f6ffd690646cff38224..572567078b60b59dd14e3a608c6e9207f1c7e024 100644 (file)
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (!rt_se)
+       if (!rt_se) {
                dequeue_top_rt_rq(rt_rq);
+               /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+               cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+       }
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
 }
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
        sub_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 0;
 
-       /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-       cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
        if (rt_rq->rt_queued)
                return;
-       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+       if (rt_rq_throttled(rt_rq))
                return;
 
-       add_nr_running(rq, rt_rq->rt_nr_running);
-       rt_rq->rt_queued = 1;
+       if (rt_rq->rt_nr_running) {
+               add_nr_running(rq, rt_rq->rt_nr_running);
+               rt_rq->rt_queued = 1;
+       }
 
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq, 0);
index 6601baf2361c04605ce198091bdfe8d56aa8a7b7..c7742dcc136c6aa3ba95f0874c02c96e1eaccde8 100644 (file)
@@ -334,9 +334,10 @@ struct cfs_bandwidth {
        u64                     runtime;
        s64                     hierarchical_quota;
        u64                     runtime_expires;
+       int                     expires_seq;
 
-       int                     idle;
-       int                     period_active;
+       short                   idle;
+       short                   period_active;
        struct hrtimer          period_timer;
        struct hrtimer          slack_timer;
        struct list_head        throttled_cfs_rq;
@@ -551,6 +552,7 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
        int                     runtime_enabled;
+       int                     expires_seq;
        u64                     runtime_expires;
        s64                     runtime_remaining;
 
@@ -609,6 +611,11 @@ struct rt_rq {
 #endif
 };
 
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */
index de2f57fddc04ed85f5d419fe64e51cdcbb93193b..75ffc1d1a2e06e9d08f1e121e7dfb86d7758b3ed 100644 (file)
@@ -79,12 +79,16 @@ static void wakeup_softirqd(void)
 
 /*
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+ * unless we're doing some of the synchronous softirqs.
  */
-static bool ksoftirqd_running(void)
+#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
+static bool ksoftirqd_running(unsigned long pending)
 {
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
+       if (pending & SOFTIRQ_NOW_MASK)
+               return false;
        return tsk && (tsk->state == TASK_RUNNING);
 }
 
@@ -139,9 +143,13 @@ static void __local_bh_enable(unsigned int cnt)
 {
        lockdep_assert_irqs_disabled();
 
+       if (preempt_count() == cnt)
+               trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
-       preempt_count_sub(cnt);
+
+       __preempt_count_sub(cnt);
 }
 
 /*
@@ -324,7 +332,7 @@ asmlinkage __visible void do_softirq(void)
 
        pending = local_softirq_pending();
 
-       if (pending && !ksoftirqd_running())
+       if (pending && !ksoftirqd_running(pending))
                do_softirq_own_stack();
 
        local_irq_restore(flags);
@@ -351,7 +359,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (ksoftirqd_running())
+       if (ksoftirqd_running(local_softirq_pending()))
                return;
 
        if (!force_irqthreads) {
index f89014a2c2381e4b38c0839970d28864f751a8d1..1ff523dae6e2b7c0980162549db02bdd350620cc 100644 (file)
@@ -270,7 +270,11 @@ unlock:
                goto retry;
        }
 
-       wake_up_q(&wakeq);
+       if (!err) {
+               preempt_disable();
+               wake_up_q(&wakeq);
+               preempt_enable();
+       }
 
        return err;
 }
index 055a4a728c00cce3945afc04b9bee692b243896b..3e93c54bd3a16b7fc282a20064f5d75f7c812ee8 100644 (file)
@@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
 {
        switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
        case TT_COMPAT:
                if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
                        return -EFAULT;
index 5a6251ac6f7acd183c35a51d9d55fb680fda64dd..9cdf54b04ca8860b7aa2eec2e3625de076b5f7e2 100644 (file)
@@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        /*
         * Disarm any old timer after extracting its expiry time.
         */
-       lockdep_assert_irqs_disabled();
 
        ret = 0;
        old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
        /*
         * Now re-arm for the new expiry time.
         */
-       lockdep_assert_irqs_disabled();
        arm_timer(timer);
 unlock:
        unlock_task_sighand(p, &flags);
index b7005dd21ec16ce5fa92e33b3b46f04bedbbf7f0..14de3727b18e6ca5c21780aa37af22fbc4d29739 100644 (file)
@@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
         */
        return !curdev ||
                newdev->rating > curdev->rating ||
-              (!cpumask_equal(curdev->cpumask, newdev->cpumask) &&
-               !tick_check_percpu(curdev, newdev, smp_processor_id()));
+              !cpumask_equal(curdev->cpumask, newdev->cpumask);
 }
 
 /*
index 6fa99213fc720e4b77c467ae69a87007c22b37d2..2b41e8e2d31db26faaaf905543af749463939b9c 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/capability.h>
 #include <linux/timekeeper_internal.h>
@@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
-       return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+       return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+              HZ_TO_MSEC_SHR32;
 # else
-       return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+       return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 # endif
 #endif
 }
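
Rounding up matters whenever HZ does not divide MSEC_PER_SEC. With HZ = 1024, HZ_TO_MSEC_NUM/HZ_TO_MSEC_DEN reduces to 125/128 (one jiffy is about 0.977 ms), and the old truncating division could report a nonzero interval as 0 ms; the 32-bit multiply-shift path gets the same fix by adding (1 << HZ_TO_MSEC_SHR32) - 1 before the shift. Worked through:

/* HZ = 1024: NUM/DEN = 125/128
 *
 * j = 1:   old  (1 * 125) / 128             = 0 ms   (nonzero time lost)
 *          new  DIV_ROUND_UP(1 * 125, 128)  = 1 ms
 * j = 128: old  (128 * 125) / 128           = 125 ms
 *          new  DIV_ROUND_UP(16000, 128)    = 125 ms (exact results unchanged)
 */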
index efed9c1cfb7ea4ea12182e711dacf01623f73452..caf9cbf3581683ace69577fd2f365ba039138cea 100644 (file)
@@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        op->saved_func(ip, parent_ip, op, regs);
 }
 
-/**
- * clear_ftrace_function - reset the ftrace function
- *
- * This NULLs the ftrace function and in essence stops
- * tracing.  There may be lag
- */
-void clear_ftrace_function(void)
-{
-       ftrace_trace_function = ftrace_stub;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
        /*
@@ -6689,7 +6678,7 @@ void ftrace_kill(void)
 {
        ftrace_disabled = 1;
        ftrace_enabled = 0;
-       clear_ftrace_function();
+       ftrace_trace_function = ftrace_stub;
 }
 
 /**
index 6a46af21765cc60685e54297e8092ee1abf9aca3..0b0b688ea166f29bd73b22ef302f3cb3776ea424 100644 (file)
@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
        return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+       return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
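The new predicate distinguishes a permanent ring_buffer_record_off() from the temporary, counted ring_buffer_record_disable(). A minimal userspace model of the two checks (RB_BUFFER_OFF is assumed to be a dedicated high bit above the disable counter, as in ring_buffer.c; the exact value here is illustrative):

    #include <stdio.h>

    #define RB_BUFFER_OFF (1 << 20)  /* permanent "off" bit above the counter */

    static int record_disabled;      /* stand-in for buffer->record_disabled */

    static int is_on(void)     { return !record_disabled; }
    static int is_set_on(void) { return !(record_disabled & RB_BUFFER_OFF); }

    int main(void)
    {
            record_disabled += 1;             /* ring_buffer_record_disable() */
            printf("%d %d\n", is_on(), is_set_on());  /* 0 1: off, still "set on" */

            record_disabled = RB_BUFFER_OFF;  /* ring_buffer_record_off() */
            printf("%d %d\n", is_on(), is_set_on());  /* 0 0 */
            return 0;
    }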
index c9336e98ac59a778d31c16a9ac72b184477e7177..823687997b015f5101375fc2aa0020c5c4415c02 100644 (file)
@@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf;
-
        if (tr->stop_count)
                return;
 
@@ -1375,9 +1373,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
-       buf = tr->trace_buffer.buffer;
-       tr->trace_buffer.buffer = tr->max_buffer.buffer;
-       tr->max_buffer.buffer = buf;
+       /* Inherit the recordable setting from trace_buffer */
+       if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+               ring_buffer_record_on(tr->max_buffer.buffer);
+       else
+               ring_buffer_record_off(tr->max_buffer.buffer);
+
+       swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
@@ -2957,6 +2959,7 @@ out_nobuffer:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
+__printf(3, 0)
 static int
 __trace_array_vprintk(struct ring_buffer *buffer,
                      unsigned long ip, const char *fmt, va_list args)
@@ -3011,12 +3014,14 @@ out_nobuffer:
        return len;
 }
 
+__printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
        return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
 }
 
+__printf(3, 0)
 int trace_array_printk(struct trace_array *tr,
                       unsigned long ip, const char *fmt, ...)
 {
@@ -3032,6 +3037,7 @@ int trace_array_printk(struct trace_array *tr,
        return ret;
 }
 
+__printf(3, 4)
 int trace_array_printk_buf(struct ring_buffer *buffer,
                           unsigned long ip, const char *fmt, ...)
 {
@@ -3047,6 +3053,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
        return ret;
 }
 
+__printf(2, 0)
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
        return trace_array_vprintk(&global_trace, ip, fmt, args);
@@ -3364,8 +3371,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
 
        print_event_info(buf, m);
 
-       seq_printf(m, "#           TASK-PID   CPU#   %s  TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
-       seq_printf(m, "#              | |       |    %s     |         |\n",      tgid ? "  |      " : "");
+       seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
+       seq_printf(m, "#              | |     %s    |       |         |\n",      tgid ? "  |      " : "");
 }
 
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
@@ -3385,9 +3392,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
                   tgid ? tgid_space : space);
        seq_printf(m, "#                          %s||| /     delay\n",
                   tgid ? tgid_space : space);
-       seq_printf(m, "#           TASK-PID   CPU#%s||||    TIMESTAMP  FUNCTION\n",
+       seq_printf(m, "#           TASK-PID %sCPU#  ||||    TIMESTAMP  FUNCTION\n",
                   tgid ? "   TGID   " : space);
-       seq_printf(m, "#              | |       | %s||||       |         |\n",
+       seq_printf(m, "#              | |   %s  |   ||||       |         |\n",
                   tgid ? "     |    " : space);
 }
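The __printf() annotations added in this file let the compiler type-check the format arguments of the trace printf helpers: __printf(a, b) is the kernel's shorthand for __attribute__((format(printf, a, b))), where a is the 1-based index of the format string and b is the first variadic argument (0 for va_list variants). A standalone illustration:

    #include <stdio.h>
    #include <stdarg.h>

    #define __printf(a, b) __attribute__((format(printf, a, b)))

    __printf(1, 2)
    static void log_msg(const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);
    }

    int main(void)
    {
            log_msg("%s took %d ms\n", "probe", 42);
            /* log_msg("%s\n", 42); would now trigger -Wformat at compile time */
            return 0;
    }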
 
index 630c5a24b2b255bf7cd35ef1524e25508835679f..f8f86231ad90e48b73b7eb2dc8d1def48071219a 100644 (file)
@@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit)
 static inline struct ring_buffer_iter *
 trace_buffer_iter(struct trace_iterator *iter, int cpu)
 {
-       if (iter->buffer_iter && iter->buffer_iter[cpu])
-               return iter->buffer_iter[cpu];
-       return NULL;
+       return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
 }
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
index e1c818dbc0d724c603be39463b83de8f021cf79f..893a206bcba4f76dd504e246d9667475fd7ed456 100644 (file)
@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
        C(TOO_MANY_PREDS,       "Too many terms in predicate expression"), \
        C(INVALID_FILTER,       "Meaningless filter expression"),       \
        C(IP_FIELD_ONLY,        "Only 'ip' field is supported for function trace"), \
-       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"),
+       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"), \
+       C(NO_FILTER,            "No filter found"),
 
 #undef C
 #define C(a, b)                FILT_ERR_##a
@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
                goto out_free;
        }
 
+       if (!N) {
+               /* No program? */
+               ret = -EINVAL;
+               parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+               goto out_free;
+       }
+
        prog[N].pred = NULL;                                    /* #13 */
        prog[N].target = 1;             /* TRUE */
        prog[N+1].pred = NULL;
@@ -1693,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe)
  * @filter_str: filter string
  * @set_str: remember @filter_str and enable detailed error in filter
  * @filterp: out param for created filter (always updated on return)
+ *           Must be a pointer that references a NULL pointer.
  *
  * Creates a filter for @call with @filter_str.  If @set_str is %true,
  * @filter_str is copied and recorded in the new filter.
@@ -1710,6 +1719,10 @@ static int create_filter(struct trace_event_call *call,
        struct filter_parse_error *pe = NULL;
        int err;
 
+       /* filterp must point to NULL */
+       if (WARN_ON(*filterp))
+               *filterp = NULL;
+
        err = create_filter_start(filter_string, set_str, &pe, filterp);
        if (err)
                return err;
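With the added WARN_ON(), callers of create_filter() are expected to hand in a cleared out-pointer. A sketch of the resulting calling convention (surrounding code elided):

    struct event_filter *filter = NULL;  /* must reference NULL on entry */
    int err = create_filter(call, filter_str, true, &filter);
    /* *filterp is always updated on return, even on error */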
index 046c716a6536ba4f504df2523a420b92920d4e52..aae18af94c94e61967063ac7534fe2979fdcec45 100644 (file)
@@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var)
        else if (system)
                snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
        else
-               strncpy(err, var, MAX_FILTER_STR_VAL);
+               strscpy(err, var, MAX_FILTER_STR_VAL);
 
        hist_err(str, err);
 }
index d18249683682f750a1b86387355e9b159bb1b3d5..5dea177cef53120129c7d4a4d76eacb67ad66db0 100644 (file)
@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
                goto out_free;
 
  out_reg:
+       /* Up the trigger_data count to make sure reg doesn't free it on failure */
+       event_trigger_init(trigger_ops, trigger_data);
        ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
        /*
         * The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
         * Consider no functions a failure too.
         */
        if (!ret) {
+               cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
                ret = -ENOENT;
-               goto out_free;
-       } else if (ret < 0)
-               goto out_free;
-       ret = 0;
+       } else if (ret > 0)
+               ret = 0;
+
+       /* Down the counter of trigger_data or free it if not used anymore */
+       event_trigger_free(trigger_ops, trigger_data);
  out:
        return ret;
 
@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
                goto out;
        }
 
+       /* Up the trigger_data count to make sure nothing frees it on failure */
+       event_trigger_init(trigger_ops, trigger_data);
+
        if (trigger) {
                number = strsep(&trigger, ":");
 
@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
                goto out_disable;
        /* Just return zero, not the number of enabled functions */
        ret = 0;
+       event_trigger_free(trigger_ops, trigger_data);
  out:
        return ret;
 
@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
  out_free:
        if (cmd_ops->set_filter)
                cmd_ops->set_filter(NULL, trigger_data, NULL);
-       kfree(trigger_data);
+       event_trigger_free(trigger_ops, trigger_data);
        kfree(enable_data);
        goto out;
 }
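Both hunks in this file apply the same discipline: pin trigger_data with an extra reference before a callback that may free it on failure, then drop the reference unconditionally afterwards. The pattern in isolation (a sketch, with error paths trimmed):

    event_trigger_init(trigger_ops, trigger_data);    /* pin trigger_data */

    ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
    if (!ret) {                /* zero functions enabled counts as failure */
            cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
            ret = -ENOENT;
    } else if (ret > 0) {
            ret = 0;
    }

    event_trigger_free(trigger_ops, trigger_data);    /* unpin, may free */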
index 23c0b0cb5fb95c9875fb35cbd0d22f027430343c..169b3c44ee97f3cf00bc574b185f16fa572a12d5 100644 (file)
@@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
+       int cpu = iter->cpu;
        int i;
 
        graph_ret = &ret_entry->ret;
@@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
        if (data) {
                struct fgraph_cpu_data *cpu_data;
-               int cpu = iter->cpu;
 
                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
@@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
        trace_seq_printf(s, "%ps();\n", (void *)call->func);
 
+       print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
+                       cpu, iter->ent->pid, flags);
+
        return trace_handle_return(s);
 }
 
index daa81571b22a4646bcc6400ccee0fe638dda2515..6b71860f3998c8df990b7e089075a56f759c4306 100644 (file)
@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 static int
 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
+       struct event_file_link *link = NULL;
        int ret = 0;
 
        if (file) {
-               struct event_file_link *link;
-
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }
+
+       if (ret) {
+               if (file) {
+                       /* Note: the branch is taken when WARN_ON_ONCE() does not fire */
+                       if (!WARN_ON_ONCE(!link))
+                               list_del_rcu(&link->list);
+                       kfree(link);
+                       tk->tp.flags &= ~TP_FLAG_TRACE;
+               } else {
+                       tk->tp.flags &= ~TP_FLAG_PROFILE;
+               }
+       }
  out:
        return ret;
 }
@@ -1480,8 +1491,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
        }
 
        ret = __register_trace_kprobe(tk);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(tk->tp.call.print_fmt);
                goto error;
+       }
 
        return &tk->tp.call;
 error:
@@ -1501,6 +1514,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
        }
 
        __unregister_trace_kprobe(tk);
+
+       kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
 }
 #endif /* CONFIG_PERF_EVENTS */
index 90db994ac9004d2fc7163eeb95b8c79e121e991a..1c8e30fda46a8a4abab5c748868e52c06a8b30ea 100644 (file)
@@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter)
 
        trace_find_cmdline(entry->pid, comm);
 
-       trace_seq_printf(s, "%16s-%-5d [%03d] ",
-                              comm, entry->pid, iter->cpu);
+       trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 
        if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
                unsigned int tgid = trace_find_tgid(entry->pid);
@@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter)
                        trace_seq_printf(s, "(%5d) ", tgid);
        }
 
+       trace_seq_printf(s, "[%03d] ", iter->cpu);
+
        if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
                trace_print_lat_fmt(s, entry);
 
index e34b04b56057a86cd0ade5cb9fcb4919730f80a2..706836ec314d2add83b84ebe59dec8fd7e13a7ad 100644 (file)
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
-config HAS_DMA
-       bool
-       depends on !NO_DMA
-       default y
+source "kernel/dma/Kconfig"
 
 config SGL_ALLOC
        bool
        default n
 
-config NEED_SG_DMA_LENGTH
-       bool
-
-config NEED_DMA_MAP_STATE
-       bool
-
-config ARCH_DMA_ADDR_T_64BIT
-       def_bool 64BIT || PHYS_ADDR_T_64BIT
-
 config IOMMU_HELPER
        bool
 
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
-       bool
-       select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
-       bool
-       depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
-       bool
-       depends on HAS_DMA
-       select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
-       bool
-       depends on HAS_DMA
-
-config SWIOTLB
-       bool
-       select DMA_DIRECT_OPS
-       select NEED_DMA_MAP_STATE
-
 config CHECK_SIGNATURE
        bool
 
index 3d35d062970d2459ecee5573cf512a999061b3ab..befb127507c0b1cb05f6f83b81464e99f4ac4fb0 100644 (file)
@@ -5,7 +5,8 @@ if HAVE_ARCH_KASAN
 
 config KASAN
        bool "KASan: runtime memory debugger"
-       depends on SLUB || (SLAB && !DEBUG_SLAB)
+       depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+       select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
        help
index 956b320292fef9a4055a1a955f37b6f41c2a4b71..90dc5520b7849dc69dc4c3df3ea419c45e9451cc 100644 (file)
@@ -23,15 +23,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         sha1.o chacha20.o irq_regs.o argv_split.o \
         flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o seq_buf.o siphash.o \
+        earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
         nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y  += kobject.o klist.o
 obj-y  += lockref.o
@@ -98,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-  lib-y += dec_and_lock.o
-endif
-
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_RATIONAL) += rational.o
 obj-$(CONFIG_CRC_CCITT)        += crc-ccitt.o
@@ -148,7 +141,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +161,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_LRU_CACHE) += lru_cache.o
 
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
index 347fa7ac2e8a858827415d44725d256d2a9e96a3..9555b68bb774cc3277dca434d19880286d71df0e 100644 (file)
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                unsigned long *flags)
+{
+       /* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
+       if (atomic_add_unless(atomic, -1, 1))
+               return 0;
+
+       /* Otherwise do it the slow way */
+       spin_lock_irqsave(lock, *flags);
+       if (atomic_dec_and_test(atomic))
+               return 1;
+       spin_unlock_irqrestore(lock, *flags);
+       return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
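Like _atomic_dec_and_lock(), the irqsave variant only takes the lock for the final 1 -> 0 transition; atomic_add_unless(atomic, -1, 1) handles every other decrement without locking. A hypothetical caller (obj, its fields, and release_obj() are illustrative, not from the patch):

    unsigned long flags;

    if (_atomic_dec_and_lock_irqsave(&obj->refcnt, &obj->lock, &flags)) {
            /* count reached zero: lock held, interrupts disabled */
            list_del(&obj->node);
            spin_unlock_irqrestore(&obj->lock, flags);
            release_obj(obj);
    }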
index 7e43cd54c84ca3da2d77b02e7112c69386428a2b..8be175df30753c95692007a5d41503838344d9a5 100644 (file)
@@ -596,15 +596,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
        return ret;
 }
 
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+                               struct iov_iter *i)
+{
+       struct pipe_inode_info *pipe = i->pipe;
+       size_t n, off, xfer = 0;
+       int idx;
+
+       if (!sanity(i))
+               return 0;
+
+       bytes = n = push_pipe(i, bytes, &idx, &off);
+       if (unlikely(!n))
+               return 0;
+       for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+               size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+               unsigned long rem;
+
+               rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+                               chunk);
+               i->idx = idx;
+               i->iov_offset = off + chunk - rem;
+               xfer += chunk - rem;
+               if (rem)
+                       break;
+               n -= chunk;
+               addr += chunk;
+       }
+       i->count -= xfer;
+       return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @iter: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and a typical _copy_to_iter() are:
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ *   byte-by-byte until the fault happens again. Re-triggering machine
+ *   checks is potentially fatal so the implementation uses source
+ *   alignment and poison alignment assumptions to avoid re-triggering
+ *   hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ *   a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
 size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 {
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
-       if (unlikely(i->type & ITER_PIPE)) {
-               WARN_ON(1);
-               return 0;
-       }
+       if (unlikely(i->type & ITER_PIPE))
+               return copy_pipe_to_iter_mcsafe(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
@@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL(_copy_from_iter_nocache);
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @iter: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that it guarantees all data is flushed for
+ * all iterator types, while _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
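Per the new kernel-doc, mcsafe copies may return short on a machine check, now including pipe targets. A caller therefore checks the returned byte count instead of assuming the whole buffer moved (sketch; names are illustrative):

    size_t copied = _copy_to_iter_mcsafe(kaddr, len, iter);

    if (copied != len)
            return -EIO;  /* poisoned source consumed part of the copy */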
index b5c1293ce1474fbb7056c1995acb33991cd662af..1e1bbf171eca4076a39383d51b75224dbb4099d0 100644 (file)
@@ -29,7 +29,7 @@
  */
 static unsigned int debug_locks_verbose;
 
-static DEFINE_WW_CLASS(ww_lockdep);
+static DEFINE_WD_CLASS(ww_lockdep);
 
 static int __init setup_debug_locks_verbose(char *str)
 {
index 9bbd9c5d375a2c8bf9a6d950ba42ab556c12063b..beb14839b41ae3c04fd698ec33a34727a2bc92d5 100644 (file)
@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
        spin_lock_irqsave(&tags->lock, flags);
 
        /* Fastpath */
-       if (likely(tags->nr_free >= 0)) {
+       if (likely(tags->nr_free)) {
                tag = tags->freelist[--tags->nr_free];
                spin_unlock_irqrestore(&tags->lock, flags);
                return tag;
index 0eb48353abe30164d4ae564aa21bee901fad72c3..d3b81cefce91a83698c3127d562f7e81cac0b55f 100644 (file)
@@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ *                                 interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with interrupts disabled.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+                                  unsigned long *flags)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       spin_lock_irqsave(lock, *flags);
+       if (!refcount_dec_and_test(r)) {
+               spin_unlock_irqrestore(lock, *flags);
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
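A typical shape for a release path using the new helper, per the kernel-doc above (sketch; the object and its list linkage are hypothetical):

    unsigned long flags;

    if (refcount_dec_and_lock_irqsave(&obj->ref, &obj->lock, &flags)) {
            /* last reference dropped: lock held with interrupts off */
            hash_del(&obj->node);
            spin_unlock_irqrestore(&obj->lock, flags);
            kfree(obj);
    }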
index 9427b5766134cb139ef385b27f92f6027fecceca..e5c8586cf7174cfe0526dc8fb3314676601c5e57 100644 (file)
@@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
                                skip++;
                                if (list == iter->list) {
                                        iter->p = p;
-                                       skip = skip;
+                                       iter->skip = skip;
                                        goto found;
                                }
                        }
@@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
 
 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
 {
-       return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-                  (unsigned long)params->min_size);
+       size_t retsize;
+
+       if (params->nelem_hint)
+               retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+                             (unsigned long)params->min_size);
+       else
+               retsize = max(HASH_DEFAULT_SIZE,
+                             (unsigned long)params->min_size);
+
+       return retsize;
 }
 
 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht,
        struct bucket_table *tbl;
        size_t size;
 
-       size = HASH_DEFAULT_SIZE;
-
        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
@@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht,
 
        ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
 
-       if (params->nelem_hint)
-               size = rounded_hashtable_size(&ht->p);
+       size = rounded_hashtable_size(&ht->p);
 
        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
 {
-       struct bucket_table *tbl;
+       struct bucket_table *tbl, *next_tbl;
        unsigned int i;
 
        cancel_work_sync(&ht->run_work);
 
        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
+restart:
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;
@@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                }
        }
 
+       next_tbl = rht_dereference(tbl->future_tbl, ht);
        bucket_table_free(tbl);
+       if (next_tbl) {
+               tbl = next_tbl;
+               goto restart;
+       }
        mutex_unlock(&ht->mutex);
 }
 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
index 1642fd507a960f5deb2b6d7366db83800a3e547b..7c6096a7170486449736d82a37fbd50326ac169e 100644 (file)
@@ -24,9 +24,6 @@
  **/
 struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
        for_each_sg(sgl, sg, nents, i)
                ret = sg;
 
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
-#endif
        return ret;
 }
 EXPORT_SYMBOL(sg_last);
index 60aedc87936106460e436fe66429d45a59f36060..08d3d59dca17343c1a91def02d0a7da931c1c0f0 100644 (file)
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Ctx heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                {
                        {  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
                        { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
                },
                .fill_helper = bpf_fill_maxinsns6,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Call heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC | FLAG_NO_DATA,
+#endif
                { },
                { { 1, 0 }, { 10, 0 } },
                .fill_helper = bpf_fill_maxinsns7,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
        {
                "BPF_MAXINSNS: exec all MSH",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { 0xfa, 0xfb, 0xfc, 0xfd, },
                { { 4, 0xababab83 } },
                .fill_helper = bpf_fill_maxinsns13,
+               .expected_errcode = -ENOTSUPP,
        },
        {
                "BPF_MAXINSNS: ld_abs+get_processor_id",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
+               .expected_errcode = -ENOTSUPP,
        },
        /*
         * LD_IND / LD_ABS on fragmented SKBs
index b2aa8f5148449de1557e3ee48feebb8f1cab3083..cea592f402ed029d6d5dd63addd2d2bc8a8391f1 100644 (file)
@@ -260,13 +260,6 @@ plain(void)
 {
        int err;
 
-       /*
-        * Make sure crng is ready. Otherwise we get "(ptrval)" instead
-        * of a hashed address when printing '%p' in plain_hash() and
-        * plain_format().
-        */
-       wait_for_random_bytes();
-
        err = plain_hash();
        if (err) {
                pr_warn("plain 'p' does not appear to be hashed\n");
index 347cc834c04a8cbc388af1b7594e4f09bdc68b41..2e5d3df0853d928021cba0e30c70ff682afeaf68 100644 (file)
@@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
-               /*
-                * Wait for wb shutdown to finish if someone else is just
-                * running wb_shutdown(). Otherwise we could proceed to wb /
-                * bdi destruction before wb_shutdown() is finished.
-                */
-               wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
                return;
        }
-       set_bit(WB_shutting_down, &wb->state);
        spin_unlock_bh(&wb->work_lock);
 
        cgwb_remove_from_bdi_list(wb);
@@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
-       /*
-        * Make sure bit gets cleared after shutdown is finished. Matches with
-        * the barrier provided by test_and_clear_bit() above.
-        */
-       smp_wmb();
-       clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
@@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work)
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
 
+       mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
 
        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
+       mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
@@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 
        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
+       mutex_init(&bdi->cgwb_release_mutex);
 
        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
@@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
+       spin_unlock_irq(&cgwb_lock);
 
+       mutex_lock(&bdi->cgwb_release_mutex);
+       spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
@@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
+       mutex_unlock(&bdi->cgwb_release_mutex);
 }
 
 /**
index 56e2d9125ea55a57632feb22c64f2c1a76f5ec6e..38c926520c9718b8929a72829931080a3a53502d 100644 (file)
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       bool page_poisoned = PagePoisoned(page);
+       int mapcount;
+
+       /*
+        * If the struct page is poisoned, don't call Page*() functions, as
+        * that leads to a recursive loop: Page*() checks for poisoned pages
+        * and calls dump_page() when one is detected.
+        */
+       if (page_poisoned) {
+               pr_emerg("page:%px is uninitialized and poisoned", page);
+               goto hex_only;
+       }
+
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
-       int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+       mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
        pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
                  page, page_ref_count(page), mapcount,
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason)
 
        pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 
+hex_only:
        print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_alert("page dumped because: %s\n", reason);
 
 #ifdef CONFIG_MEMCG
-       if (page->mem_cgroup)
+       if (!page_poisoned && page->mem_cgroup)
                pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
index b70d7ba7cc13522c5bab5594b1211679b21b01e7..fc5f98069f4ea5b2906cf45e8997327c99a5b7ce 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        int locked = 0;
        long ret = 0;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
        for (nstart = start; nstart < end; nstart = nend) {
index 1cd7c1a57a144320b7d1729d7caa6ec93351cc54..25346bd9936432c383b0e98218763bc9d3e27de5 100644 (file)
@@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                if (vma_is_dax(vma))
                        return;
                page = pmd_page(_pmd);
+               if (!PageDirty(page) && pmd_dirty(_pmd))
+                       set_page_dirty(page);
                if (!PageReferenced(page) && pmd_young(_pmd))
                        SetPageReferenced(page);
                page_remove_rmap(page, true);
index 3612fbb32e9d5412e8494e4c220fad84e3a4e779..039ddbc574e926800f9104ede90782753605f46b 100644 (file)
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void)
                 */
                if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
+               cond_resched();
        }
 }
 
index f185455b34065d27efa2b6a90c9dd2c1dfe92ae9..c3bd5209da380d9a51a0fa4515f4fcdeefcad409 100644 (file)
@@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
 int kasan_module_alloc(void *addr, size_t size)
 {
        void *ret;
+       size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;
 
        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-       shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
-                       PAGE_SIZE);
+       scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+       shadow_size = round_up(scaled_size, PAGE_SIZE);
 
        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;
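The fix rounds size up to the shadow granule before shifting; the old code shifted first, truncating, which could undersize the shadow mapping by a page. A standalone check assuming PAGE_SIZE = 4096 and the usual scale shift of 3 (one shadow byte per 8 bytes):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define KASAN_SHADOW_SCALE_SHIFT 3
    #define KASAN_SHADOW_MASK ((1UL << KASAN_SHADOW_SCALE_SHIFT) - 1)
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)  /* y: power of two */

    int main(void)
    {
            unsigned long size = 8 * PAGE_SIZE + 1;  /* just over 8 pages */
            unsigned long old = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                                         PAGE_SIZE);
            unsigned long fixed = round_up((size + KASAN_SHADOW_MASK) >>
                                           KASAN_SHADOW_SCALE_SHIFT, PAGE_SIZE);

            /* prints 4096 vs 8192: the old math was one page short */
            printf("old: %lu, fixed: %lu\n", old, fixed);
            return 0;
    }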
index cc16d70b8333890730d16c08b858631947e38d70..4b5d245fafc17cbde5c2de63ee516e5f30924cf6 100644 (file)
@@ -228,7 +228,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * fail happens.
                 */
-               WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
+               WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
+                         "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
        }
 
        return __memblock_find_range_top_down(start, end, size, align, nid,
@@ -1225,6 +1226,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
        return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
+#if defined(CONFIG_NO_BOOTMEM)
 /**
  * memblock_virt_alloc_internal - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
@@ -1432,6 +1434,7 @@ void * __init memblock_virt_alloc_try_nid(
              (u64)max_addr);
        return NULL;
 }
+#endif
 
 /**
  * __memblock_free_early - free boot memory block
index e6f0d5ef320aa65d2b65ceed4b202021a84fd49b..8c0280b3143ee241053ccdf0eb31e9790edfef0c 100644 (file)
@@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
        int nid;
        int i;
 
-       while ((memcg = parent_mem_cgroup(memcg))) {
+       for (; memcg; memcg = parent_mem_cgroup(memcg)) {
                for_each_node(nid) {
                        mz = mem_cgroup_nodeinfo(memcg, nid);
                        for (i = 0; i <= DEF_PRIORITY; i++) {
index 9ac49ef17b4e1e5128f3db38e0a97bbc067ed1b9..01f1a14facc461c4ca5490adc4060d50887aa053 100644 (file)
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 
                /* Create pseudo-vma that contains just the policy */
                memset(&pvma, 0, sizeof(struct vm_area_struct));
+               vma_init(&pvma, NULL);
                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
 
index d1eb87ef4b1afa101fde9da06d2991644ca49dda..17bbf4d3e24f846b9cf4a60012dfcbfae008ee11 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,12 +182,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return next;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
-
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
+               struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long retval;
@@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                goto out;
 
        /* Ok, looks good - let it rip. */
-       if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
+       if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
                goto out;
 
 set_brk:
@@ -911,7 +911,7 @@ again:
                        anon_vma_merge(vma, next);
                mm->map_count--;
                mpol_put(vma_policy(next));
-               kmem_cache_free(vm_area_cachep, next);
+               vm_area_free(next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }
 
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        if (file) {
                if (vm_flags & VM_DENYWRITE) {
@@ -1780,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
+       } else {
+               vma_set_anonymous(vma);
        }
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -1832,7 +1832,7 @@ allow_write_and_free_vma:
        if (vm_flags & VM_DENYWRITE)
                allow_write_access(file);
 free_vma:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 unacct_error:
        if (charged)
                vm_unacct_memory(charged);
@@ -2620,15 +2620,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
                        return err;
        }
 
-       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       new = vm_area_dup(vma);
        if (!new)
                return -ENOMEM;
 
-       /* most fields are the same, copy all, and then fixup */
-       *new = *vma;
-
-       INIT_LIST_HEAD(&new->anon_vma_chain);
-
        if (new_below)
                new->vm_end = addr;
        else {
@@ -2669,7 +2664,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  out_free_mpol:
        mpol_put(vma_policy(new));
  out_free_vma:
-       kmem_cache_free(vm_area_cachep, new);
+       vm_area_free(new);
        return err;
 }
 
@@ -2929,21 +2924,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       unsigned long len;
        struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
 
-       len = PAGE_ALIGN(request);
-       if (len < request)
-               return -ENOMEM;
-       if (!len)
-               return 0;
-
        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
                return -EINVAL;
@@ -2991,14 +2979,13 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
        }
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma->vm_mm = mm;
+       vma_set_anonymous(vma);
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
@@ -3015,18 +3002,20 @@ out:
        return 0;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
-{
-       return do_brk_flags(addr, len, 0, uf);
-}
-
-int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
+       unsigned long len;
        int ret;
        bool populate;
        LIST_HEAD(uf);
 
+       len = PAGE_ALIGN(request);
+       if (len < request)
+               return -ENOMEM;
+       if (!len)
+               return 0;
+
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
 
@@ -3207,16 +3196,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               new_vma = vm_area_dup(vma);
                if (!new_vma)
                        goto out;
-               *new_vma = *vma;
                new_vma->vm_start = addr;
                new_vma->vm_end = addr + len;
                new_vma->vm_pgoff = pgoff;
                if (vma_dup_policy(vma, new_vma))
                        goto out_free_vma;
-               INIT_LIST_HEAD(&new_vma->anon_vma_chain);
                if (anon_vma_clone(new_vma, vma))
                        goto out_free_mempol;
                if (new_vma->vm_file)
@@ -3231,7 +3218,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_mempol:
        mpol_put(vma_policy(new_vma));
 out_free_vma:
-       kmem_cache_free(vm_area_cachep, new_vma);
+       vm_area_free(new_vma);
 out:
        return NULL;
 }
@@ -3355,12 +3342,10 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
@@ -3381,7 +3366,7 @@ static struct vm_area_struct *__install_special_mapping(
        return vma;
 
 out:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return ERR_PTR(ret);
 }
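Every open-coded kmem_cache_zalloc(vm_area_cachep, ...) plus INIT_LIST_HEAD() pair in this file becomes a vm_area_alloc()/vm_area_dup()/vm_area_free() call. Judging from the fields the removed code initialized by hand, the allocator side presumably reduces to something like this sketch (not the patch's own definition):

    struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
            if (vma)
                    vma_init(vma, mm);  /* vm_mm, anon_vma_chain, vm_ops */
            return vma;
    }

    void vm_area_free(struct vm_area_struct *vma)
    {
            kmem_cache_free(vm_area_cachep, vma);
    }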
 
index 4452d8bd9ae4b84851f433885eac18e050d72dad..9fc9e43335b6be3d2da03f77d625cff341e1b735 100644 (file)
@@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        put_nommu_region(vma->vm_region);
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 }
 
 /*
@@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
                if (ret < len)
                        memset(base + ret, 0, len - ret);
 
+       } else {
+               vma_set_anonymous(vma);
        }
 
        return 0;
@@ -1204,7 +1206,7 @@ unsigned long do_mmap(struct file *file,
        if (!region)
                goto error_getting_region;
 
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc(current->mm);
        if (!vma)
                goto error_getting_vma;
 
@@ -1212,7 +1214,6 @@ unsigned long do_mmap(struct file *file,
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_flags = vm_flags;
        vma->vm_pgoff = pgoff;
 
@@ -1368,7 +1369,7 @@ error:
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return ret;
 
 sharing_violation:
@@ -1469,14 +1470,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!region)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       new = vm_area_dup(vma);
        if (!new) {
                kmem_cache_free(vm_region_jar, region);
                return -ENOMEM;
        }
 
        /* most fields are the same, copy all, and then fixup */
-       *new = *vma;
        *region = *vma->vm_region;
        new->vm_region = region;
 
index 1521100f1e63b729bba37e21723a312950d688d8..a790ef4be74e3bb35e5cefc13398645bd607979a 100644 (file)
@@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        free_area_init_core(pgdat);
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void)
        if (pgcnt)
                pr_info("Reserved but unavailable: %lld pages", pgcnt);
 }
-#endif /* CONFIG_HAVE_MEMBLOCK */
+#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
@@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
+       zero_resv_unavail();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, NULL,
@@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                        node_set_state(nid, N_MEMORY);
                check_for_memory(pgdat, nid);
        }
-       zero_resv_unavail();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core,
@@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
+       zero_resv_unavail();
        free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-       zero_resv_unavail();
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)
index 6db729dc4c5013784e65cc9e6438bef39d22b9a8..eb477809a5c0a534e2977f6fd6c1df74a05bc170 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -64,6 +64,7 @@
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlbflush.h>
 
@@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
-               } else if (pte_unused(pteval)) {
+               } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
                        /*
                         * The guest indicated that the page content is of no
                         * interest anymore. Simply discard the pte, vmscan
                         * will take care of the rest.
+                        * A future reference will then fault in a new zero
+                        * page. When userfaultfd is active, we must not drop
+                        * this page though, as its main user (postcopy
+                        * migration) will not expect userfaults on already
+                        * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(page));
                        /* We have to invalidate as we cleared the pte */
index 2cab8440305531f8ab97f3a56c95b91516bcd2ea..41b9bbf24e16b49db1da4be286fe7310941ad542 100644 (file)
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
 {
        /* Create a pseudo vma that just contains the policy */
        memset(vma, 0, sizeof(*vma));
+       vma_init(vma, NULL);
        /* Bias interleave by inode number to distribute better across nodes */
        vma->vm_pgoff = index + info->vfs_inode.i_ino;
        vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
index 890b1f04a03a3d46f80fe1b2cfccae1f14b79836..2296caf87bfbd28a626663af04f2054a0cc3c45c 100644 (file)
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s)
        list_del(&s->list);
 
        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
+#endif
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
 #ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
                sysfs_slab_release(s);
 #else
                slab_kmem_cache_release(s);
index a3b8467c14af642138deaf35fd3ed3f7f87aed93..51258eff417836f6c5a72433a65c016c8391beb2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
        kset_unregister(s->memcg_kset);
 #endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
-       kobject_del(&s->kobj);
 out:
        kobject_put(&s->kobj);
 }
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
        schedule_work(&s->kobj_remove_work);
 }
 
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+       if (slab_state >= FULL)
+               kobject_del(&s->kobj);
+}
+
 void sysfs_slab_release(struct kmem_cache *s)
 {
        if (slab_state >= FULL)
index 75eda9c2b2602fe24b4c431f797c5e0fc563ebda..8ba0870ecddd0fd592d16ee674b060db512b5b37 100644 (file)
@@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w)
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               preempt_disable();
                queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
                                this_cpu_ptr(&vmstat_work),
                                round_jiffies_relative(sysctl_stat_interval));
-               preempt_enable();
        }
 }
 
index 7d34e69507e305adec0a64b5e272626385f9d651..cd91fd9d96b814d145e378b573dd289fb501e64e 100644 (file)
@@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                        ret = -ENOMEM;
                        goto reject;
                }
+
+               /*
+                * Re-check zswap_is_full() after zswap_shrink() to make
+                * sure the pool is now below max_pool_percent.
+                */
+               if (zswap_is_full()) {
+                       ret = -ENOMEM;
+                       goto reject;
+               }
        }
 
        /* allocate entry */
index 73a65789271ba9346902dd721b0accd8ce747adc..8ccee3d01822f78184357141ced7a07c3109dc2c 100644 (file)
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 18c5271910dc2c1e1715efcd2b448b7cee6a844b..5c1343195292c8f474ff683c73c8f73aa51843a0 100644 (file)
@@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
        }
 
 free_and_return:
-       v9fs_put_trans(clnt->trans_mod);
+       if (ret)
+               v9fs_put_trans(clnt->trans_mod);
        kfree(tmp_options);
        return ret;
 }
index 13ec0d5415c74486c68f8290689d16d78513e6e9..bdaf53925acd5606fdb953800620bd05cf0f259e 100644 (file)
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS)            += tls/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
 obj-$(CONFIG_NET)              += ipv6/
-ifneq ($(CC_CAN_LINK),y)
-$(warning CC cannot link executables. Skipping bpfilter.)
-else
 obj-$(CONFIG_BPFILTER)         += bpfilter/
-endif
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index 55fdba05d7d9daa805d358118852aabb07746e81..9b6bc5abe94680c0a982b9193932f245080f2f85 100644 (file)
@@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = atalk_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = atalk_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = atalk_compat_ioctl,
index 36b3adacc0ddc1bd9a6c5b8dd55cedba4e9bf47b..10462de734eafc00efb9490ddd58cd0bbc83b7c8 100644 (file)
@@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 
        ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-       refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = atmvcc->atm_options;
+       atm_account_tx(atmvcc, skb);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
index 66caa48a27c2307c1b2b43f4a4381f3b34e78485..d795b9c5aea4a4e35021d9db2e10254036df55fe 100644 (file)
@@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                memcpy(here, llc_oui, sizeof(llc_oui));
                ((__be16 *) here)[3] = skb->protocol;
        }
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
        entry->vccs->last_use = jiffies;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
        old = xchg(&entry->vccs->xoff, 1);      /* assume XOFF ... */
index 1f2af59935db356c003cfa8dd7d1bce388fada53..a7a68e5096288df11af1037297189962dc2fa548 100644 (file)
@@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
                goto out;
        }
        pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+       atm_account_tx(vcc, skb);
 
        skb->dev = NULL; /* for paths shared with net_device interfaces */
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
        if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
                kfree_skb(skb);
                error = -EFAULT;
@@ -648,11 +647,16 @@ out:
        return error;
 }
 
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       struct atm_vcc *vcc = ATM_SD(sock);
-       __poll_t mask = 0;
+       struct atm_vcc *vcc;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
+
+       vcc = ATM_SD(sock);
 
        /* exceptional events */
        if (sk->sk_err)
index 526796ad230fc6a2dbdca37f0d4f66f4edf47f17..5850649068bb29b3d688b4c8e29373b4a7f7592d 100644 (file)
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
 int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                int flags);
 int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_setsockopt(struct socket *sock, int level, int optname,
index 5a95fcf6f9b6cc62ced5480910dac7e41f2e7f06..d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b 100644 (file)
@@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
        struct net_device *dev = skb->dev;
 
        ATM_SKB(skb)->vcc = vcc;
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
 
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        if (vcc->send(vcc, skb) < 0) {
                dev->stats.tx_dropped++;
                return;
index 75620c2f261723a915b74df013ac214479ba70c4..24b53c4c39c6a6b5323a1aa79318b2ab2907a332 100644 (file)
@@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
                                        sizeof(struct llc_snap_hdr));
        }
 
-       refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
+       atm_account_tx(entry->shortcut, skb);
        entry->shortcut->send(entry->shortcut, skb);
        entry->packets_fwded++;
        mpc->in_ops->put(entry);
index 21d9d341a6199255a017437954e4b688f1ba5bfd..af8c4b38b7463e03bf4b060735ce852b515d526c 100644 (file)
@@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
                return 1;
        }
 
-       refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
+       atm_account_tx(vcc, skb);
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
                 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
        ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
index 9f75092fe7785c080b2a32f9c2c8b147056bd488..2cb10af16afcf8eeb925bfe1aab33e839821109a 100644 (file)
@@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      pvc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        vcc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = vcc_compat_ioctl,
index ee10e8d46185173067f459aa5efdf5a77f8f9f06..b3ba44aab0ee6c9425fd278ebf8e2df1590a6d7a 100644 (file)
@@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
        struct sock *sk = sk_atm(vcc);
 
        pr_debug("(%d) %d -= %d\n",
-                vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+                vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+       WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
        dev_kfree_skb_any(skb);
        sk->sk_write_space(sk);
 }
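
The atm_account_tx() conversions above all share one idea, and this last hunk shows the payoff: charge sk_wmem_alloc once at transmit time, record exactly what was charged in ATM_SKB(skb)->acct_truesize, and release that recorded amount on completion, because skb->truesize can legitimately change in between and would otherwise underflow the refcount. A hedged userspace sketch of the same accounting, names invented:

#include <assert.h>
#include <stdio.h>

struct buf {
        int truesize;
        int acct_truesize;
};

static int wmem_alloc;

static void account_tx(struct buf *b)
{
        b->acct_truesize = b->truesize;    /* snapshot what we charge */
        wmem_alloc += b->acct_truesize;
}

static void pop(struct buf *b)
{
        wmem_alloc -= b->acct_truesize;    /* not b->truesize! */
}

int main(void)
{
        struct buf b = { .truesize = 100 };

        account_tx(&b);
        b.truesize = 160;                  /* e.g. grown by a lower layer */
        pop(&b);
        assert(wmem_alloc == 0);           /* releasing truesize would go negative */
        printf("wmem_alloc balanced: %d\n", wmem_alloc);
        return 0;
}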
index 53f4ad7087b169bccbd8d0b86c7463fd77204a8d..2f91b766ac423c97a0b9c1fd340222e31b17eefa 100644 (file)
@@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       svc_accept,
        .getname =      svc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        svc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = svc_compat_ioctl,
index d1d2442ce573280cbc5b12beba96225bb7445a47..c603d33d54108b9f93f1745534da28d25f12c0ea 100644 (file)
@@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = ax25_accept,
        .getname        = ax25_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = ax25_ioctl,
        .listen         = ax25_listen,
        .shutdown       = ax25_shutdown,
index be09a98838252f4f0c23cec0625930cf896cd0ff..73bf6a93a3cf1141a34657bf1284893199e04db9 100644 (file)
@@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
-       struct batadv_gw_node *curr_gw;
+       struct batadv_gw_node *curr_gw = NULL;
        int ret = 0;
        void *hdr;
 
@@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        ret = 0;
 
 out:
+       if (curr_gw)
+               batadv_gw_node_put(curr_gw);
        if (router_ifinfo)
                batadv_neigh_ifinfo_put(router_ifinfo);
        if (router)
index ec93337ee2597738e46b87dd72724d5becf3f48e..6baec4e68898c6e992e7522d2ee8c78ce62a1b08 100644 (file)
@@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
-       struct batadv_gw_node *curr_gw;
+       struct batadv_gw_node *curr_gw = NULL;
        int ret = 0;
        void *hdr;
 
@@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        ret = 0;
 
 out:
+       if (curr_gw)
+               batadv_gw_node_put(curr_gw);
        if (router_ifinfo)
                batadv_neigh_ifinfo_put(router_ifinfo);
        if (router)
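
Both gateway-dump fixes follow the same shape: initialize the reference pointer to NULL so every early `goto out` is safe, then drop the reference at the single exit label. Sketch, with hypothetical names:

#include <stdio.h>

struct node { int refs; };

static struct node *node_get(struct node *n) { n->refs++; return n; }
static void node_put(struct node *n) { n->refs--; }

static int dump_entry(struct node *gw, int fail_early)
{
        struct node *curr_gw = NULL;       /* NULL-init makes early exits safe */
        int ret = -1;

        if (fail_early)
                goto out;                  /* nothing acquired yet */

        curr_gw = node_get(gw);
        /* ... emit netlink attributes ... */
        ret = 0;
out:
        if (curr_gw)
                node_put(curr_gw);         /* the put the patch adds */
        return ret;
}

int main(void)
{
        struct node gw = { 0 };

        dump_entry(&gw, 1);
        dump_entry(&gw, 0);
        printf("refs after both paths: %d\n", gw.refs);
        return 0;
}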
index 4229b01ac7b54008e023df0ed6546a6d541498ba..87479c60670ebfbe2ad3df17130f1289d657df7b 100644 (file)
@@ -19,6 +19,7 @@
 #include "debugfs.h"
 #include "main.h"
 
+#include <linux/dcache.h>
 #include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -343,6 +344,25 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
+ * @hard_iface: hard interface which was renamed
+ */
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+       const char *name = hard_iface->net_dev->name;
+       struct dentry *dir;
+       struct dentry *d;
+
+       dir = hard_iface->debug_dir;
+       if (!dir)
+               return;
+
+       d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+       if (!d)
+               pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
 /**
  * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
@@ -413,6 +433,26 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
+ * @dev: net_device which was renamed
+ */
+void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       const char *name = dev->name;
+       struct dentry *dir;
+       struct dentry *d;
+
+       dir = bat_priv->debug_dir;
+       if (!dir)
+               return;
+
+       d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+       if (!d)
+               pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
 /**
  * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
  * @dev: netdev struct of the soft interface
index 37b069698b04b369e68e4e8a31c3ac01575b0178..08a592ffbee5203ac4994fc49bf9c187c2e66f8e 100644 (file)
@@ -30,8 +30,10 @@ struct net_device;
 void batadv_debugfs_init(void);
 void batadv_debugfs_destroy(void);
 int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_rename_meshif(struct net_device *dev);
 void batadv_debugfs_del_meshif(struct net_device *dev);
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
 
 #else
@@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
        return 0;
 }
 
+static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+}
+
 static inline void batadv_debugfs_del_meshif(struct net_device *dev)
 {
 }
@@ -59,6 +65,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
        return 0;
 }
 
+static inline
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
 static inline
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
 {
index c405d15befd60bdabf9f50813c3bee446238d539..2f0d42f2f913e74cf10c0c6ce89320434994cac5 100644 (file)
@@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void)
        rtnl_unlock();
 }
 
+/**
+ * batadv_hard_if_event_softif() - Handle events for soft interfaces
+ * @event: NETDEV_* event to handle
+ * @net_dev: net_device which generated an event
+ *
+ * Return: NOTIFY_* result
+ */
+static int batadv_hard_if_event_softif(unsigned long event,
+                                      struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv;
+
+       switch (event) {
+       case NETDEV_REGISTER:
+               batadv_sysfs_add_meshif(net_dev);
+               bat_priv = netdev_priv(net_dev);
+               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+               break;
+       case NETDEV_CHANGENAME:
+               batadv_debugfs_rename_meshif(net_dev);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
 static int batadv_hard_if_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
@@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_priv *bat_priv;
 
-       if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
-               batadv_sysfs_add_meshif(net_dev);
-               bat_priv = netdev_priv(net_dev);
-               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
-               return NOTIFY_DONE;
-       }
+       if (batadv_softif_is_valid(net_dev))
+               return batadv_hard_if_event_softif(event, net_dev);
 
        hard_iface = batadv_hardif_get_by_netdev(net_dev);
        if (!hard_iface && (event == NETDEV_REGISTER ||
@@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
                if (batadv_is_wifi_hardif(hard_iface))
                        hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
                break;
+       case NETDEV_CHANGENAME:
+               batadv_debugfs_rename_hardif(hard_iface);
+               break;
        default:
                break;
        }
index 3986551397caa5ffb6ba7338eeb4769c8b8f99fb..12a2b7d21376721d15c6a31f3e794e4270d74b5c 100644 (file)
@@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                ether_addr_copy(common->addr, tt_addr);
                common->vid = vid;
 
-               common->flags = flags;
+               if (!is_multicast_ether_addr(common->addr))
+                       common->flags = flags & (~BATADV_TT_SYNC_MASK);
+
                tt_global_entry->roam_at = 0;
                /* node must store current time in case of roaming. This is
                 * needed to purge this entry out on timeout (if nobody claims
@@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                 * TT_CLIENT_TEMP, therefore they have to be copied in the
                 * client entry
                 */
-               common->flags |= flags & (~BATADV_TT_SYNC_MASK);
+               if (!is_multicast_ether_addr(common->addr))
+                       common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
index 510ab4f55df56bc1c356d5130d2dbea4be4744ff..3264e1873219bd40b8c1ccfc2ce6c40d96ca0030 100644 (file)
@@ -437,13 +437,16 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
        return 0;
 }
 
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
+                         poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
@@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(bt_sock_poll_mask);
+EXPORT_SYMBOL(bt_sock_poll);
 
 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
index d6c0998615388d078c0910bee08784b4fac2f0c0..1506e1632394acf06e9f5873d045bd394e5b3059 100644 (file)
@@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = {
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
index 742a190034e6378a4be886ed730d55936c82ee27..686bdc6b35b03d1fd0965dc0fd76c5edde78c1eb 100644 (file)
@@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = {
        .getname        = l2cap_sock_getname,
        .sendmsg        = l2cap_sock_sendmsg,
        .recvmsg        = l2cap_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 1cf57622473aa70d626e1df5ad867800ab4cfe6e..d606e9212291608ea2e266238c0f65ce18d0c311 100644 (file)
@@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = {
        .setsockopt     = rfcomm_sock_setsockopt,
        .getsockopt     = rfcomm_sock_getsockopt,
        .ioctl          = rfcomm_sock_ioctl,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .socketpair     = sock_no_socketpair,
        .mmap           = sock_no_mmap
 };
index d60dbc61d170864b1393aabb0d7f7965a1e6ad17..413b8ee49feca325dea79e328c11b8ba00afbce3 100644 (file)
@@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = {
        .getname        = sco_sock_getname,
        .sendmsg        = sco_sock_sendmsg,
        .recvmsg        = sco_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 68c3578343b4b4d026e9df40fda98a7850757877..22a78eedf4b1447a8f42cc442615191d66ff1b99 100644 (file)
@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        u32 retval, duration;
+       int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        void *data;
        int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        skb_reset_network_header(skb);
 
        if (is_l2)
-               __skb_push(skb, ETH_HLEN);
+               __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        retval = bpf_test_run(prog, skb, repeat, &duration);
-       if (!is_l2)
-               __skb_push(skb, ETH_HLEN);
+       if (!is_l2) {
+               if (skb_headroom(skb) < hh_len) {
+                       int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
+
+                       if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
+                               kfree_skb(skb);
+                               return -ENOMEM;
+                       }
+               }
+               memset(__skb_push(skb, hh_len), 0, hh_len);
+       }
+
        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
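
The rewritten tail of bpf_prog_test_run_skb() guarantees headroom before re-pushing an L2 header onto a non-L2 test skb, growing the buffer when there is not enough room rather than writing in front of it. A userspace analog of the expand-then-push sequence; a plain allocate-and-shift stands in for pskb_expand_head() here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HH_LEN 14                          /* ETH_HLEN analog */

struct pkt {
        unsigned char *head, *data;
        size_t len;
};

static int ensure_headroom(struct pkt *p, size_t need)
{
        size_t room = p->data - p->head;
        unsigned char *nhead;

        if (room >= need)
                return 0;
        nhead = malloc(need + p->len);     /* pskb_expand_head() analog */
        if (!nhead)
                return -1;
        memcpy(nhead + need, p->data, p->len);
        free(p->head);
        p->head = nhead;
        p->data = nhead + need;
        return 0;
}

int main(void)
{
        struct pkt p;

        p.head = malloc(4);
        memcpy(p.head, "data", 4);
        p.data = p.head;
        p.len = 4;

        if (ensure_headroom(&p, HH_LEN))
                return 1;
        p.data -= HH_LEN;                  /* __skb_push() analog */
        memset(p.data, 0, HH_LEN);         /* zeroed dummy header */
        p.len += HH_LEN;
        printf("len with pushed header: %zu\n", p.len);
        free(p.head);
        return 0;
}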
diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore
new file mode 100644 (file)
index 0000000..e97084e
--- /dev/null
+++ b/net/bpfilter/.gitignore
@@ -0,0 +1 @@
+bpfilter_umh
index a948b072c28f36451a587a88bcb6f86c32023693..76deb661588322d9cf8ac6bdd73ba63f5d1416fc 100644 (file)
@@ -1,6 +1,5 @@
 menuconfig BPFILTER
        bool "BPF based packet filtering framework (BPFILTER)"
-       default n
        depends on NET && BPF && INET
        help
          This builds experimental bpfilter framework that is aiming to
@@ -9,6 +8,7 @@ menuconfig BPFILTER
 if BPFILTER
 config BPFILTER_UMH
        tristate "bpfilter kernel module with user mode helper"
+       depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
        default m
        help
          This builds bpfilter kernel module with embedded user mode helper
index e0bbe7583e58dcca5e17136d1b091ff03465b4d2..39c6980b5d9952eed1046f656d8c0a85b4a0d2d6 100644 (file)
@@ -15,18 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y)
 HOSTLDFLAGS += -static
 endif
 
-# a bit of elf magic to convert bpfilter_umh binary into a binary blob
-# inside bpfilter_umh.o elf file referenced by
-# _binary_net_bpfilter_bpfilter_umh_start symbol
-# which bpfilter_kern.c passes further into umh blob loader at run-time
-quiet_cmd_copy_umh = GEN $@
-      cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
-      $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
-      -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
-      --rename-section .data=.init.rodata $< $@
-
-$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
-       $(call cmd,copy_umh)
+$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
 
 obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o
-bpfilter-objs += bpfilter_kern.o bpfilter_umh.o
+bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o
index 09522573f611b01ba5fb4d52125e8264d9147f20..f0fc182d3db77eb311d91f7faef4e8a6f85886b3 100644 (file)
 #include <linux/file.h>
 #include "msgfmt.h"
 
-#define UMH_start _binary_net_bpfilter_bpfilter_umh_start
-#define UMH_end _binary_net_bpfilter_bpfilter_umh_end
-
-extern char UMH_start;
-extern char UMH_end;
+extern char bpfilter_umh_start;
+extern char bpfilter_umh_end;
 
 static struct umh_info info;
 /* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -93,7 +90,9 @@ static int __init load_umh(void)
        int err;
 
        /* fork usermode process */
-       err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info);
+       err = fork_usermode_blob(&bpfilter_umh_start,
+                                &bpfilter_umh_end - &bpfilter_umh_start,
+                                &info);
        if (err)
                return err;
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
new file mode 100644 (file)
index 0000000..40311d1
--- /dev/null
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+       .section .init.rodata, "a"
+       .global bpfilter_umh_start
+bpfilter_umh_start:
+       .incbin "net/bpfilter/bpfilter_umh"
+       .global bpfilter_umh_end
+bpfilter_umh_end:
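
bpfilter_umh_blob.S replaces the objcopy incantation deleted from the Makefile above: `.incbin` pastes the helper binary into .init.rodata, and the two global labels bracket it so C code can take its size as the difference of their addresses. A userspace analog; the blob is faked with an array so the example links stand-alone:

#include <stdio.h>

/* In the kernel these addresses come from the .S file via .incbin;
 * here they are ordinary pointers into a local array. */
static const char blob[] = "embedded user-mode helper payload";
static const char *bpfilter_umh_start = blob;
static const char *bpfilter_umh_end = blob + sizeof(blob);

int main(void)
{
        long len = (long)(bpfilter_umh_end - bpfilter_umh_start);

        printf("umh blob: %ld bytes\n", len);
        return 0;
}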
index e0adcd123f48a1a4f66028bbf731132ed8e7ff17..711d7156efd8bc94b449d0e8066eedd4fe0d5747 100644 (file)
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
        caifd = caif_get(skb->dev);
 
        WARN_ON(caifd == NULL);
-       if (caifd == NULL)
+       if (!caifd) {
+               rcu_read_unlock();
                return;
+       }
 
        caifd_hold(caifd);
        rcu_read_unlock();
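
The caif_flow_cb() fix restores lock balance: the WARN_ON path returned while still inside the RCU read-side critical section. The general rule, sketched with a pthread rwlock standing in for rcu_read_lock():

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void flow_cb(void *obj)
{
        pthread_rwlock_rdlock(&lock);
        if (!obj) {
                pthread_rwlock_unlock(&lock);  /* the unlock the patch adds */
                return;
        }
        /* ... use obj under the read lock ... */
        pthread_rwlock_unlock(&lock);
}

int main(void)
{
        flow_cb(NULL);
        flow_cb("dev");
        puts("lock balanced on all return paths");
        return 0;
}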
index c7991867d62273f48bb55e88774b573e81f40536..a6fb1b3bcad9b2f3c1c24b2a3496ad21b07c69d9 100644 (file)
@@ -934,11 +934,15 @@ static int caif_release(struct socket *sock)
 }
 
 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static __poll_t caif_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t caif_poll(struct file *file,
+                             struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       __poll_t mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-       __poll_t mask = 0;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
@@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
index 9393f25df08d3fce299aaa463efd79244e6527e9..0af8f0db892a3311fb5a1a898ab0bff5696adf00 100644 (file)
@@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = sock_no_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index fd7e2f49ea6a20b79c43bf50c72d2b1e8b48d260..1051eee8258184f33d15a6142ee8b387839c9adc 100644 (file)
@@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = raw_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index f19bf3dc2bd6ea02cb828a95d0b91322ac8b0004..9938952c5c78f1e72ef13f44517ef054a60205b2 100644 (file)
@@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
 
 /**
  *     datagram_poll - generic datagram poll
+ *     @file: file struct
  *     @sock: socket
- *     @events to wait for
+ *     @wait: poll table
  *
  *     Datagram poll: Again totally generic. This also handles
  *     sequenced packet sockets providing the socket receive queue
@@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
  *     and you use a different write policy from sock_writeable()
  *     then please supply your own write_space callback.
  */
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(datagram_poll_mask);
+EXPORT_SYMBOL(datagram_poll);
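
This is the template for every .poll_mask → .poll conversion in the series: with the classic ->poll signature the method itself must register the caller on the socket's wait queue (sock_poll_wait(), or plain poll_wait() in bt_sock_poll) before computing the readiness mask, otherwise a wakeup arriving between the check and the sleep is lost. From userspace the contract these methods ultimately serve looks level-triggered, as in this small poll(2) demo:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        struct pollfd pfd;

        if (pipe(fds))
                return 1;
        write(fds[1], "x", 1);             /* make the read end ready */

        pfd.fd = fds[0];
        pfd.events = POLLIN;
        /* the kernel-side ->poll handler computes the mask we get back */
        if (poll(&pfd, 1, 1000) == 1 && (pfd.revents & POLLIN))
                printf("readable mask reported: 0x%x\n", (unsigned)pfd.revents);
        return 0;
}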
index 57b7bab5f70bb7c50a8be565cc90a40bc1c2d5d6..a5aa1c7444e688e66263fc112a4211409840a749 100644 (file)
@@ -8643,7 +8643,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               if (dev_get_valid_name(net, dev, pat) < 0)
+               err = dev_get_valid_name(net, dev, pat);
+               if (err < 0)
                        goto out;
        }
 
@@ -8655,7 +8656,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_close(dev);
 
        /* And unlink it from device chain */
-       err = -ENODEV;
        unlist_netdevice(dev);
 
        synchronize_net();
index a04e1e88bf3ab49340d788589c365aaf45d9d3e2..50537ff961a722e18731b7b9671deb739bfce847 100644 (file)
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
                if (dev->tx_queue_len ^ ifr->ifr_qlen) {
-                       unsigned int orig_len = dev->tx_queue_len;
-
-                       dev->tx_queue_len = ifr->ifr_qlen;
-                       err = call_netdevice_notifiers(
-                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-                       err = notifier_to_errno(err);
-                       if (err) {
-                               dev->tx_queue_len = orig_len;
+                       err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
+                       if (err)
                                return err;
-                       }
                }
                return 0;
 
index 126ffc5bc630cb412e4bcf1a48869ec6711fda54..f64aa13811eaeedf8f0040bc9f993ad9e1661eca 100644 (file)
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->mark && r->mark != rule->mark)
                        continue;
 
+               if (rule->suppress_ifgroup != -1 &&
+                   r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (rule->suppress_prefixlen != -1 &&
+                   r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
                if (rule->mark_mask && r->mark_mask != rule->mark_mask)
                        continue;
 
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->ip_proto && r->ip_proto != rule->ip_proto)
                        continue;
 
+               if (rule->proto && r->proto != rule->proto)
+                       continue;
+
                if (fib_rule_port_range_set(&rule->sport_range) &&
                    !fib_rule_port_range_compare(&r->sport_range,
                                                 &rule->sport_range))
@@ -645,6 +656,73 @@ errout:
        return err;
 }
 
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+                      struct nlattr **tb, struct fib_rule *rule)
+{
+       struct fib_rule *r;
+
+       list_for_each_entry(r, &ops->rules_list, list) {
+               if (r->action != rule->action)
+                       continue;
+
+               if (r->table != rule->table)
+                       continue;
+
+               if (r->pref != rule->pref)
+                       continue;
+
+               if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+                       continue;
+
+               if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+                       continue;
+
+               if (r->mark != rule->mark)
+                       continue;
+
+               if (r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
+               if (r->mark_mask != rule->mark_mask)
+                       continue;
+
+               if (r->tun_id != rule->tun_id)
+                       continue;
+
+               if (r->fr_net != rule->fr_net)
+                       continue;
+
+               if (r->l3mdev != rule->l3mdev)
+                       continue;
+
+               if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+                   !uid_eq(r->uid_range.end, rule->uid_range.end))
+                       continue;
+
+               if (r->ip_proto != rule->ip_proto)
+                       continue;
+
+               if (r->proto != rule->proto)
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->sport_range,
+                                                &rule->sport_range))
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->dport_range,
+                                                &rule->dport_range))
+                       continue;
+
+               if (!ops->compare(r, frh, tb))
+                       continue;
+               return 1;
+       }
+       return 0;
+}
+
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
 {
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout;
 
        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-           rule_find(ops, frh, tb, rule, user_priority)) {
+           rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }
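
rule_exists() is introduced because rule_find() implements lookup semantics, where an unset key acts as a wildcard, and that is the wrong predicate for an NLM_F_EXCL duplicate check. A toy version of the distinction, with two-field rules and invented names:

#include <stdio.h>

struct rule { int mark; int table; };

/* lookup semantics: mark == 0 matches anything */
static int find_match(const struct rule *r, const struct rule *key)
{
        if (key->mark && r->mark != key->mark)
                return 0;
        return r->table == key->table;
}

/* NLM_F_EXCL semantics: every field compared exactly */
static int exists_exact(const struct rule *r, const struct rule *key)
{
        return r->mark == key->mark && r->table == key->table;
}

int main(void)
{
        struct rule installed = { .mark = 7, .table = 254 };
        struct rule new_rule  = { .mark = 0, .table = 254 };

        printf("wildcard find says duplicate: %d\n",
               find_match(&installed, &new_rule));   /* 1: wrong for EXCL */
        printf("exact check says duplicate: %d\n",
               exists_exact(&installed, &new_rule)); /* 0: correct */
        return 0;
}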
index 3d9ba7e5965adc4658b379a0cf55ff2f22f4b94d..06da770f543fdc2742f8503a5436a5893a566914 100644 (file)
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
+               bool ldx_off_ok = offset <= S16_MAX;
+
                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
-               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
-               *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
-                                     offset);
+               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+                                     size, 2 + endian + (!ldx_off_ok * 2));
+               if (ldx_off_ok) {
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_D, offset);
+               } else {
+                       *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+                       *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_TMP, 0);
+               }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
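
The ldx_off_ok guard exists because the eBPF LDX displacement is a signed 16-bit field; larger offsets must first be added into a scratch register. A toy encoder with the same guard (the instruction names are illustrative, not the real encoding):

#include <stdint.h>
#include <stdio.h>

static void emit_load(int32_t offset)
{
        if (offset >= INT16_MIN && offset <= INT16_MAX) {
                printf("LDX  reg+%d\n", (int)offset);  /* fits in insn->off */
        } else {
                printf("ADD  tmp, %d\n", (int)offset); /* materialize address */
                printf("LDX  tmp+0\n");
        }
}

int main(void)
{
        emit_load(12);
        emit_load(70000);                  /* would silently truncate in s16 */
        return 0;
}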
@@ -1762,6 +1772,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+static inline int sk_skb_try_make_writable(struct sk_buff *skb,
+                                          unsigned int write_len)
+{
+       int err = __bpf_try_make_writable(skb, write_len);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return err;
+}
+
+BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
+{
+       /* Idea is the following: should the needed direct read/write
+        * test fail during runtime, we can pull in more data and redo
+        * again, since implicitly, we invalidate previous checks here.
+        *
+        * Or, since we know how much we need to make read/writeable,
+        * this can be done once at the program beginning for direct
+        * access case. By this we overcome limitations of only current
+        * headroom being accessible.
+        */
+       return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
+}
+
+static const struct bpf_func_proto sk_skb_pull_data_proto = {
+       .func           = sk_skb_pull_data,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
 {
@@ -2779,7 +2820,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 
 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
 {
-       return skb->dev->mtu + skb->dev->hard_header_len;
+       return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+                         SKB_MAX_ALLOC;
 }
 
 static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
@@ -2863,8 +2905,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
        return __skb_trim_rcsum(skb, new_len);
 }
 
-BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
-          u64, flags)
+static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
+                                       u64 flags)
 {
        u32 max_len = __bpf_skb_max_len(skb);
        u32 min_len = __bpf_skb_min_len(skb);
@@ -2900,6 +2942,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
                if (!ret && skb_is_gso(skb))
                        skb_gso_reset(skb);
        }
+       return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_tail(skb, new_len, flags);
 
        bpf_compute_data_pointers(skb);
        return ret;
@@ -2914,8 +2963,26 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
+{
+       int ret = __bpf_skb_change_tail(skb, new_len, flags);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_tail_proto = {
+       .func           = sk_skb_change_tail,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
+
+static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+                                       u64 flags)
 {
        u32 max_len = __bpf_skb_max_len(skb);
        u32 new_len = skb->len + head_room;
@@ -2941,8 +3008,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
                skb_reset_mac_header(skb);
        }
 
+       return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_head(skb, head_room, flags);
+
        bpf_compute_data_pointers(skb);
-       return 0;
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_skb_change_head_proto = {
@@ -2954,6 +3029,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_head(skb, head_room, flags);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_head_proto = {
+       .func           = sk_skb_change_head,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
 {
        return xdp_data_meta_unsupported(xdp) ? 0 :
@@ -3046,12 +3138,16 @@ static int __bpf_tx_xdp(struct net_device *dev,
                        u32 index)
 {
        struct xdp_frame *xdpf;
-       int sent;
+       int err, sent;
 
        if (!dev->netdev_ops->ndo_xdp_xmit) {
                return -EOPNOTSUPP;
        }
 
+       err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+       if (unlikely(err))
+               return err;
+
        xdpf = convert_to_xdp_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;
@@ -3214,20 +3310,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
-       unsigned int len;
-
-       if (unlikely(!(fwd->flags & IFF_UP)))
-               return -ENETDOWN;
-
-       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
-       if (skb->len > len)
-               return -EMSGSIZE;
-
-       return 0;
-}
-
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
@@ -3256,10 +3338,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
        }
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
-               if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+               struct bpf_dtab_netdev *dst = fwd;
+
+               err = dev_map_generic_redirect(dst, skb, xdp_prog);
+               if (unlikely(err))
                        goto err;
-               skb->dev = fwd;
-               generic_xdp_tx(skb, xdp_prog);
        } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                struct xdp_sock *xs = fwd;
 
@@ -3298,7 +3381,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                goto err;
        }
 
-       if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+       err = xdp_ok_fwd_dev(fwd, skb->len);
+       if (unlikely(err))
                goto err;
 
        skb->dev = fwd;
@@ -4086,8 +4170,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
        memcpy(params->smac, dev->dev_addr, ETH_ALEN);
        params->h_vlan_TCI = 0;
        params->h_vlan_proto = 0;
+       params->ifindex = dev->ifindex;
 
-       return dev->ifindex;
+       return 0;
 }
 #endif
 
@@ -4111,7 +4196,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        /* verify forwarding is enabled on this interface */
        in_dev = __in_dev_get_rcu(dev);
        if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl4.flowi4_iif = 1;
@@ -4136,7 +4221,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = fib_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
        } else {
@@ -4148,8 +4233,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
        }
 
-       if (err || res.type != RTN_UNICAST)
-               return 0;
+       if (err) {
+               /* map fib lookup errors to RTN_ type */
+               if (err == -EINVAL)
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               if (err == -EHOSTUNREACH)
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               if (err == -EACCES)
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+       }
+
+       if (res.type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (res.fi->fib_nhs > 1)
                fib_select_path(net, &res, &fl4, NULL);
@@ -4157,19 +4254,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        nh = &res.fi->fib_nh[res.nh_sel];
 
        /* do not handle lwt encaps right now */
        if (nh->nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        dev = nh->nh_dev;
-       if (unlikely(!dev))
-               return 0;
-
        if (nh->nh_gw)
                params->ipv4_dst = nh->nh_gw;
 
@@ -4179,10 +4273,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         * rcu_read_lock_bh is not needed here
         */
        neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4203,7 +4297,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        /* link local addresses are never forwarded */
        if (rt6_need_strict(dst) || rt6_need_strict(src))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        dev = dev_get_by_index_rcu(net, params->ifindex);
        if (unlikely(!dev))
@@ -4211,7 +4305,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        idev = __in6_dev_get_safely(dev);
        if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl6.flowi6_iif = 1;
@@ -4238,7 +4332,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = ipv6_stub->fib6_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
        } else {
@@ -4251,11 +4345,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        }
 
        if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+
+       if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
+               switch (f6i->fib6_type) {
+               case RTN_BLACKHOLE:
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               case RTN_UNREACHABLE:
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               case RTN_PROHIBIT:
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+               default:
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
+               }
+       }
 
-       if (unlikely(f6i->fib6_flags & RTF_REJECT ||
-           f6i->fib6_type != RTN_UNICAST))
-               return 0;
+       if (f6i->fib6_type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
                f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
@@ -4265,11 +4371,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        if (f6i->fib6_nh.nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        if (f6i->fib6_flags & RTF_GATEWAY)
                *dst = f6i->fib6_nh.nh_gw;
@@ -4283,10 +4389,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         */
        neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
                                      ndisc_hashfn, dst, dev);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4328,7 +4434,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
           struct bpf_fib_lookup *, params, int, plen, u32, flags)
 {
        struct net *net = dev_net(skb->dev);
-       int index = -EAFNOSUPPORT;
+       int rc = -EAFNOSUPPORT;
 
        if (plen < sizeof(*params))
                return -EINVAL;
@@ -4339,25 +4445,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
        switch (params->family) {
 #if IS_ENABLED(CONFIG_INET)
        case AF_INET:
-               index = bpf_ipv4_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv4_fib_lookup(net, params, flags, false);
                break;
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               index = bpf_ipv6_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv6_fib_lookup(net, params, flags, false);
                break;
 #endif
        }
 
-       if (index > 0) {
+       if (!rc) {
                struct net_device *dev;
 
-               dev = dev_get_by_index_rcu(net, index);
+               dev = dev_get_by_index_rcu(net, params->ifindex);
                if (!is_skb_forwardable(dev, skb))
-                       index = 0;
+                       rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       return index;
+       return rc;
 }
 
 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
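
The lookup helpers now return BPF_FIB_LKUP_RET_* codes instead of "ifindex or 0", so callers can tell why forwarding was refused; the egress ifindex travels in params->ifindex instead. A sketch of the errno-to-code mapping; only the names mirror the uapi enum, the values here are illustrative:

#include <errno.h>
#include <stdio.h>

enum {
        FIB_LKUP_RET_SUCCESS,
        FIB_LKUP_RET_BLACKHOLE,
        FIB_LKUP_RET_UNREACHABLE,
        FIB_LKUP_RET_PROHIBIT,
        FIB_LKUP_RET_NOT_FWDED,
};

static int map_fib_err(int err)
{
        switch (err) {
        case 0:                 return FIB_LKUP_RET_SUCCESS;
        case -EINVAL:           return FIB_LKUP_RET_BLACKHOLE;
        case -EHOSTUNREACH:     return FIB_LKUP_RET_UNREACHABLE;
        case -EACCES:           return FIB_LKUP_RET_PROHIBIT;
        default:                return FIB_LKUP_RET_NOT_FWDED;
        }
}

int main(void)
{
        printf("-EHOSTUNREACH -> %d\n", map_fib_err(-EHOSTUNREACH));
        printf("-ENOENT       -> %d\n", map_fib_err(-ENOENT));
        return 0;
}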
@@ -4430,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
        .arg4_type      = ARG_CONST_SIZE
 };
 
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
           const void *, from, u32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        void *srh_tlvs, *srh_end, *ptr;
@@ -4459,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
 
        memcpy(skb->data + offset, from, len);
        return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
@@ -4477,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
           u32, action, void *, param, u32, param_len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        struct ipv6_sr_hdr *srh;
@@ -4525,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
        default:
                return -EINVAL;
        }
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
@@ -4543,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
           s32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        void *srh_end, *srh_tlvs, *ptr;
@@ -4587,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
        srh_state->hdrlen += len;
        srh_state->valid = 0;
        return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
@@ -4600,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
 };
+#endif /* CONFIG_IPV6_SEG6_BPF */
 
 bool bpf_helper_changes_pkt_data(void *func)
 {
@@ -4608,9 +4704,12 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_skb_store_bytes ||
            func == bpf_skb_change_proto ||
            func == bpf_skb_change_head ||
+           func == sk_skb_change_head ||
            func == bpf_skb_change_tail ||
+           func == sk_skb_change_tail ||
            func == bpf_skb_adjust_room ||
            func == bpf_skb_pull_data ||
+           func == sk_skb_pull_data ||
            func == bpf_clone_redirect ||
            func == bpf_l3_csum_replace ||
            func == bpf_l4_csum_replace ||
@@ -4618,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_xdp_adjust_meta ||
            func == bpf_msg_pull_data ||
            func == bpf_xdp_adjust_tail ||
-           func == bpf_lwt_push_encap ||
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
            func == bpf_lwt_seg6_store_bytes ||
            func == bpf_lwt_seg6_adjust_srh ||
-           func == bpf_lwt_seg6_action
-           )
+           func == bpf_lwt_seg6_action ||
+#endif
+           func == bpf_lwt_push_encap)
                return true;
 
        return false;
@@ -4862,11 +4962,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
        case BPF_FUNC_skb_pull_data:
-               return &bpf_skb_pull_data_proto;
+               return &sk_skb_pull_data_proto;
        case BPF_FUNC_skb_change_tail:
-               return &bpf_skb_change_tail_proto;
+               return &sk_skb_change_tail_proto;
        case BPF_FUNC_skb_change_head:
-               return &bpf_skb_change_head_proto;
+               return &sk_skb_change_head_proto;
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_cookie_proto;
        case BPF_FUNC_get_socket_uid:
@@ -4957,12 +5057,14 @@ static const struct bpf_func_proto *
 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
        switch (func_id) {
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        case BPF_FUNC_lwt_seg6_store_bytes:
                return &bpf_lwt_seg6_store_bytes_proto;
        case BPF_FUNC_lwt_seg6_action:
                return &bpf_lwt_seg6_action_proto;
        case BPF_FUNC_lwt_seg6_adjust_srh:
                return &bpf_lwt_seg6_adjust_srh_proto;
+#endif
        default:
                return lwt_out_func_proto(func_id, prog);
        }
index b2b2323bdc84c44afc33304d9d2f6a22738f6523..188d693cb251a05d6483b81bbd8d815e28b77164 100644 (file)
@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
                d->lock = lock;
                spin_lock_bh(lock);
        }
-       if (d->tail)
-               return gnet_stats_copy(d, type, NULL, 0, padattr);
+       if (d->tail) {
+               int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
+
+               /* The initial attribute added in gnet_stats_copy() may be
+                * preceded by a padding attribute, in which case d->tail will
+                * end up pointing at the padding instead of the real attribute.
+                * Fix this so gnet_stats_finish_copy() adjusts the length of
+                * the right attribute.
+                */
+               if (ret == 0 && d->tail->nla_type == padattr)
+                       d->tail = (struct nlattr *)((char *)d->tail +
+                                                   NLA_ALIGN(d->tail->nla_len));
+               return ret;
+       }
 
        return 0;
 }
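
The fix advances d->tail past a leading padding attribute using NLA_ALIGN() so the length fix-up in gnet_stats_finish_copy() lands on the real attribute. The pointer arithmetic in isolation, over a toy attribute layout:

#include <stdio.h>

#define NLA_ALIGNTO 4
#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

struct nlattr { unsigned short nla_len, nla_type; };

int main(void)
{
        union { struct nlattr a; unsigned char bytes[32]; } buf = { 0 };
        struct nlattr *pad = &buf.a;
        struct nlattr *real;

        pad->nla_type = 1;                 /* stands in for the padattr type */
        pad->nla_len = 5;                  /* unaligned on purpose */

        /* step over the padding to the attribute whose length matters */
        real = (struct nlattr *)((char *)pad + NLA_ALIGN(pad->nla_len));
        real->nla_type = 2;

        printf("real attribute starts at offset %ld\n",
               (long)((char *)real - (char *)pad));
        return 0;
}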
index 68bf072067442567c66db978078ba161d11df442..43a932cb609b78521c1b30ce73c1206bcab439d7 100644 (file)
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
        struct page *page;
 
        /* Empty recycle ring */
-       while ((page = ptr_ring_consume(&pool->ring))) {
+       while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
index 5ef61222fdef1f305909eeca6ac278bcac88e1b0..e3f743c141b3f7b4684fba1ff7de10b27af35349 100644 (file)
@@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
                        return err;
        }
 
-       dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
-       __dev_notify_flags(dev, old_flags, ~0U);
+       if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+               __dev_notify_flags(dev, old_flags, 0U);
+       } else {
+               dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+               __dev_notify_flags(dev, old_flags, ~0U);
+       }
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
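
rtnl_configure_link() now distinguishes the first configure, where the link just became visible and everything should be announced, from a re-configure, where only changed flags are reported. A rough analog; the real gchanges-mask semantics of __dev_notify_flags() are simplified away here:

#include <stdio.h>

enum { LINK_UNINITIALIZED, LINK_INITIALIZED };

static void configure_link(int *state, unsigned int old_flags,
                           unsigned int new_flags)
{
        if (*state == LINK_INITIALIZED) {
                unsigned int delta = old_flags ^ new_flags;

                if (delta)                 /* re-configure: delta only */
                        printf("notify delta: 0x%x\n", delta);
        } else {
                *state = LINK_INITIALIZED; /* first time: announce it all */
                printf("notify everything (new link)\n");
        }
}

int main(void)
{
        int state = LINK_UNINITIALIZED;

        configure_link(&state, 0x0, 0x1);
        configure_link(&state, 0x1, 0x1003);
        return 0;
}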
index c642304f178ce0a4e1358d59e45032a39f76fb3f..fb35b62af2724025f743d61de24f9fb7eb9186a8 100644 (file)
@@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        n->cloned = 1;
        n->nohdr = 0;
        n->peeked = 0;
+       C(pfmemalloc);
        n->destructor = NULL;
        C(tail);
        C(end);
@@ -3719,6 +3720,7 @@ normal:
                                net_warn_ratelimited(
                                        "skb_segment: too many frags: %u %u\n",
                                        pos, mss);
+                               err = -EINVAL;
                                goto err;
                        }
 
@@ -3752,11 +3754,10 @@ skip_fraglist:
 
 perform_csum_check:
                if (!csum) {
-                       if (skb_has_shared_frag(nskb)) {
-                               err = __skb_linearize(nskb);
-                               if (err)
-                                       goto err;
-                       }
+                       if (skb_has_shared_frag(nskb) &&
+                           __skb_linearize(nskb))
+                               goto err;
+
                        if (!nskb->remcsum_offload)
                                nskb->ip_summed = CHECKSUM_NONE;
                        SKB_GSO_CB(nskb)->csum =
@@ -5276,8 +5277,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                        if (npages >= 1 << order) {
                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
                                                   __GFP_COMP |
-                                                  __GFP_NOWARN |
-                                                  __GFP_NORETRY,
+                                                  __GFP_NOWARN,
                                                   order);
                                if (page)
                                        goto fill_page;
index bcc41829a16d50714bdd3c25c976c0b7296fab84..bc2d7a37297fecfbf3fbddd09ce53931fe0e28af 100644 (file)
@@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
                pfrag->offset += use;
 
                sge = sg + sg_curr - 1;
-               if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
-                   sg->offset + sg->length == orig_offset) {
-                       sg->length += use;
+               if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
+                   sge->offset + sge->length == orig_offset) {
+                       sge->length += use;
                } else {
                        sge = sg + sg_curr;
                        sg_unmark_end(sge);
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot)
 
        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
-                                          prot->slab_flags, NULL);
+                                          SLAB_ACCOUNT | prot->slab_flags,
+                                          NULL);
 
        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab)
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
-                                       SLAB_HWCACHE_ALIGN | prot->slab_flags,
+                                       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
+                                       prot->slab_flags,
                                        prot->useroffset, prot->usersize,
                                        NULL);
 
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
+                                                 SLAB_ACCOUNT |
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
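
The sk_alloc_sg() fix above is a classic stale-pointer bug: the coalescing test read the scatterlist base (sg) where it meant the previous entry (sge), so the "is this chunk contiguous with the last one?" check compared against the wrong element. A hypothetical userspace analogue over a simple (ptr, len) vector (illustrative only):

    #include <stddef.h>
    #include <stdio.h>

    struct seg { const char *ptr; size_t len; };

    /* Append (ptr, len); merge into the *last* entry when contiguous. */
    static size_t seg_add(struct seg *v, size_t n, const char *ptr, size_t len)
    {
        if (n > 0) {
            struct seg *last = &v[n - 1];       /* like sge, not v itself */
            if (last->ptr + last->len == ptr) { /* contiguous: coalesce */
                last->len += len;
                return n;
            }
        }
        v[n].ptr = ptr;
        v[n].len = len;
        return n + 1;
    }

    int main(void)
    {
        static const char buf[64];
        struct seg v[4];
        size_t n = 0;

        n = seg_add(v, n, buf, 16);
        n = seg_add(v, n, buf + 16, 16); /* adjacent: merged, len 32 */
        n = seg_add(v, n, buf + 40, 8);  /* gap: new entry */
        printf("%zu entries, first len %zu\n", n, v[0].len);
        return 0;
    }

Testing against v[0] instead of the last entry, as the pre-fix code effectively did, only coalesces correctly while the first entry is still the active one.
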
index 8b5ba6dffac7ebc88fd21075793dc3db43a74a43..12877a1514e7b8e873cd26529e58f7ebaae99c1a 100644 (file)
@@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
-       ktime_t now = ktime_get_real();
+       ktime_t now = ktime_get();
        s64 delta = 0;
 
        switch (fbtype) {
@@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
        case CCID3_FBACK_PERIODIC:
                delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
                if (delta <= 0)
-                       DCCP_BUG("delta (%ld) <= 0", (long)delta);
-               else
-                       hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+                       delta = 1;
+               hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
                break;
        default:
                return;
        }
 
-       ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+       ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
                       hc->rx_x_recv, hc->rx_pinv);
 
        hc->rx_tstamp_last_feedback = now;
@@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 static u32 ccid3_first_li(struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-       u32 x_recv, p, delta;
+       u32 x_recv, p;
+       s64 delta;
        u64 fval;
 
        if (hc->rx_rtt == 0) {
@@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
                hc->rx_rtt = DCCP_FALLBACK_RTT;
        }
 
-       delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+       delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+       if (delta <= 0)
+               delta = 1;
        x_recv = scaled_div32(hc->rx_bytes_recv, delta);
        if (x_recv == 0) {              /* would also trigger divide-by-zero */
                DCCP_WARN("X_recv==0\n");
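
Both ccid3 hunks replace wall-clock deltas (ktime_get_real()/net_timedelta()) with the monotonic clock and clamp a non-positive interval to 1 microsecond before it becomes the divisor in scaled_div32(), so a stepped system clock can no longer produce a negative delta or a divide-by-zero. A standalone sketch of the same pattern, assuming POSIX clock_gettime():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static int64_t mono_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);   /* immune to clock steps */
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    int main(void)
    {
        int64_t last = mono_us();
        uint64_t bytes = 4096;
        int64_t delta = mono_us() - last;

        if (delta <= 0)
            delta = 1;  /* never divide by zero or a negative interval */
        printf("rate ~ %llu bytes/us\n",
               (unsigned long long)(bytes / (uint64_t)delta));
        return 0;
    }
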
index 0ea2ee56ac1bee6948ee4ed37c8172b300a7f9de..f91e3816806baae37e0e0793dcef72e8b291777e 100644 (file)
@@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                 int flags, int *addr_len);
 void dccp_shutdown(struct sock *sk, int how);
 int inet_dccp_listen(struct socket *sock, int backlog);
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait);
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 void dccp_req_err(struct sock *sk, u64 seq);
 
index a9e478cd3787c90f3d81e3bc2f71a14f7b11e280..b08feb219b44b67eadf408a33649d8c7ec9db2d0 100644 (file)
@@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = {
        .accept            = inet_accept,
        .getname           = inet_getname,
        /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen            = inet_dccp_listen,
index 17fc4e0166ba89ed435dc65bbdd5951d9018c093..6344f1b18a6a1b30cd2f3c559987a2c9e9546f81 100644 (file)
@@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
index ca21c1c76da013575d5bd0c8b3a4ac42eb2b229b..0d56e36a6db7b77dcdeb9697dd81bf62895e6e4c 100644 (file)
@@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags)
 
 EXPORT_SYMBOL_GPL(dccp_disconnect);
 
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
+/*
+ *     Wait for a DCCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
+ */
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait)
 {
        __poll_t mask;
        struct sock *sk = sock->sk;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-EXPORT_SYMBOL_GPL(dccp_poll_mask);
+EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
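
The dccp_poll() rewrite above restores the classic three-argument ->poll handler: sock_poll_wait() registers the caller on the socket's wait queue before the readiness mask is computed, which is what keeps the test-then-sleep race benign, as the comment notes. Caller-side, this is what services an ordinary poll(2); a minimal sketch (illustrative only):

    #include <poll.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDNORM };

        /* Blocks until a dccp_poll()/udp_poll()-style handler reports an
         * event, or the 100 ms timeout expires (poll returns 0). */
        int n = poll(&pfd, 1, 100);

        printf("poll returned %d, revents 0x%x\n", n, pfd.revents);
        close(fd);
        return 0;
    }
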
index 9a686d890bfad179c09a182245a96bba5dba21ea..7d6ff983ba2cbbf7915a61ffad57e52f66f3a193 100644 (file)
@@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
 }
 
 
-static __poll_t dn_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wait)
 {
        struct sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        if (!skb_queue_empty(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
@@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       dn_accept,
        .getname =      dn_getname,
-       .poll_mask =    dn_poll_mask,
+       .poll =         dn_poll,
        .ioctl =        dn_ioctl,
        .listen =       dn_listen,
        .shutdown =     dn_shutdown,
index 40c851693f77e35a1f573fdbf0bcd86adb94cf13..0c9478b91fa5b6c8f6b586ed8ead66c8db538ea7 100644 (file)
@@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
                opt++;
                kdebug("options: '%s'", opt);
                do {
+                       int opt_len, opt_nlen;
                        const char *eq;
-                       int opt_len, opt_nlen, opt_vlen, tmp;
+                       char optval[128];
 
                        next_opt = memchr(opt, '#', end - opt) ?: end;
                        opt_len = next_opt - opt;
-                       if (opt_len <= 0 || opt_len > 128) {
+                       if (opt_len <= 0 || opt_len > sizeof(optval)) {
                                pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
                                                    opt_len);
                                return -EINVAL;
                        }
 
-                       eq = memchr(opt, '=', opt_len) ?: end;
-                       opt_nlen = eq - opt;
-                       eq++;
-                       opt_vlen = next_opt - eq; /* will be -1 if no value */
+                       eq = memchr(opt, '=', opt_len);
+                       if (eq) {
+                               opt_nlen = eq - opt;
+                               eq++;
+                               memcpy(optval, eq, next_opt - eq);
+                               optval[next_opt - eq] = '\0';
+                       } else {
+                               opt_nlen = opt_len;
+                               optval[0] = '\0';
+                       }
 
-                       tmp = opt_vlen >= 0 ? opt_vlen : 0;
-                       kdebug("option '%*.*s' val '%*.*s'",
-                              opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+                       kdebug("option '%*.*s' val '%s'",
+                              opt_nlen, opt_nlen, opt, optval);
 
                        /* see if it's an error number representing a DNS error
                         * that's to be recorded as the result in this key */
                        if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
                            memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
                                kdebug("dns error number option");
-                               if (opt_vlen <= 0)
-                                       goto bad_option_value;
 
-                               ret = kstrtoul(eq, 10, &derrno);
+                               ret = kstrtoul(optval, 10, &derrno);
                                if (ret < 0)
                                        goto bad_option_value;
 
index 275449b0d633586a4befec517ab3a36c5e3ba5a5..3297e7fa99458b13c40609588f187d366cf37411 100644 (file)
@@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
        return 0;
 }
 
+static int lowpan_get_iflink(const struct net_device *dev)
+{
+       return lowpan_802154_dev(dev)->wdev->ifindex;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_open               = lowpan_open,
        .ndo_stop               = lowpan_stop,
        .ndo_neigh_construct    = lowpan_neigh_construct,
+       .ndo_get_iflink         = lowpan_get_iflink,
 };
 
 static void lowpan_setup(struct net_device *ldev)
index a0768d2759b8ecb8954dd544561b68f26d0c6510..a60658c85a9ad09b405f2d928e70acf64a9ebc4d 100644 (file)
@@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
@@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
index 15e125558c76e5fa2fe466ab0d64be1d3183ebed..b403499fdabea7367f65c588d957a30f5a6572b5 100644 (file)
@@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,
-       .poll_mask         = tcp_poll_mask,
+       .poll              = tcp_poll,
        .ioctl             = inet_ioctl,
        .listen            = inet_listen,
        .shutdown          = inet_shutdown,
@@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = udp_poll_mask,
+       .poll              = udp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
@@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops);
 
 /*
  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
- * udp_poll_mask
+ * udp_poll
  */
 static const struct proto_ops inet_sockraw_ops = {
        .family            = PF_INET,
@@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index b21833651394233bbdb143d765e4408333b13b72..e46cdd310e5f86ef6985993e4226db614a2a8732 100644 (file)
@@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
                struct flowi4 fl4 = {
                        .flowi4_iif = LOOPBACK_IFINDEX,
+                       .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
                        .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
                        .flowi4_scope = scope,
index 1540db65241a6fd4d96b00546f13a3e3d3cd1815..c9ec1603666bffcfb24597b933a05f53b6d83440 100644 (file)
@@ -448,9 +448,7 @@ next_proto:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
index 1859c473b21a862b383edebbcf2c1656f9c58b3b..6a7d980105f60514c8180e6333f0a4a53912c3d5 100644 (file)
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 85b617b655bc2d602563b1bd174f436554c9d046..28fef7d15959f85e407ac9de3b426b734f5bd003 100644 (file)
@@ -1200,13 +1200,13 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        spin_lock_bh(&im->lock);
        if (pmc) {
                im->interface = pmc->interface;
-               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
-               im->sfmode = pmc->sfmode;
-               if (pmc->sfmode == MCAST_INCLUDE) {
+               if (im->sfmode == MCAST_INCLUDE) {
                        im->tomb = pmc->tomb;
                        im->sources = pmc->sources;
                        for (psf = im->sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = im->crcount;
+                               psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               } else {
+                       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                }
                in_dev_put(pmc->interface);
                kfree(pmc);
@@ -1288,7 +1288,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
 #endif
 }
 
-static void igmp_group_added(struct ip_mc_list *im)
+static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
 {
        struct in_device *in_dev = im->interface;
 #ifdef CONFIG_IP_MULTICAST
@@ -1316,7 +1316,13 @@ static void igmp_group_added(struct ip_mc_list *im)
        }
        /* else, v3 */
 
-       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       /* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
+        * not send filter-mode change record as the mode should be from
+        * IN() to IN(A).
+        */
+       if (mode == MCAST_EXCLUDE)
+               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+
        igmp_ifc_event(in_dev);
 #endif
 }
@@ -1381,8 +1387,7 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
 /*
  *     A socket has joined a multicast group on device dev.
  */
-
-void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode)
 {
        struct ip_mc_list *im;
 #ifdef CONFIG_IP_MULTICAST
@@ -1394,7 +1399,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        for_each_pmc_rtnl(in_dev, im) {
                if (im->multiaddr == addr) {
                        im->users++;
-                       ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
+                       ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
                        goto out;
                }
        }
@@ -1408,8 +1413,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        in_dev_hold(in_dev);
        im->multiaddr = addr;
        /* initial mode is (EX, empty) */
-       im->sfmode = MCAST_EXCLUDE;
-       im->sfcount[MCAST_EXCLUDE] = 1;
+       im->sfmode = mode;
+       im->sfcount[mode] = 1;
        refcount_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
@@ -1426,12 +1431,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 #ifdef CONFIG_IP_MULTICAST
        igmpv3_del_delrec(in_dev, im);
 #endif
-       igmp_group_added(im);
+       igmp_group_added(im, mode);
        if (!in_dev->dead)
                ip_rt_multicast_event(in_dev);
 out:
        return;
 }
+
+void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+{
+       __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ip_mc_inc_group);
 
 static int ip_mc_check_iphdr(struct sk_buff *skb)
@@ -1688,7 +1698,7 @@ void ip_mc_remap(struct in_device *in_dev)
 #ifdef CONFIG_IP_MULTICAST
                igmpv3_del_delrec(in_dev, pmc);
 #endif
-               igmp_group_added(pmc);
+               igmp_group_added(pmc, pmc->sfmode);
        }
 }
 
@@ -1751,7 +1761,7 @@ void ip_mc_up(struct in_device *in_dev)
 #ifdef CONFIG_IP_MULTICAST
                igmpv3_del_delrec(in_dev, pmc);
 #endif
-               igmp_group_added(pmc);
+               igmp_group_added(pmc, pmc->sfmode);
        }
 }
 
@@ -2130,8 +2140,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
 
 /* Join a multicast group
  */
-
-int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
+                             unsigned int mode)
 {
        __be32 addr = imr->imr_multiaddr.s_addr;
        struct ip_mc_socklist *iml, *i;
@@ -2172,15 +2182,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
        memcpy(&iml->multi, imr, sizeof(*imr));
        iml->next_rcu = inet->mc_list;
        iml->sflist = NULL;
-       iml->sfmode = MCAST_EXCLUDE;
+       iml->sfmode = mode;
        rcu_assign_pointer(inet->mc_list, iml);
-       ip_mc_inc_group(in_dev, addr);
+       __ip_mc_inc_group(in_dev, addr, mode);
        err = 0;
 done:
        return err;
 }
+
+/* Join ASM (Any-Source Multicast) group
+ */
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+{
+       return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ip_mc_join_group);
 
+/* Join SSM (Source-Specific Multicast) group
+ */
+int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+                        unsigned int mode)
+{
+       return __ip_mc_join_group(sk, imr, mode);
+}
+
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
                           struct in_device *in_dev)
 {
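
The igmp.c changes thread an initial filter mode through the join path so a source-specific join starts out as IN(A) instead of EX() followed by a filter-mode change record, per the RFC 3376 comment above. From userspace that path is reached via IP_ADD_SOURCE_MEMBERSHIP; a sketch (addresses are examples, 232/8 being the IPv4 SSM range):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct ip_mreq_source mreq;

        memset(&mreq, 0, sizeof(mreq));
        inet_pton(AF_INET, "232.1.1.1", &mreq.imr_multiaddr);   /* group  */
        inet_pton(AF_INET, "192.0.2.10", &mreq.imr_sourceaddr); /* source */
        mreq.imr_interface.s_addr = htonl(INADDR_ANY);

        /* Joins in INCLUDE mode: IN({192.0.2.10}), not EX() + filter. */
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
                       &mreq, sizeof(mreq)) < 0)
            perror("IP_ADD_SOURCE_MEMBERSHIP");
        return 0;
    }
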
index c9e35b81d0931df8429a33e8d03e719b87da0747..1e4cf3ab560fac154fefb7acd3539eb6e91ed84e 100644 (file)
@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
 
 void inet_frags_exit_net(struct netns_frags *nf)
 {
-       nf->low_thresh = 0; /* prevent creation of new frags */
+       nf->high_thresh = 0; /* prevent creation of new frags */
 
        rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
 }
index 31ff46daae974645dfe73c97e6e507a0ad62dd4b..3647167c8fa313f9eb7a5c5ad34cb0cb7a7aea5e 100644 (file)
@@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score += 4;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index af5a830ff6ad320ae68066ab86476962db978f79..0e3edd25f881f1ad09201be0930734523721ebfc 100644 (file)
@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
@@ -1145,7 +1147,8 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        cork->fragsize = ip_sk_use_pmtu(sk) ?
                         dst_mtu(&rt->dst) : rt->dst.dev->mtu;
 
-       cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
+       cork->gso_size = sk->sk_type == SOCK_DGRAM &&
+                        sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->ttl = ipc->ttl;
index fc32fdbeefa61c18da5b9330d4da73ca6db992bd..c0fe5ad996f238091f5b9585adb586a571f653f0 100644 (file)
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
        const struct iphdr *iph = ip_hdr(skb);
-       __be16 *ports = (__be16 *)skb_transport_header(skb);
+       __be16 *ports;
+       int end;
 
-       if (skb_transport_offset(skb) + 4 > (int)skb->len)
+       end = skb_transport_offset(skb) + 4;
+       if (end > 0 && !pskb_may_pull(skb, end))
                return;
 
        /* All current transport protocols have the port numbers in the
         * first four bytes of the transport header and this function is
         * written with this assumption in mind.
         */
+       ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = iph->daddr;
@@ -984,7 +987,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
                        mreq.imr_address.s_addr = mreqs.imr_interface;
                        mreq.imr_ifindex = 0;
-                       err = ip_mc_join_group(sk, &mreq);
+                       err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                        if (err && err != -EADDRINUSE)
                                break;
                        omode = MCAST_INCLUDE;
@@ -1061,7 +1064,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        mreq.imr_multiaddr = psin->sin_addr;
                        mreq.imr_address.s_addr = 0;
                        mreq.imr_ifindex = greqs.gsr_interface;
-                       err = ip_mc_join_group(sk, &mreq);
+                       err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                        if (err && err != -EADDRINUSE)
                                break;
                        greqs.gsr_interface = mreq.imr_ifindex;
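
The ip_cmsg_recv_dstaddr() hunk above verifies via pskb_may_pull() that the four port bytes actually exist in linear skb memory before taking a pointer to them, rather than only comparing offsets against skb->len. The same discipline against a plain buffer, with hypothetical names (illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fills sport/dport only if buf really holds offset + 4 bytes. */
    static int read_ports(const uint8_t *buf, size_t len, size_t offset,
                          uint16_t *sport, uint16_t *dport)
    {
        if (len < offset + 4)
            return -1;                  /* would read past the packet */
        memcpy(sport, buf + offset, 2); /* no unchecked pointer casts */
        memcpy(dport, buf + offset + 2, 2);
        return 0;
    }

    int main(void)
    {
        uint8_t pkt[28] = { [20] = 0x00, 0x35, 0xc0, 0x01 }; /* 20 B IP hdr */
        uint16_t s, d;

        if (read_ports(pkt, sizeof(pkt), 20, &s, &d) == 0)
            printf("ports (network order): %04x %04x\n",
                   (unsigned)s, (unsigned)d);
        return 0;
    }
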
index ca0dad90803a92bdcbb1e199554985ad4626fada..e77872c93c206693f4bcfdde98a044c6e7cfb780 100644 (file)
@@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
                .checkentry = icmp_checkentry,
                .proto      = IPPROTO_ICMP,
                .family     = NFPROTO_IPV4,
+               .me         = THIS_MODULE,
        },
 };
 
index 805e83ec3ad9347abc6ce778f296319746772f1c..16471410496592f52ac7927d218a44341f139339 100644 (file)
@@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
                 * to a listener socket if there's one */
                struct sock *sk2;
 
-               sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+               sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                            iph->saddr, laddr ? laddr : iph->daddr,
                                            hp->source, lport ? lport : hp->dest,
                                            skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 EXPORT_SYMBOL_GPL(nf_tproxy_laddr4);
 
 struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
                      const u8 protocol,
                      const __be32 saddr, const __be32 daddr,
                      const __be16 sport, const __be16 dport,
@@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                      const enum nf_tproxy_lookup_t lookup_type)
 {
        struct sock *sk;
-       struct tcphdr *tcph;
 
        switch (protocol) {
-       case IPPROTO_TCP:
+       case IPPROTO_TCP: {
+               struct tcphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, ip_hdrlen(skb),
+                                       sizeof(struct tcphdr), &_hdr);
+               if (hp == NULL)
+                       return NULL;
+
                switch (lookup_type) {
                case NF_TPROXY_LOOKUP_LISTENER:
-                       tcph = hp;
                        sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
                                                    ip_hdrlen(skb) +
-                                                     __tcp_hdrlen(tcph),
+                                                     __tcp_hdrlen(hp),
                                                    saddr, sport,
                                                    daddr, dport,
                                                    in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                        BUG();
                }
                break;
+               }
        case IPPROTO_UDP:
                sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
                                     in->ifindex);
index d06247ba08b2667b1049329e8921af9388545c54..5fa335fd385254def583b9a5100fbe7b9ce94cd6 100644 (file)
@@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        if (write && ret == 0) {
                low = make_kgid(user_ns, urange[0]);
                high = make_kgid(user_ns, urange[1]);
-               if (!gid_valid(low) || !gid_valid(high) ||
-                   (urange[1] < urange[0]) || gid_lt(high, low)) {
+               if (!gid_valid(low) || !gid_valid(high))
+                       return -EINVAL;
+               if (urange[1] < urange[0] || gid_lt(high, low)) {
                        low = make_kgid(&init_user_ns, 1);
                        high = make_kgid(&init_user_ns, 0);
                }
@@ -265,8 +266,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
            ipv4.sysctl_tcp_fastopen);
        struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
-       int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+       __le32 key[4];
+       int ret, i;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
        if (!tbl.data)
@@ -275,11 +277,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt)
-               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+               memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
        else
-               memset(user_key, 0, sizeof(user_key));
+               memset(key, 0, sizeof(key));
        rcu_read_unlock();
 
+       for (i = 0; i < ARRAY_SIZE(key); i++)
+               user_key[i] = le32_to_cpu(key[i]);
+
        snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
                user_key[0], user_key[1], user_key[2], user_key[3]);
        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +295,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
-               tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+               for (i = 0; i < ARRAY_SIZE(user_key); i++)
+                       key[i] = cpu_to_le32(user_key[i]);
+
+               tcp_fastopen_reset_cipher(net, NULL, key,
                                          TCP_FASTOPEN_KEY_LENGTH);
        }
 
 bad_key:
        pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-              user_key[0], user_key[1], user_key[2], user_key[3],
+               user_key[0], user_key[1], user_key[2], user_key[3],
               (char *)tbl.data, ret);
        kfree(tbl.data);
        return ret;
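
The fastopen-key hunk routes the 16-byte key through explicit __le32 words so the hex string the sysctl shows (and accepts) is identical on little- and big-endian hosts, instead of reflecting host byte order via a raw memcpy into u32s. The conversion it relies on, standalone (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t le32_from_bytes(const uint8_t *p)
    {
        /* Byte 0 is least significant regardless of host endianness. */
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        const uint8_t key[16] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                                  0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c,
                                  0x0d, 0x0e, 0x0f, 0x10 };
        uint32_t w[4];
        int i;

        for (i = 0; i < 4; i++)
            w[i] = le32_from_bytes(key + 4 * i);
        /* Same output on any host: 04030201-08070605-0c0b0a09-100f0e0d */
        printf("%08x-%08x-%08x-%08x\n", w[0], w[1], w[2], w[3]);
        return 0;
    }
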
index 141acd92e58aeddeb9a0ba1eaacf3bd520a836a3..4491faf83f4f93cf4384f7b192ffe3022567cc0a 100644 (file)
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ *     Wait for a TCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+       __poll_t mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
-       __poll_t mask = 0;
        int state;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
+       /* Socket is not locked. We are protected from async events
+        * by poll logic and correct handling of state changes
+        * made by other threads is impossible in any case.
+        */
+
+       mask = 0;
+
        /*
         * EPOLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -1987,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                         * shouldn't happen.
                         */
                        if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-                                "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+                                "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
                                 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
                                 flags))
                                break;
@@ -2002,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                                goto found_fin_ok;
                        WARN(!(flags & MSG_PEEK),
-                            "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+                            "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
                             *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
 
@@ -2551,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
+       tp->copied_seq = tp->rcv_nxt;
+       tp->urg_data = 0;
        tcp_write_queue_purge(sk);
        tcp_fastopen_active_disable_ofo_check(sk);
        skb_rbtree_purge(&tp->out_of_order_queue);
@@ -2810,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_REPAIR:
                if (!tcp_can_repair_sock(sk))
                        err = -EPERM;
-               else if (val == 1) {
+               else if (val == TCP_REPAIR_ON) {
                        tp->repair = 1;
                        sk->sk_reuse = SK_FORCE_REUSE;
                        tp->repair_queue = TCP_NO_QUEUE;
-               } else if (val == 0) {
+               } else if (val == TCP_REPAIR_OFF) {
                        tp->repair = 0;
                        sk->sk_reuse = SK_NO_REUSE;
                        tcp_send_window_probe(sk);
+               } else if (val == TCP_REPAIR_OFF_NO_WP) {
+                       tp->repair = 0;
+                       sk->sk_reuse = SK_NO_REUSE;
                } else
                        err = -EINVAL;
 
@@ -3709,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err)
                        struct request_sock *req = inet_reqsk(sk);
 
                        local_bh_disable();
-                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
-                                                         req);
+                       inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                        local_bh_enable();
                        return 0;
                }
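
The TCP_REPAIR hunk replaces the magic 0/1 values with named constants and adds TCP_REPAIR_OFF_NO_WP, which leaves repair mode without sending a window probe. A caller-side sketch; the fallback defines below are assumptions for headers predating this series, and the option requires CAP_NET_ADMIN:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef TCP_REPAIR
    #define TCP_REPAIR 19
    #endif
    #ifndef TCP_REPAIR_OFF_NO_WP
    #define TCP_REPAIR_OFF_NO_WP -1    /* leave repair, skip window probe */
    #endif

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int val = 1;                    /* TCP_REPAIR_ON */

        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val)) < 0)
            perror("TCP_REPAIR on");    /* EPERM without CAP_NET_ADMIN */

        val = TCP_REPAIR_OFF_NO_WP;     /* exit repair without probing */
        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val)) < 0)
            perror("TCP_REPAIR off_no_wp");
        return 0;
    }
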
index 5f5e5936760e65739859d0d8d9717b3204482a43..8b637f9f23a232a137f4a7f2d685a599cc063c1b 100644 (file)
@@ -55,7 +55,6 @@ struct dctcp {
        u32 dctcp_alpha;
        u32 next_seq;
        u32 ce_state;
-       u32 delayed_ack_reserved;
        u32 loss_cwnd;
 };
 
@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
 
-               ca->delayed_ack_reserved = 0;
                ca->loss_cwnd = 0;
                ca->ce_state = 0;
 
@@ -131,23 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=0 to CE=1 and delayed
-        * ACK has not sent yet.
-        */
-       if (!ca->ce_state && ca->delayed_ack_reserved) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=0. */
-               tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (!ca->ce_state) {
+               /* State has changed from CE=0 to CE=1, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk, 1);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 
-       /* State has changed from CE=1 to CE=0 and delayed
-        * ACK has not sent yet.
-        */
-       if (ca->ce_state && ca->delayed_ack_reserved) {
-               u32 tmp_rcv_nxt;
-
-               /* Save current rcv_nxt. */
-               tmp_rcv_nxt = tp->rcv_nxt;
-
-               /* Generate previous ack with CE=1. */
-               tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
-               tp->rcv_nxt = ca->prior_rcv_nxt;
-
-               tcp_send_ack(sk);
-
-               /* Recover current rcv_nxt. */
-               tp->rcv_nxt = tmp_rcv_nxt;
+       if (ca->ce_state) {
+               /* State has changed from CE=1 to CE=0, force an immediate
+                * ACK to reflect the new CE state. If an ACK was delayed,
+                * send that first to reflect the prior CE state.
+                */
+               if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+                       __tcp_send_ack(sk, ca->prior_rcv_nxt);
+               tcp_enter_quickack_mode(sk, 1);
        }
 
        ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -248,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
        }
 }
 
-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
-{
-       struct dctcp *ca = inet_csk_ca(sk);
-
-       switch (ev) {
-       case CA_EVENT_DELAYED_ACK:
-               if (!ca->delayed_ack_reserved)
-                       ca->delayed_ack_reserved = 1;
-               break;
-       case CA_EVENT_NON_DELAYED_ACK:
-               if (ca->delayed_ack_reserved)
-                       ca->delayed_ack_reserved = 0;
-               break;
-       default:
-               /* Don't care for the rest. */
-               break;
-       }
-}
-
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 {
        switch (ev) {
@@ -276,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ce_state_1_to_0(sk);
                break;
-       case CA_EVENT_DELAYED_ACK:
-       case CA_EVENT_NON_DELAYED_ACK:
-               dctcp_update_ack_reserved(sk, ev);
-               break;
        default:
                /* Don't care for the rest. */
                break;
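
The dctcp hunks replace the fragile save/restore of rcv_nxt with a simpler rule: on a CE transition, first flush any delayed ACK so it still reflects the prior CE state (via __tcp_send_ack() at prior_rcv_nxt), then ACK quickly under the new state. A toy model of just that rule, with hypothetical names (not the kernel's structures):

    #include <stdio.h>

    struct ce_model {
        int ce_state;                   /* last CE value seen: 0 or 1 */
        unsigned int prior_rcv_nxt;     /* rcv_nxt when that state began */
    };

    static void ce_flip(struct ce_model *m, int new_ce,
                        unsigned int rcv_nxt, int ack_delayed)
    {
        if (m->ce_state != new_ce) {
            if (ack_delayed)            /* flush the old state first */
                printf("ack seq=%u (CE=%d)\n",
                       m->prior_rcv_nxt, m->ce_state);
            printf("enter quickack\n"); /* new state ACKed promptly */
        }
        m->prior_rcv_nxt = rcv_nxt;
        m->ce_state = new_ce;
    }

    int main(void)
    {
        struct ce_model m = { 0, 1000 };

        ce_flip(&m, 1, 1500, 1);        /* CE 0->1, delayed ACK pending */
        ce_flip(&m, 0, 2000, 0);        /* CE 1->0, nothing pending */
        return 0;
    }
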
index 355d3dffd021ccad0f30891994289d916f7d276c..3bcd30a2ba06827e061d86ba22680986824e3ee4 100644 (file)
@@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
                icsk->icsk_ack.quick = quickacks;
 }
 
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
        icsk->icsk_ack.pingpong = 0;
        icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
 
 /* Send ACKs quickly, if "quick" count is not exhausted
  * and the session is not interactive.
@@ -265,7 +266,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN)
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                break;
        case INET_ECN_CE:
                if (tcp_ca_needs_ecn(sk))
@@ -273,7 +274,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 
                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -3181,6 +3182,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
+
+                       /* If any of the cumulatively ACKed segments was
+                        * retransmitted, non-SACK case cannot confirm that
+                        * progress was due to original transmission due to
+                        * lack of TCPCB_SACKED_ACKED bits even if some of
+                        * the packets may have been never retransmitted.
+                        */
+                       if (flag & FLAG_RETRANS_DATA_ACKED)
+                               flag &= ~FLAG_ORIG_SACK_ACKED;
                } else {
                        int delta;
 
@@ -4348,6 +4358,23 @@ static bool tcp_try_coalesce(struct sock *sk,
        return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+                            struct sk_buff *to,
+                            struct sk_buff *from,
+                            bool *fragstolen)
+{
+       bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+       /* In case tcp_drop() is called later, update to->gso_segs */
+       if (res) {
+               u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+                              max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+               skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+       }
+       return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
        sk_drops_add(sk, skb);
@@ -4471,8 +4498,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        /* In the typical case, we are adding an skb to the end of the list.
         * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
         */
-       if (tcp_try_coalesce(sk, tp->ooo_last_skb,
-                            skb, &fragstolen)) {
+       if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+                                skb, &fragstolen)) {
 coalesce_done:
                tcp_grow_window(sk, skb);
                kfree_skb_partial(skb, fragstolen);
@@ -4500,7 +4527,7 @@ coalesce_done:
                                /* All the bits are present. Drop. */
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
-                               __kfree_skb(skb);
+                               tcp_drop(sk, skb);
                                skb = NULL;
                                tcp_dsack_set(sk, seq, end_seq);
                                goto add_sack;
@@ -4519,11 +4546,11 @@ coalesce_done:
                                                 TCP_SKB_CB(skb1)->end_seq);
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPOFOMERGE);
-                               __kfree_skb(skb1);
+                               tcp_drop(sk, skb1);
                                goto merge_right;
                        }
-               } else if (tcp_try_coalesce(sk, skb1,
-                                           skb, &fragstolen)) {
+               } else if (tcp_ooo_try_coalesce(sk, skb1,
+                                               skb, &fragstolen)) {
                        goto coalesce_done;
                }
                p = &parent->rb_right;
@@ -4892,6 +4919,7 @@ end:
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 range_truesize, sum_tiny = 0;
        struct sk_buff *skb, *head;
        u32 start, end;
 
@@ -4903,6 +4931,7 @@ new_range:
        }
        start = TCP_SKB_CB(skb)->seq;
        end = TCP_SKB_CB(skb)->end_seq;
+       range_truesize = skb->truesize;
 
        for (head = skb;;) {
                skb = skb_rb_next(skb);
@@ -4913,11 +4942,20 @@ new_range:
                if (!skb ||
                    after(TCP_SKB_CB(skb)->seq, end) ||
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
-                       tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-                                    head, skb, start, end);
+                       /* Do not attempt collapsing tiny skbs */
+                       if (range_truesize != head->truesize ||
+                           end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+                               tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+                                            head, skb, start, end);
+                       } else {
+                               sum_tiny += range_truesize;
+                               if (sum_tiny > sk->sk_rcvbuf >> 3)
+                                       return;
+                       }
                        goto new_range;
                }
 
+               range_truesize += skb->truesize;
                if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
                        start = TCP_SKB_CB(skb)->seq;
                if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4932,6 +4970,7 @@ new_range:
  * 2) not add too big latencies if thousands of packets sit there.
  *    (But if application shrinks SO_RCVBUF, we could still end up
  *     freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
  *
  * Return true if queue has shrunk.
  */
@@ -4939,20 +4978,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct rb_node *node, *prev;
+       int goal;
 
        if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
                return false;
 
        NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+       goal = sk->sk_rcvbuf >> 3;
        node = &tp->ooo_last_skb->rbnode;
        do {
                prev = rb_prev(node);
                rb_erase(node, &tp->out_of_order_queue);
+               goal -= rb_to_skb(node)->truesize;
                tcp_drop(sk, rb_to_skb(node));
-               sk_mem_reclaim(sk);
-               if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-                   !tcp_under_memory_pressure(sk))
-                       break;
+               if (!prev || goal <= 0) {
+                       sk_mem_reclaim(sk);
+                       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+                           !tcp_under_memory_pressure(sk))
+                               break;
+                       goal = sk->sk_rcvbuf >> 3;
+               }
                node = prev;
        } while (node);
        tp->ooo_last_skb = rb_to_skb(prev);
@@ -4987,6 +5032,9 @@ static int tcp_prune_queue(struct sock *sk)
        else if (tcp_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+               return 0;
+
        tcp_collapse_ofo_queue(sk);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                tcp_collapse(sk, &sk->sk_receive_queue, NULL,
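
tcp_prune_ofo_queue() now frees out-of-order skbs in batches, calling sk_mem_reclaim() and rechecking memory only once at least sk_rcvbuf/8 of truesize has been dropped; that is the "at least 12.5 %" rule added to the comment. A standalone sketch of the budget loop, with an array standing in for the rbtree walk (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        int truesize[] = { 700, 600, 512, 2048, 4096, 1024 };
        int n = sizeof(truesize) / sizeof(truesize[0]);
        int rcvbuf = 16384;
        int goal = rcvbuf >> 3;         /* 12.5 % of the receive buffer */
        int i, freed = 0;

        /* Walk back from the newest entry, as the kernel walks back
         * from ooo_last_skb. */
        for (i = n - 1; i >= 0; i--) {
            goal -= truesize[i];
            freed += truesize[i];
            if (goal <= 0) {
                printf("batch done: freed %d bytes\n", freed);
                break;                  /* kernel: sk_mem_reclaim() + recheck */
            }
        }
        return 0;
    }

Batching the reclaim this way bounds how often the (relatively costly) memory accounting runs while guaranteeing real forward progress against a hostile sender.
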
index bea17f1e8302585d70c1e0108ae1c33d149230d8..3b2711e33e4c7c06ed8caec20cf0241f36068f54 100644 (file)
@@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
-               tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
-               if (tp->write_seq == 0)
-                       tp->write_seq = 1;
-               tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
-               tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               /* In case of repair and re-using TIME-WAIT sockets we still
+                * want to be sure that it is safe as above but honor the
+                * sequence numbers and time stamps set as part of the repair
+                * process.
+                *
+                * Without this check re-using a TIME-WAIT socket with TCP
+                * repair would accumulate a -1 on the repair assigned
+                * sequence number. The first time it is reused the sequence
+                * is -1, the second time -2, etc. This fixes that issue
+                * without appearing to create any others.
+                */
+               if (likely(!tp->repair)) {
+                       tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+                       if (tp->write_seq == 0)
+                               tp->write_seq = 1;
+                       tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
+                       tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               }
                sock_hold(sktw);
                return 1;
        }
index 8e08b409c71e1f8e69422f1756d48b5bc55411c3..c4172c1fb198d4bcd1fcaace00308b3f86b0a843 100644 (file)
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 }
 
 /* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+                                     u32 rcv_nxt)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
                if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                        __sock_put(sk);
        }
+
+       if (unlikely(rcv_nxt != tp->rcv_nxt))
+               return;  /* Special ACK sent by DCTCP to reflect ECN */
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
@@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
  * We are working here with either a clone of the original
  * SKB, or a fresh unique copy made by the retransmit engine.
  */
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
-                           gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+                             int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
@@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        th->source              = inet->inet_sport;
        th->dest                = inet->inet_dport;
        th->seq                 = htonl(tcb->seq);
-       th->ack_seq             = htonl(tp->rcv_nxt);
+       th->ack_seq             = htonl(rcv_nxt);
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->tcp_flags);
 
@@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        icsk->icsk_af_ops->send_check(sk, skb);
 
        if (likely(tcb->tcp_flags & TCPHDR_ACK))
-               tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+               tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
 
        if (skb->len != tcp_header_size) {
                tcp_event_data_sent(tp, sk);
@@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        return err;
 }
 
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+                           gfp_t gfp_mask)
+{
+       return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+                                 tcp_sk(sk)->rcv_nxt);
+}
+
 /* This routine just queues the buffer for sending.
  *
  * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3523,8 +3534,6 @@ void tcp_send_delayed_ack(struct sock *sk)
        int ato = icsk->icsk_ack.ato;
        unsigned long timeout;
 
-       tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
        if (ato > TCP_DELACK_MIN) {
                const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ / 2;
@@ -3573,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk)
 }
 
 /* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
 {
        struct sk_buff *buff;
 
@@ -3581,8 +3590,6 @@ void tcp_send_ack(struct sock *sk)
        if (sk->sk_state == TCP_CLOSE)
                return;
 
-       tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
        /* We are not putting this on the write queue, so
         * tcp_transmit_skb() will set the ownership to this
         * sock.
@@ -3608,9 +3615,14 @@ void tcp_send_ack(struct sock *sk)
        skb_set_tcp_pure_ack(buff);
 
        /* Send it off, this clears delayed acks for us. */
-       tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+       __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+       __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
 }
-EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
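
The tcp_output.c hunks use a common refactoring: the primitive grows an explicit rcv_nxt parameter (__tcp_transmit_skb(), __tcp_send_ack()) while the old names remain as thin wrappers passing tp->rcv_nxt, so existing callers are untouched and DCTCP gains its ACK-at-prior-sequence hook. The shape of that pattern, with hypothetical names (illustrative only):

    #include <stdio.h>

    static unsigned int rcv_nxt = 42;   /* stand-in for tp->rcv_nxt */

    static void __send_ack(unsigned int ack_seq)
    {
        printf("ack %u\n", ack_seq);    /* extended primitive */
    }

    static void send_ack(void)
    {
        __send_ack(rcv_nxt);            /* old API: current state, as before */
    }

    int main(void)
    {
        send_ack();                     /* unchanged callers */
        __send_ack(41);                 /* new caller: ACK for prior state */
        return 0;
    }
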
index 9bb27df4dac5ec5f133b15e972f384bdc1d165b1..24e116ddae79ce0696e3f63290385ae15e28ac18 100644 (file)
@@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     udp_poll - wait for a UDP event.
  *     @file - file struct
  *     @sock - socket
- *     @events - events to wait for
+ *     @wait - poll table
  *
  *     This is same as datagram poll, except for the special case of
  *     blocking sockets. If application is using a blocking fd
@@ -2600,23 +2600,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     but then block when reading it. Add special case code
  *     to work around these arguably broken applications.
  */
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
-       if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
+       if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(EPOLLIN | EPOLLRDNORM);
 
        return mask;
 
 }
-EXPORT_SYMBOL(udp_poll_mask);
+EXPORT_SYMBOL(udp_poll);
 
 int udp_abort(struct sock *sk, int err)
 {
index 92dc9e5a7ff3d0a7509bfa2a66e9189c8341a5fa..69c54540d5b4f2664b78b56468b09e3c1f6ac888 100644 (file)
@@ -394,7 +394,7 @@ unflush:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
        return pp;
 }
 EXPORT_SYMBOL(udp_gro_receive);
index 0eff75525da101e4fce2798626a317366f94623f..b3885ca22d6fb7aa6165c2773ae02d9885099d8f 100644 (file)
@@ -108,6 +108,7 @@ config IPV6_MIP6
 config IPV6_ILA
        tristate "IPv6: Identifier Locator Addressing (ILA)"
        depends on NETFILTER
+       select DST_CACHE
        select LWTUNNEL
        ---help---
          Support for IPv6 Identifier Locator Addressing (ILA).
index c134286d6a4179516709570ad534d1ae26fd0bce..f66a1cae3366fe7b176c176027c2c7b9b39ec278 100644 (file)
@@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                        continue;
                if ((rt->fib6_flags & noflags) != 0)
                        continue;
-               fib6_info_hold(rt);
+               if (!fib6_info_hold_safe(rt))
+                       continue;
                break;
        }
 out:
@@ -4528,6 +4529,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                               unsigned long expires, u32 flags)
 {
        struct fib6_info *f6i;
+       u32 prio;
 
        f6i = addrconf_get_prefix_route(&ifp->addr,
                                        ifp->prefix_len,
@@ -4536,13 +4538,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
        if (!f6i)
                return -ENOENT;
 
-       if (f6i->fib6_metric != ifp->rt_priority) {
+       prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
+       if (f6i->fib6_metric != prio) {
+               /* delete old one */
+               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
+
                /* add new one */
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
-               /* delete old one */
-               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
        } else {
                if (!expires)
                        fib6_clean_expires(f6i);
index 74f2a261e8df4dc78a3baddb31609cdc70ba6035..9ed0eae91758f8506b4f6ca0fe3a9c2dc3fe1323 100644 (file)
@@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = inet_accept,               /* ok           */
        .getname           = inet6_getname,
-       .poll_mask         = tcp_poll_mask,             /* ok           */
+       .poll              = tcp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = inet_listen,               /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
@@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = udp_poll_mask,             /* ok           */
+       .poll              = udp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 1323b9679cf718d0023bf5880dcd60fb8602d9db..1c0bb9fb76e61fa7d12317190ebac38847530858 100644 (file)
@@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
 {
        struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
 
-       txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS,
-                                        hop, hop ? ipv6_optlen(hop) : 0);
+       txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
        txopt_put(old);
        if (IS_ERR(txopts))
                return PTR_ERR(txopts);
@@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req,
        if (IS_ERR(new))
                return PTR_ERR(new);
 
-       txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-                                        new, new ? ipv6_optlen(new) : 0);
+       txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
        kfree(new);
 
@@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req)
        if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
                return; /* Nothing to do */
 
-       txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-                                        new, new ? ipv6_optlen(new) : 0);
+       txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
        if (!IS_ERR(txopts)) {
                txopts = xchg(&req_inet->ipv6_opt, txopts);
index 2ee08b6a86a4881210f5a0c81206a64a562e5a56..1a1f876f8e282d636a13ae1f48c3f90a9f754bbc 100644 (file)
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
        }
        if (np->rxopt.bits.rxorigdstaddr) {
                struct sockaddr_in6 sin6;
-               __be16 *ports = (__be16 *) skb_transport_header(skb);
+               __be16 *ports;
+               int end;
 
-               if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+               end = skb_transport_offset(skb) + 4;
+               if (end <= 0 || pskb_may_pull(skb, end)) {
                        /* All current transport protocols have the port numbers in the
                         * first four bytes of the transport header and this function is
                         * written with this assumption in mind.
                         */
+                       ports = (__be16 *)skb_transport_header(skb);
 
                        sin6.sin6_family = AF_INET6;
                        sin6.sin6_addr = ipv6_hdr(skb)->daddr;
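
The old check compared offsets against skb->len, which proves the bytes exist somewhere in the skb but not that they sit in the linear area a direct pointer dereference needs. pskb_may_pull() pulls the leading bytes into the linear area on demand, and only after it succeeds is skb_transport_header() safe to read. The idiom in miniature (sketch; local names are illustrative):

        int end = skb_transport_offset(skb) + 4;

        if (pskb_may_pull(skb, end)) {
                /* the first 4 transport-header bytes are now linear */
                __be16 *ports = (__be16 *)skb_transport_header(skb);
                u16 sport = ntohs(ports[0]);
                u16 dport = ntohs(ports[1]);
        }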
index 5bc2bf3733abd387de8d21932c95ef32eea30d80..20291c2036fcdcd23ccdc2f5b5ae2a1734b2833d 100644 (file)
@@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
 }
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
-static int ipv6_renew_option(void *ohdr,
-                            struct ipv6_opt_hdr __user *newopt, int newoptlen,
-                            int inherit,
-                            struct ipv6_opt_hdr **hdr,
-                            char **p)
+static void ipv6_renew_option(int renewtype,
+                             struct ipv6_opt_hdr **dest,
+                             struct ipv6_opt_hdr *old,
+                             struct ipv6_opt_hdr *new,
+                             int newtype, char **p)
 {
-       if (inherit) {
-               if (ohdr) {
-                       memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
-                       *hdr = (struct ipv6_opt_hdr *)*p;
-                       *p += CMSG_ALIGN(ipv6_optlen(*hdr));
-               }
-       } else {
-               if (newopt) {
-                       if (copy_from_user(*p, newopt, newoptlen))
-                               return -EFAULT;
-                       *hdr = (struct ipv6_opt_hdr *)*p;
-                       if (ipv6_optlen(*hdr) > newoptlen)
-                               return -EINVAL;
-                       *p += CMSG_ALIGN(newoptlen);
-               }
-       }
-       return 0;
+       struct ipv6_opt_hdr *src;
+
+       src = (renewtype == newtype ? new : old);
+       if (!src)
+               return;
+
+       memcpy(*p, src, ipv6_optlen(src));
+       *dest = (struct ipv6_opt_hdr *)*p;
+       *p += CMSG_ALIGN(ipv6_optlen(*dest));
 }
 
 /**
@@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr,
  */
 struct ipv6_txoptions *
 ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-                  int newtype,
-                  struct ipv6_opt_hdr __user *newopt, int newoptlen)
+                  int newtype, struct ipv6_opt_hdr *newopt)
 {
        int tot_len = 0;
        char *p;
        struct ipv6_txoptions *opt2;
-       int err;
 
        if (opt) {
                if (newtype != IPV6_HOPOPTS && opt->hopopt)
@@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
        }
 
-       if (newopt && newoptlen)
-               tot_len += CMSG_ALIGN(newoptlen);
+       if (newopt)
+               tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
 
        if (!tot_len)
                return NULL;
@@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
        opt2->tot_len = tot_len;
        p = (char *)(opt2 + 1);
 
-       err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
-                               newtype != IPV6_HOPOPTS,
-                               &opt2->hopopt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
-                               newtype != IPV6_RTHDRDSTOPTS,
-                               &opt2->dst0opt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
-                               newtype != IPV6_RTHDR,
-                               (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
-                               newtype != IPV6_DSTOPTS,
-                               &opt2->dst1opt, &p);
-       if (err)
-               goto out;
+       ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
+                         (opt ? opt->hopopt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
+                         (opt ? opt->dst0opt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_RTHDR,
+                         (struct ipv6_opt_hdr **)&opt2->srcrt,
+                         (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
+                         (opt ? opt->dst1opt : NULL),
+                         newopt, newtype, &p);
 
        opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
                          (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
@@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
        opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
 
        return opt2;
-out:
-       sock_kfree_s(sk, opt2, opt2->tot_len);
-       return ERR_PTR(err);
-}
-
-/**
- * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
- *
- * @sk: sock from which to allocate memory
- * @opt: original options
- * @newtype: option type to replace in @opt
- * @newopt: new option of type @newtype to replace (kernel-mem)
- * @newoptlen: length of @newopt
- *
- * See ipv6_renew_options().  The difference is that @newopt is
- * kernel memory, rather than user memory.
- */
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
-                       int newtype, struct ipv6_opt_hdr *newopt,
-                       int newoptlen)
-{
-       struct ipv6_txoptions *ret_val;
-       const mm_segment_t old_fs = get_fs();
-
-       set_fs(KERNEL_DS);
-       ret_val = ipv6_renew_options(sk, opt, newtype,
-                                    (struct ipv6_opt_hdr __user *)newopt,
-                                    newoptlen);
-       set_fs(old_fs);
-       return ret_val;
 }
 
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
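
With the set_fs(KERNEL_DS) dance removed, ipv6_renew_options() takes kernel memory directly and the _kern wrapper goes away; the one caller that used to pass a user pointer (do_ipv6_setsockopt(), further down in this series) now copies the buffer in first. The calling convention after the change, as in the CALIPSO hunks above:

        struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;

        /* replace only the hop-by-hop header; other ext headers persist */
        txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
        txopt_put(old);
        if (IS_ERR(txopts))
                return PTR_ERR(txopts);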
index be491bf6ab6e9ff4d1a9d84bc78c4582f4fe8e01..ef2505aefc159d9a5a3fc544179bc5d086377dd2 100644 (file)
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
 
        /* for local traffic to local address, skb dev is the loopback
         * device. Check if there is a dst attached to the skb and if so
-        * get the real device index.
+        * get the real device index. Same is needed for replies to a link
+        * local address on a device enslaved to an L3 master device
         */
-       if (unlikely(iif == LOOPBACK_IFINDEX)) {
+       if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
                const struct rt6_info *rt6 = skb_rt6_info(skb);
 
                if (rt6)
index 2febe26de6a150155e269da0c38e5cb1122aca8d..595ad408dba09184eb814eee1870e04c17b79f77 100644 (file)
@@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score++;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index 39d1d487eca25faceacbc3619fc6c4c38088d62a..d212738e9d100d4e3270f9188466da6b8a3d186c 100644 (file)
@@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
        return f6i;
 }
 
-void fib6_info_destroy(struct fib6_info *f6i)
+void fib6_info_destroy_rcu(struct rcu_head *head)
 {
+       struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
        struct rt6_exception_bucket *bucket;
        struct dst_metrics *m;
 
@@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
 
        kfree(f6i);
 }
-EXPORT_SYMBOL_GPL(fib6_info_destroy);
+EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
 
 static struct fib6_node *node_alloc(struct net *net)
 {
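
The rename from fib6_info_destroy() to fib6_info_destroy_rcu() reflects that teardown is now deferred through an RCU grace period, so readers that found the entry under rcu_read_lock() can keep dereferencing it until the callback fires. The release side pairs with it roughly like this (a sketch, assuming the refcounting in this tree):

        static inline void fib6_info_release(struct fib6_info *f6i)
        {
                if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
                        call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
        }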
@@ -934,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 {
        struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
                                    lockdep_is_held(&rt->fib6_table->tb6_lock));
-       enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
-       struct fib6_info *iter = NULL, *match = NULL;
+       struct fib6_info *iter = NULL;
        struct fib6_info __rcu **ins;
+       struct fib6_info __rcu **fallback_ins = NULL;
        int replace = (info->nlh &&
                       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
-       int append = (info->nlh &&
-                      (info->nlh->nlmsg_flags & NLM_F_APPEND));
        int add = (!info->nlh ||
                   (info->nlh->nlmsg_flags & NLM_F_CREATE));
        int found = 0;
+       bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
        u16 nlflags = NLM_F_EXCL;
        int err;
 
-       if (append)
+       if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
                nlflags |= NLM_F_APPEND;
 
        ins = &fn->leaf;
@@ -969,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 
                        nlflags &= ~NLM_F_EXCL;
                        if (replace) {
-                               found++;
-                               break;
+                               if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                                       found++;
+                                       break;
+                               }
+                               if (rt_can_ecmp)
+                                       fallback_ins = fallback_ins ?: ins;
+                               goto next_iter;
                        }
 
                        if (rt6_duplicate_nexthop(iter, rt)) {
@@ -985,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
                                return -EEXIST;
                        }
-
-                       /* first route that matches */
-                       if (!match)
-                               match = iter;
+                       /* If we have the same destination and the same metric,
+                        * but not the same gateway, then the route we try to
+                        * add is a sibling of this route; increment our counter
+                        * of siblings, and later we will add our route to the
+                        * list.
+                        * Only static routes (which don't have flag
+                        * RTF_EXPIRES) are used for ECMPv6.
+                        *
+                        * To avoid a long list, we only add siblings if the
+                        * route has a gateway.
+                        */
+                       if (rt_can_ecmp &&
+                           rt6_qualify_for_ecmp(iter))
+                               rt->fib6_nsiblings++;
                }
 
                if (iter->fib6_metric > rt->fib6_metric)
                        break;
 
+next_iter:
                ins = &iter->fib6_next;
        }
 
+       if (fallback_ins && !found) {
+               /* No ECMP-able route found, replace first non-ECMP one */
+               ins = fallback_ins;
+               iter = rcu_dereference_protected(*ins,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
+               found++;
+       }
+
        /* Reset round-robin state, if necessary */
        if (ins == &fn->leaf)
                fn->rr_ptr = NULL;
 
        /* Link this route to others same route. */
-       if (append && match) {
+       if (rt->fib6_nsiblings) {
+               unsigned int fib6_nsiblings;
                struct fib6_info *sibling, *temp_sibling;
 
-               if (rt->fib6_flags & RTF_REJECT) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Can not append a REJECT route");
-                       return -EINVAL;
-               } else if (match->fib6_flags & RTF_REJECT) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Can not append to a REJECT route");
-                       return -EINVAL;
+               /* Find the first route that has the same metric */
+               sibling = leaf;
+               while (sibling) {
+                       if (sibling->fib6_metric == rt->fib6_metric &&
+                           rt6_qualify_for_ecmp(sibling)) {
+                               list_add_tail(&rt->fib6_siblings,
+                                             &sibling->fib6_siblings);
+                               break;
+                       }
+                       sibling = rcu_dereference_protected(sibling->fib6_next,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
                }
-               event = FIB_EVENT_ENTRY_APPEND;
-               rt->fib6_nsiblings = match->fib6_nsiblings;
-               list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
-               match->fib6_nsiblings++;
-
                /* For each sibling in the list, increment the counter of
                 * siblings. BUG() if the counters do not match; the list of
                 * siblings is broken!
                 */
+               fib6_nsiblings = 0;
                list_for_each_entry_safe(sibling, temp_sibling,
-                                        &match->fib6_siblings, fib6_siblings) {
+                                        &rt->fib6_siblings, fib6_siblings) {
                        sibling->fib6_nsiblings++;
-                       BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings);
+                       BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
+                       fib6_nsiblings++;
                }
-
-               rt6_multipath_rebalance(match);
+               BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
+               rt6_multipath_rebalance(temp_sibling);
        }
 
        /*
@@ -1042,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 add:
                nlflags |= NLM_F_CREATE;
 
-               err = call_fib6_entry_notifiers(info->nl_net, event, rt,
-                                               extack);
+               err = call_fib6_entry_notifiers(info->nl_net,
+                                               FIB_EVENT_ENTRY_ADD,
+                                               rt, extack);
                if (err)
                        return err;
 
@@ -1061,7 +1087,7 @@ add:
                }
 
        } else {
-               struct fib6_info *tmp;
+               int nsiblings;
 
                if (!found) {
                        if (add)
@@ -1076,57 +1102,48 @@ add:
                if (err)
                        return err;
 
-               /* if route being replaced has siblings, set tmp to
-                * last one, otherwise tmp is current route. this is
-                * used to set fib6_next for new route
-                */
-               if (iter->fib6_nsiblings)
-                       tmp = list_last_entry(&iter->fib6_siblings,
-                                             struct fib6_info,
-                                             fib6_siblings);
-               else
-                       tmp = iter;
-
-               /* insert new route */
                atomic_inc(&rt->fib6_ref);
                rcu_assign_pointer(rt->fib6_node, fn);
-               rt->fib6_next = tmp->fib6_next;
+               rt->fib6_next = iter->fib6_next;
                rcu_assign_pointer(*ins, rt);
-
                if (!info->skip_notify)
                        inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               nsiblings = iter->fib6_nsiblings;
+               iter->fib6_node = NULL;
+               fib6_purge_rt(iter, fn, info->nl_net);
+               if (rcu_access_pointer(fn->rr_ptr) == iter)
+                       fn->rr_ptr = NULL;
+               fib6_info_release(iter);
 
-               /* delete old route */
-               rt = iter;
-
-               if (rt->fib6_nsiblings) {
-                       struct fib6_info *tmp;
-
+               if (nsiblings) {
                        /* Replacing an ECMP route, remove all siblings */
-                       list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings,
-                                                fib6_siblings) {
-                               iter->fib6_node = NULL;
-                               fib6_purge_rt(iter, fn, info->nl_net);
-                               if (rcu_access_pointer(fn->rr_ptr) == iter)
-                                       fn->rr_ptr = NULL;
-                               fib6_info_release(iter);
-
-                               rt->fib6_nsiblings--;
-                               info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+                       ins = &rt->fib6_next;
+                       iter = rcu_dereference_protected(*ins,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
+                       while (iter) {
+                               if (iter->fib6_metric > rt->fib6_metric)
+                                       break;
+                               if (rt6_qualify_for_ecmp(iter)) {
+                                       *ins = iter->fib6_next;
+                                       iter->fib6_node = NULL;
+                                       fib6_purge_rt(iter, fn, info->nl_net);
+                                       if (rcu_access_pointer(fn->rr_ptr) == iter)
+                                               fn->rr_ptr = NULL;
+                                       fib6_info_release(iter);
+                                       nsiblings--;
+                                       info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+                               } else {
+                                       ins = &iter->fib6_next;
+                               }
+                               iter = rcu_dereference_protected(*ins,
+                                       lockdep_is_held(&rt->fib6_table->tb6_lock));
                        }
+                       WARN_ON(nsiblings != 0);
                }
-
-               WARN_ON(rt->fib6_nsiblings != 0);
-
-               rt->fib6_node = NULL;
-               fib6_purge_rt(rt, fn, info->nl_net);
-               if (rcu_access_pointer(fn->rr_ptr) == rt)
-                       fn->rr_ptr = NULL;
-               fib6_info_release(rt);
        }
 
        return 0;
index c8cf2fdbb13b88cc1bf6b494a75407cdc16977eb..cd2cfb04e5d82010a5eb1800a53fc8007479c6f9 100644 (file)
@@ -927,7 +927,6 @@ tx_err:
 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
 {
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct ip6_tnl *t = netdev_priv(dev);
        struct dst_entry *dst = skb_dst(skb);
        struct net_device_stats *stats;
@@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                        goto tx_err;
                }
        } else {
+               struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
index 021e5aef6ba31b7a9face6eb363a6409761385a7..3168847c30d1d4a0021b7effc8653befce1d4d22 100644 (file)
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->dev = from->dev;
        to->mark = from->mark;
 
+       skb_copy_hash(to, from);
+
 #ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
 #endif
@@ -1219,7 +1221,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
        if (mtu < IPV6_MIN_MTU)
                return -EINVAL;
        cork->base.fragsize = mtu;
-       cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
+       cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
+                             sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
 
        if (dst_allfrag(xfrm_dst_path(&rt->dst)))
                cork->base.flags |= IPCORK_ALLFRAG;
index 4d780c7f013060732dda2db760d7ba0474c812e3..568ca4187cd101e745988ee262f79431ef8d28cc 100644 (file)
@@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        case IPV6_DSTOPTS:
        {
                struct ipv6_txoptions *opt;
+               struct ipv6_opt_hdr *new = NULL;
+
+               /* hop-by-hop / destination options are privileged options */
+               retv = -EPERM;
+               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+                       break;
 
                /* remove any sticky options header with a zero option
                 * length, per RFC3542.
@@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                else if (optlen < sizeof(struct ipv6_opt_hdr) ||
                         optlen & 0x7 || optlen > 8 * 255)
                        goto e_inval;
-
-               /* hop-by-hop / destination options are privileged option */
-               retv = -EPERM;
-               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
-                       break;
+               else {
+                       new = memdup_user(optval, optlen);
+                       if (IS_ERR(new)) {
+                               retv = PTR_ERR(new);
+                               break;
+                       }
+                       if (unlikely(ipv6_optlen(new) > optlen)) {
+                               kfree(new);
+                               goto e_inval;
+                       }
+               }
 
                opt = rcu_dereference_protected(np->opt,
                                                lockdep_sock_is_held(sk));
-               opt = ipv6_renew_options(sk, opt, optname,
-                                        (struct ipv6_opt_hdr __user *)optval,
-                                        optlen);
+               opt = ipv6_renew_options(sk, opt, optname, new);
+               kfree(new);
                if (IS_ERR(opt)) {
                        retv = PTR_ERR(opt);
                        break;
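
memdup_user() collapses the allocate-then-copy_from_user() sequence into one helper that returns an ERR_PTR on failure, letting the option header be validated in kernel memory before ipv6_renew_options() ever sees it. The shape of the pattern (hypothetical function name):

        static int copy_and_use(void __user *optval, unsigned int optlen)
        {
                void *buf = memdup_user(optval, optlen);

                if (IS_ERR(buf))
                        return PTR_ERR(buf);
                /* validate and consume buf here */
                kfree(buf);
                return 0;
        }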
@@ -718,8 +729,9 @@ done:
                        struct sockaddr_in6 *psin6;
 
                        psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
-                       retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
-                                                &psin6->sin6_addr);
+                       retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
+                                                    &psin6->sin6_addr,
+                                                    MCAST_INCLUDE);
                        /* prior join w/ different source is ok */
                        if (retv && retv != -EADDRINUSE)
                                break;
index 975021df7c1cf2eae6897e3dd57ea20998f4ea90..f60f310785fd6989ac37dfd05a35c60e58b7986a 100644 (file)
@@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                          int delta);
 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
                            struct inet6_dev *idev);
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+                            const struct in6_addr *addr, unsigned int mode);
 
 #define MLD_QRV_DEFAULT                2
 /* RFC3810, 9.2. Query Interval */
@@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
        return iv > 0 ? iv : 1;
 }
 
-int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
+                              const struct in6_addr *addr, unsigned int mode)
 {
        struct net_device *dev = NULL;
        struct ipv6_mc_socklist *mc_lst;
@@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        }
 
        mc_lst->ifindex = dev->ifindex;
-       mc_lst->sfmode = MCAST_EXCLUDE;
+       mc_lst->sfmode = mode;
        rwlock_init(&mc_lst->sflock);
        mc_lst->sflist = NULL;
 
@@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
         *      now add/increase the group membership on the device
         */
 
-       err = ipv6_dev_mc_inc(dev, addr);
+       err = __ipv6_dev_mc_inc(dev, addr, mode);
 
        if (err) {
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        return 0;
 }
+
+int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+{
+       return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ipv6_sock_mc_join);
 
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+                         const struct in6_addr *addr, unsigned int mode)
+{
+       return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
+}
+
 /*
  *     socket leave on multicast group
  */
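
From userspace, the route into ipv6_sock_mc_join_ssm() is the source-specific join sockopt; with this series such a join starts the group in INCLUDE mode rather than EXCLUDE. A rough userspace sketch (fd and ifindex assumed set up by the caller):

        struct group_source_req gsr = {
                .gsr_interface = ifindex,
        };

        /* fill gsr.gsr_group and gsr.gsr_source with sockaddr_in6 values;
         * the kernel side lands in __ipv6_sock_mc_join(..., MCAST_INCLUDE)
         */
        setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
                   &gsr, sizeof(gsr));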
@@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
        return rv;
 }
 
-static void igmp6_group_added(struct ifmcaddr6 *mc)
+static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
 {
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];
@@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
        }
        /* else v2 */
 
-       mc->mca_crcount = mc->idev->mc_qrv;
+       /* Based on RFC 3810 6.1, for a newly added INCLUDE-mode SSM
+        * group we should not send a filter-mode change record, as the
+        * transition is from IN() to IN(A).
+        */
+       if (mode == MCAST_EXCLUDE)
+               mc->mca_crcount = mc->idev->mc_qrv;
+
        mld_ifc_event(mc->idev);
 }
 
@@ -770,13 +790,13 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        spin_lock_bh(&im->mca_lock);
        if (pmc) {
                im->idev = pmc->idev;
-               im->mca_crcount = idev->mc_qrv;
-               im->mca_sfmode = pmc->mca_sfmode;
-               if (pmc->mca_sfmode == MCAST_INCLUDE) {
+               if (im->mca_sfmode == MCAST_INCLUDE) {
                        im->mca_tomb = pmc->mca_tomb;
                        im->mca_sources = pmc->mca_sources;
                        for (psf = im->mca_sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = im->mca_crcount;
+                               psf->sf_crcount = idev->mc_qrv;
+               } else {
+                       im->mca_crcount = idev->mc_qrv;
                }
                in6_dev_put(pmc->idev);
                kfree(pmc);
@@ -831,7 +851,8 @@ static void ma_put(struct ifmcaddr6 *mc)
 }
 
 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
-                                  const struct in6_addr *addr)
+                                  const struct in6_addr *addr,
+                                  unsigned int mode)
 {
        struct ifmcaddr6 *mc;
 
@@ -849,9 +870,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
        refcount_set(&mc->mca_refcnt, 1);
        spin_lock_init(&mc->mca_lock);
 
-       /* initial mode is (EX, empty) */
-       mc->mca_sfmode = MCAST_EXCLUDE;
-       mc->mca_sfcount[MCAST_EXCLUDE] = 1;
+       mc->mca_sfmode = mode;
+       mc->mca_sfcount[mode] = 1;
 
        if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
            IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
@@ -863,7 +883,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
 /*
  *     device multicast group inc (add if not found)
  */
-int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+                            const struct in6_addr *addr, unsigned int mode)
 {
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;
@@ -887,14 +908,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
                if (ipv6_addr_equal(&mc->mca_addr, addr)) {
                        mc->mca_users++;
                        write_unlock_bh(&idev->lock);
-                       ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
-                               NULL, 0);
+                       ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
                        in6_dev_put(idev);
                        return 0;
                }
        }
 
-       mc = mca_alloc(idev, addr);
+       mc = mca_alloc(idev, addr, mode);
        if (!mc) {
                write_unlock_bh(&idev->lock);
                in6_dev_put(idev);
@@ -911,11 +931,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        write_unlock_bh(&idev->lock);
 
        mld_del_delrec(idev, mc);
-       igmp6_group_added(mc);
+       igmp6_group_added(mc, mode);
        ma_put(mc);
        return 0;
 }
 
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+{
+       return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
+}
+
 /*
  *     device multicast group del
  */
@@ -1751,7 +1776,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 
                psf_next = psf->sf_next;
 
-               if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
+               if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
                        psf_prev = psf;
                        continue;
                }
@@ -2066,7 +2091,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev)
                if (pmc->mca_sfcount[MCAST_EXCLUDE])
                        type = MLD2_CHANGE_TO_EXCLUDE;
                else
-                       type = MLD2_CHANGE_TO_INCLUDE;
+                       type = MLD2_ALLOW_NEW_SOURCES;
                skb = add_grec(skb, pmc, type, 0, 0, 1);
                spin_unlock_bh(&pmc->mca_lock);
        }
@@ -2082,7 +2107,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
                mld_send_initial_cr(idev);
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
 }
 
@@ -2094,7 +2120,8 @@ static void mld_dad_timer_expire(struct timer_list *t)
        if (idev->mc_dad_count) {
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2452,7 +2479,8 @@ static void mld_ifc_timer_expire(struct timer_list *t)
        if (idev->mc_ifc_count) {
                idev->mc_ifc_count--;
                if (idev->mc_ifc_count)
-                       mld_ifc_start_timer(idev, idev->mc_maxdelay);
+                       mld_ifc_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2543,7 +2571,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
        ipv6_mc_reset(idev);
        for (i = idev->mc_list; i; i = i->next) {
                mld_del_delrec(idev, i);
-               igmp6_group_added(i);
+               igmp6_group_added(i, i->mca_sfmode);
        }
        read_unlock_bh(&idev->lock);
 }
index e640d2f3c55cf00568ba195a5f667a6da616ca47..0ec273997d1dc6eff71f62c66bbe214e369ab8f9 100644 (file)
@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
                        return;
                }
        }
-       if (ndopts.nd_opts_nonce)
+       if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
                memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
 
        inc = ipv6_addr_is_multicast(daddr);
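
A neighbour-discovery option length is expressed in units of 8 octets, and the nonce option is exactly 8 bytes on the wire (2 bytes of header plus the 6-byte nonce), so a well-formed nonce option must have nd_opt_len == 1; anything else would let the memcpy() read beyond the option. For reference, the option header:

        struct nd_opt_hdr {             /* per include/net/ndisc.h */
                __u8    nd_opt_type;
                __u8    nd_opt_len;     /* length in units of 8 octets */
        } __packed;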
index 7eab959734bc736cc103551fb50bce84f9aeaec7..daf2e9e9193d19f8f89890f96ca0439d8d55c1c6 100644 (file)
@@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
                .checkentry = icmp6_checkentry,
                .proto      = IPPROTO_ICMPV6,
                .family     = NFPROTO_IPV6,
+               .me         = THIS_MODULE,
        },
 };
 
index 5e0332014c1738999e680c1853829f384e880284..e4d9e6976d3c295e68b13c0ceecd5fa76db4fbc1 100644 (file)
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        if (hdr == NULL)
                goto err_reg;
 
-       net->nf_frag.sysctl.frags_hdr = hdr;
+       net->nf_frag_frags_hdr = hdr;
        return 0;
 
 err_reg:
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
        struct ctl_table *table;
 
-       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
-       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       table = net->nf_frag_frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag_frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
 }
@@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
            fq->q.meat == fq->q.len &&
            nf_ct_frag6_reasm(fq, skb, dev))
                ret = 0;
+       else
+               skb_dst_drop(skb);
 
 out_unlock:
        spin_unlock_bh(&fq->q.lock);
index bf1d6c421e3bd0d5524559d507eb14ce9874496f..5dfd33af64515518a2f94b13a62a8ae4dce846da 100644 (file)
@@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                 * to a listener socket if there's one */
                struct sock *sk2;
 
-               sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto,
+               sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
                                            &iph->saddr,
                                            nf_tproxy_laddr6(skb, laddr, &iph->daddr),
                                            hp->source,
@@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
 EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6);
 
 struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
                      const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
                      const __be16 sport, const __be16 dport,
@@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                      const enum nf_tproxy_lookup_t lookup_type)
 {
        struct sock *sk;
-       struct tcphdr *tcph;
 
        switch (protocol) {
-       case IPPROTO_TCP:
+       case IPPROTO_TCP: {
+               struct tcphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, thoff,
+                                       sizeof(struct tcphdr), &_hdr);
+               if (hp == NULL)
+                       return NULL;
+
                switch (lookup_type) {
                case NF_TPROXY_LOOKUP_LISTENER:
-                       tcph = hp;
                        sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
-                                                  thoff + __tcp_hdrlen(tcph),
+                                                  thoff + __tcp_hdrlen(hp),
                                                   saddr, sport,
                                                   daddr, ntohs(dport),
                                                   in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                        BUG();
                }
                break;
+               }
        case IPPROTO_UDP:
                sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
                                     in->ifindex);
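
The dropped void *hp argument was a raw header pointer supplied by the caller; re-fetching it with skb_header_pointer() inside the lookup copies the TCP header to a stack buffer when it is not linear and returns NULL on a truncated packet, instead of trusting whatever the caller parsed earlier. The idiom in isolation:

        struct tcphdr _hdr, *hp;

        hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
        if (hp == NULL)
                return NULL;    /* packet too short: refuse the lookup */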
index ce6f0d15b5dd5d8a9531a8316a932d3d30a3491b..afc307c89d1a977a00693999ec0f54b50005b7bd 100644 (file)
@@ -1334,7 +1334,7 @@ void raw6_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-/* Same as inet6_dgram_ops, sans udp_poll_mask.  */
+/* Same as inet6_dgram_ops, sans udp_poll.  */
 const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
@@ -1344,7 +1344,7 @@ const struct proto_ops inet6_sockraw_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = datagram_poll_mask,        /* ok           */
+       .poll              = datagram_poll,             /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 86a0e4333d42212d03f53e0d54fcf4e03a328607..ec18b3ce8b6d8fc84c9bdb828448b9da88b3ed48 100644 (file)
@@ -972,10 +972,10 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
        rt->dst.lastuse = jiffies;
 }
 
+/* Caller must already hold reference to @from */
 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
 {
        rt->rt6i_flags &= ~RTF_EXPIRES;
-       fib6_info_hold(from);
        rcu_assign_pointer(rt->from, from);
        dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
        if (from->fib6_metrics != &dst_default_metrics) {
@@ -984,6 +984,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
        }
 }
 
+/* Caller must already hold reference to @ort */
 static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
 {
        struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1045,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
        struct net_device *dev = rt->fib6_nh.nh_dev;
        struct rt6_info *nrt;
 
+       if (!fib6_info_hold_safe(rt))
+               return NULL;
+
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
        if (nrt)
                ip6_rt_copy_init(nrt, rt);
+       else
+               fib6_info_release(rt);
 
        return nrt;
 }
@@ -1178,10 +1184,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
         *      Clone the route.
         */
 
+       if (!fib6_info_hold_safe(ort))
+               return NULL;
+
        dev = ip6_rt_get_dev_rcu(ort);
        rt = ip6_dst_alloc(dev_net(dev), dev, 0);
-       if (!rt)
+       if (!rt) {
+               fib6_info_release(ort);
                return NULL;
+       }
 
        ip6_rt_copy_init(rt, ort);
        rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1221,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
        struct net_device *dev;
        struct rt6_info *pcpu_rt;
 
+       if (!fib6_info_hold_safe(rt))
+               return NULL;
+
        rcu_read_lock();
        dev = ip6_rt_get_dev_rcu(rt);
        pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
        rcu_read_unlock();
-       if (!pcpu_rt)
+       if (!pcpu_rt) {
+               fib6_info_release(rt);
                return NULL;
+       }
        ip6_rt_copy_init(pcpu_rt, rt);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
        return pcpu_rt;
@@ -2486,7 +2502,7 @@ restart:
 
 out:
        if (ret)
-               dst_hold(&ret->dst);
+               ip6_hold_safe(net, &ret, true);
        else
                ret = ip6_create_rt_rcu(rt);
 
@@ -3303,7 +3319,8 @@ static int ip6_route_del(struct fib6_config *cfg,
                                continue;
                        if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
                                continue;
-                       fib6_info_hold(rt);
+                       if (!fib6_info_hold_safe(rt))
+                               continue;
                        rcu_read_unlock();
 
                        /* if gateway was specified only delete the one hop */
@@ -3409,6 +3426,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
        rcu_read_lock();
        from = rcu_dereference(rt->from);
+       /* This fib6_info_hold() is safe here because we hold a reference to
+        * rt, and rt already holds a reference to the fib6_info.
+        */
        fib6_info_hold(from);
        rcu_read_unlock();
 
@@ -3470,7 +3490,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
                        continue;
                if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
                        continue;
-               fib6_info_hold(rt);
+               if (!fib6_info_hold_safe(rt))
+                       continue;
                break;
        }
 out:
@@ -3530,8 +3551,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
                    ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
                        break;
        }
-       if (rt)
-               fib6_info_hold(rt);
+       if (rt && !fib6_info_hold_safe(rt))
+               rt = NULL;
        rcu_read_unlock();
        return rt;
 }
@@ -3579,8 +3600,8 @@ restart:
                struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
 
                if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-                   (!idev || idev->cnf.accept_ra != 2)) {
-                       fib6_info_hold(rt);
+                   (!idev || idev->cnf.accept_ra != 2) &&
+                   fib6_info_hold_safe(rt)) {
                        rcu_read_unlock();
                        ip6_del_rt(net, rt);
                        goto restart;
@@ -3842,7 +3863,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
                        lockdep_is_held(&rt->fib6_table->tb6_lock));
        while (iter) {
                if (iter->fib6_metric == rt->fib6_metric &&
-                   iter->fib6_nsiblings)
+                   rt6_qualify_for_ecmp(iter))
                        return iter;
                iter = rcu_dereference_protected(iter->fib6_next,
                                lockdep_is_held(&rt->fib6_table->tb6_lock));
@@ -4388,6 +4409,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                        rt = NULL;
                        goto cleanup;
                }
+               if (!rt6_qualify_for_ecmp(rt)) {
+                       err = -EINVAL;
+                       NL_SET_ERR_MSG(extack,
+                                      "Device only routes can not be added for IPv6 using the multipath API.");
+                       fib6_info_release(rt);
+                       goto cleanup;
+               }
 
                rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
 
@@ -4439,7 +4467,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                 */
                cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
                                                     NLM_F_REPLACE);
-               cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND;
                nhn++;
        }
 
index 33fb35cbfac132b1a85cd2c9ce62b4344cbe8afe..558fe8cc6d43858ca828cbd8dc8ea65e63bc6602 100644 (file)
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
                        return -ENOMEM;
 
                for_each_possible_cpu(cpu) {
-                       tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
+                       tfm = crypto_alloc_shash(algo->name, 0, 0);
                        if (IS_ERR(tfm))
                                return PTR_ERR(tfm);
                        p_tfm = per_cpu_ptr(algo->tfms, cpu);
index 19ccf0dc996ca7da1f47bd887b18e4755257e462..a8854dd3e9c5ef64a7a480bb6ff891fac0e6d1ea 100644 (file)
@@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
 
        if (do_flowlabel > 0) {
                hash = skb_get_hash(skb);
-               rol32(hash, 16);
+               hash = rol32(hash, 16);
                flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
        } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
                flowlabel = ip6_flowlabel(inner_hdr);
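
The seg6 bug is a classic pure-function slip: rol32() has no side effects, so a bare "rol32(hash, 16);" statement was a no-op and the flow label was derived from the unrotated hash. For reference, the helper is roughly:

        static inline u32 rol32(u32 word, unsigned int shift)
        {
                return (word << shift) | (word >> (32 - shift));
        }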
index 7efa9fd7e1094dc43ca464e5c6f06ea36031d476..03e6b7a2bc530d1a19c565f00a03575b898b6f88 100644 (file)
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
-                                          ntohs(th->source), tcp_v6_iif(skb),
+                                          ntohs(th->source),
+                                          tcp_v6_iif_l3_slave(skb),
                                           tcp_v6_sdif(skb));
                if (!sk1)
                        goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
                                            skb, __tcp_hdrlen(th),
                                            &ipv6_hdr(skb)->saddr, th->source,
                                            &ipv6_hdr(skb)->daddr,
-                                           ntohs(th->dest), tcp_v6_iif(skb),
+                                           ntohs(th->dest),
+                                           tcp_v6_iif_l3_slave(skb),
                                            sdif);
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
index 68e86257a549988b5f87098b24c8e3d0bd1dc1ce..893a022f962081416fa1b9e5f96416a8c2e92e5c 100644 (file)
@@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);
 
@@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = {
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
-       .poll_mask      = iucv_sock_poll_mask,
+       .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 84b7d5c6fec81a7c62ed4744d48726dee8c7e426..d3601d421571b9825ff0a6cea9b75cb52fd51dea 100644 (file)
@@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
        struct list_head *head;
        int index = 0;
 
-       /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state,
-        * so  we set sk_state, otherwise epoll_wait always returns right away
-        * with EPOLLHUP
+       /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
+        * we set sk_state, otherwise epoll_wait always returns right away with
+        * EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;
 
@@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 8bdc1cbe490a4ae819db32851ea6a8184b0727b0..5e1d2946ffbf2a2cf4e65db44658c7f374e72e25 100644 (file)
@@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = {
 
        /* Now the operations that really occur. */
        .release        =       pfkey_release,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .sendmsg        =       pfkey_sendmsg,
        .recvmsg        =       pfkey_recvmsg,
 };
index 181073bf69251392c3a7fd23197a278f37dd67f0..a9c05b2bc1b0bc3471bbf62dc3b7c11e971a7f08 100644 (file)
@@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 336e4c00abbcdaef7385c90e24d2088131efe095..957369192ca181d6da21c9dda03d0e8a9726643e 100644 (file)
@@ -754,7 +754,7 @@ static const struct proto_ops l2tp_ip6_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip6_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 55188382845c310c98eb86cdfc3b78e1d03e8e0f..e398797878a9740e2b3a1525802a032705981630 100644 (file)
@@ -1818,7 +1818,7 @@ static const struct proto_ops pppol2tp_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppol2tp_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = pppol2tp_setsockopt,
index 804de84901868a4cffd2ec5d6c9e979af937cb59..1beeea9549fa6ec1f7b0e5f9af8ff3250a316f59 100644 (file)
@@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = {
        .socketpair  = sock_no_socketpair,
        .accept      = llc_ui_accept,
        .getname     = llc_ui_getname,
-       .poll_mask   = datagram_poll_mask,
+       .poll        = datagram_poll,
        .ioctl       = llc_ui_ioctl,
        .listen      = llc_ui_listen,
        .shutdown    = llc_ui_shutdown,
index 0a38cc1cbebcee97ed7e8779ab487e2e0943e84c..932985ca4e66829ffa559fac1a10243e93043101 100644 (file)
@@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
                     sdata->control_port_over_nl80211)) {
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
                bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
-               struct ethhdr *ehdr = eth_hdr(skb);
 
-               cfg80211_rx_control_port(dev, skb->data, skb->len,
-                                        ehdr->h_source,
-                                        be16_to_cpu(skb->protocol), noencrypt);
+               cfg80211_rx_control_port(dev, skb, noencrypt);
                dev_kfree_skb(skb);
        } else {
                /* deliver to local stack */
index 44b5dfe8727d936d39338006bc89b125c848d12b..fa1f1e63a2640fd405e42e5aeae9718b4ef12d2a 100644 (file)
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);
 
+       local_bh_disable();
        __ieee80211_subif_start_xmit(skb, skb->dev, flags);
+       local_bh_enable();
 
        return 0;
 }
index 5e2e511c4a6f69cf0b613c1b3facd0665d672cfd..d02fbfec37835bce6a27ecfdc146b95ba0ca077f 100644 (file)
@@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                if (!sta->uploaded)
                        continue;
 
-               if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+               if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+                   sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
                        continue;
 
                for (state = IEEE80211_STA_NOTEXIST;
index e7b05de1e6d1e136eb509293c4fde81468e12642..25e483e8278bd0404bf044c1a1748fdd1db77580 100644 (file)
@@ -73,8 +73,8 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
        ncm->data[2] = data;
        ncm->data[4] = ntohl(lsc->oem_status);
 
-       netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
-                   nc->id, data & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+                  nc->id, data & 0x1 ? "up" : "down");
 
        chained = !list_empty(&nc->link);
        state = nc->state;
@@ -148,9 +148,9 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
        hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
        ncm->data[3] = ntohl(hncdsc->status);
        spin_unlock_irqrestore(&nc->lock, flags);
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: host driver %srunning on channel %u\n",
-                     ncm->data[3] & 0x1 ? "" : "not ", nc->id);
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: host driver %srunning on channel %u\n",
+                  ncm->data[3] & 0x1 ? "" : "not ", nc->id);
 
        return 0;
 }
index 5561e221b71f10b223b381c2ed4b0752bedbc225..091284760d21fa02dc0f9997a2c68ce7f1f618e6 100644 (file)
@@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                }
                break;
        case ncsi_dev_state_config_done:
-               netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                             "NCSI: channel %u config done\n", nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+                          nc->id);
                spin_lock_irqsave(&nc->lock, flags);
                if (nc->reconfigure_needed) {
                        /* This channel's configuration has been updated
@@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "Dirty NCSI channel state reset\n");
+                       netdev_dbg(dev, "Dirty NCSI channel state reset\n");
                        ncsi_process_next_channel(ndp);
                        break;
                }
@@ -816,9 +815,9 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                } else {
                        hot_nc = NULL;
                        nc->state = NCSI_CHANNEL_INACTIVE;
-                       netdev_warn(ndp->ndev.dev,
-                                   "NCSI: channel %u link down after config\n",
-                                   nc->id);
+                       netdev_dbg(ndp->ndev.dev,
+                                  "NCSI: channel %u link down after config\n",
+                                  nc->id);
                }
                spin_unlock_irqrestore(&nc->lock, flags);
 
@@ -908,9 +907,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
        }
 
        ncm = &found->modes[NCSI_MODE_LINK];
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: Channel %u added to queue (link %s)\n",
-                     found->id, ncm->data[2] & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: Channel %u added to queue (link %s)\n",
+                  found->id, ncm->data[2] & 0x1 ? "up" : "down");
 
 out:
        spin_lock_irqsave(&ndp->lock, flags);
@@ -1199,14 +1198,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
        switch (old_state) {
        case NCSI_CHANNEL_INACTIVE:
                ndp->ndev.state = ncsi_dev_state_config;
-               netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+                          nc->id);
                ncsi_configure_channel(ndp);
                break;
        case NCSI_CHANNEL_ACTIVE:
                ndp->ndev.state = ncsi_dev_state_suspend;
-               netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+                          nc->id);
                ncsi_suspend_channel(ndp);
                break;
        default:
@@ -1226,8 +1225,6 @@ out:
                return ncsi_choose_active_channel(ndp);
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: No more channels to process\n");
        ncsi_report_link(ndp, false);
        return -ENODEV;
 }
@@ -1318,9 +1315,9 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                                if ((ndp->ndev.state & 0xff00) ==
                                                ncsi_dev_state_config ||
                                                !list_empty(&nc->link)) {
-                                       netdev_printk(KERN_DEBUG, nd->dev,
-                                                     "NCSI: channel %p marked dirty\n",
-                                                     nc);
+                                       netdev_dbg(nd->dev,
+                                                  "NCSI: channel %p marked dirty\n",
+                                                  nc);
                                        nc->reconfigure_needed = true;
                                }
                                spin_unlock_irqrestore(&nc->lock, flags);
@@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, nd->dev,
-                                     "NCSI: kicked channel %p\n", nc);
+                       netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
                        n++;
                }
        }
@@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
                n_vids++;
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u already registered\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u already registered\n",
+                                  vid);
                        return 0;
                }
        }
@@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        vlan->vid = vid;
        list_add_rcu(&vlan->list, &ndp->vlan_vids);
 
-       netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+       netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
 
        found = ncsi_kick_channels(ndp) != 0;
 
@@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
        /* Remove the VLAN id from our internal list */
        list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u found, removing\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
                        list_del_rcu(&vlan->list);
                        found = true;
                        kfree(vlan);
@@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
                }
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+       netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
        ncsi_report_link(ndp, true);
 }
 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
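
The NCSI hunks above demote chatty per-channel messages from netdev_info()/netdev_warn()/netdev_printk(KERN_DEBUG, ...) to netdev_dbg(), which compiles to a no-op unless DEBUG or dynamic debug enables it. A rough userspace sketch of that compile-time gating (my_dbg/my_info are illustrative stand-ins; the kernel's runtime dynamic-debug control is not modelled):

    #include <stdio.h>

    /* Sketch of compile-time gating: dbg-level messages cost nothing in
     * production builds unless compiled with -DDEBUG. */
    #ifdef DEBUG
    #define my_dbg(fmt, ...) fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)
    #else
    #define my_dbg(fmt, ...) do { } while (0)
    #endif

    #define my_info(fmt, ...) fprintf(stderr, "info: " fmt, ##__VA_ARGS__)

    int main(void)
    {
        my_info("channel %u config done\n", 0u);  /* always printed */
        my_dbg("channel %u config done\n", 0u);   /* only with -DDEBUG */
        return 0;
    }
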
index dbd7d1fad277ebe3fb09f7ec68f7178433a9c438..f0a1c536ef15a0d35a3078bf85b5f4bee704f894 100644 (file)
@@ -460,6 +460,13 @@ config NF_TABLES
 
 if NF_TABLES
 
+config NF_TABLES_SET
+       tristate "Netfilter nf_tables set infrastructure"
+       help
+         This option enables the nf_tables set infrastructure, which allows
+         looking up elements in a set and building one-way mappings between
+         matchings and actions.
+
 config NF_TABLES_INET
        depends on IPV6
        select NF_TABLES_IPV4
@@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD
          This option adds the "flow_offload" expression that you can use to
          choose what flows are placed into the hardware.
 
-config NFT_SET_RBTREE
-       tristate "Netfilter nf_tables rbtree set module"
-       help
-         This option adds the "rbtree" set type (Red Black tree) that is used
-         to build interval-based sets.
-
-config NFT_SET_HASH
-       tristate "Netfilter nf_tables hash set module"
-       help
-         This option adds the "hash" set type that is used to build one-way
-         mappings between matchings and actions.
-
-config NFT_SET_BITMAP
-       tristate "Netfilter nf_tables bitmap set module"
-       help
-         This option adds the "bitmap" set type that is used to build sets
-         whose keys are smaller or equal to 16 bits.
-
 config NFT_COUNTER
        tristate "Netfilter nf_tables counter module"
        help
index 44449389e527b082b9ea171d5c1759b7c7c7f227..8a76dced974d1c10eca35dca78cf2ab284cb2490 100644 (file)
@@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
                  nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
                  nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
 
+nf_tables_set-objs := nf_tables_set_core.o \
+                     nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
+
 obj-$(CONFIG_NF_TABLES)                += nf_tables.o
+obj-$(CONFIG_NF_TABLES_SET)    += nf_tables_set.o
 obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
 obj-$(CONFIG_NFT_CONNLIMIT)    += nft_connlimit.o
 obj-$(CONFIG_NFT_NUMGEN)       += nft_numgen.o
@@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE)               += nft_queue.o
 obj-$(CONFIG_NFT_QUOTA)                += nft_quota.o
 obj-$(CONFIG_NFT_REJECT)       += nft_reject.o
 obj-$(CONFIG_NFT_REJECT_INET)  += nft_reject_inet.o
-obj-$(CONFIG_NFT_SET_RBTREE)   += nft_set_rbtree.o
-obj-$(CONFIG_NFT_SET_HASH)     += nft_set_hash.o
-obj-$(CONFIG_NFT_SET_BITMAP)   += nft_set_bitmap.o
 obj-$(CONFIG_NFT_COUNTER)      += nft_counter.o
 obj-$(CONFIG_NFT_LOG)          += nft_log.o
 obj-$(CONFIG_NFT_MASQ)         += nft_masq.o
index d8383609fe2825b707cfb8ebc54381761ccc1108..510039862aa93c99904d2dbd3a7969327d0d896a 100644 (file)
@@ -47,6 +47,8 @@ struct nf_conncount_tuple {
        struct hlist_node               node;
        struct nf_conntrack_tuple       tuple;
        struct nf_conntrack_zone        zone;
+       int                             cpu;
+       u32                             jiffies32;
 };
 
 struct nf_conncount_rb {
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head,
                return false;
        conn->tuple = *tuple;
        conn->zone = *zone;
+       conn->cpu = raw_smp_processor_id();
+       conn->jiffies32 = (u32)jiffies;
        hlist_add_head(&conn->node, head);
        return true;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
 
+static const struct nf_conntrack_tuple_hash *
+find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+{
+       const struct nf_conntrack_tuple_hash *found;
+       unsigned long a, b;
+       int cpu = raw_smp_processor_id();
+       __s32 age;
+
+       found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
+       if (found)
+               return found;
+       b = conn->jiffies32;
+       a = (u32)jiffies;
+
+       /* conn might have been added just before by another cpu and
+        * might still be unconfirmed.  In this case, nf_conntrack_find()
+        * returns no result.  Thus only evict if this cpu added the
+        * stale entry or if the entry is older than two jiffies.
+        */
+       age = a - b;
+       if (conn->cpu == cpu || age >= 2) {
+               hlist_del(&conn->node);
+               kmem_cache_free(conncount_conn_cachep, conn);
+               return ERR_PTR(-ENOENT);
+       }
+
+       return ERR_PTR(-EAGAIN);
+}
+
 unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                 const struct nf_conntrack_tuple *tuple,
                                 const struct nf_conntrack_zone *zone,
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn;
-       struct hlist_node *n;
        struct nf_conn *found_ct;
+       struct hlist_node *n;
        unsigned int length = 0;
 
        *addit = tuple ? true : false;
 
        /* check the saved connections */
        hlist_for_each_entry_safe(conn, n, head, node) {
-               found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
-               if (found == NULL) {
-                       hlist_del(&conn->node);
-                       kmem_cache_free(conncount_conn_cachep, conn);
+               found = find_or_evict(net, conn);
+               if (IS_ERR(found)) {
+                       /* Not found, but might be about to be confirmed */
+                       if (PTR_ERR(found) == -EAGAIN) {
+                               length++;
+                               if (!tuple)
+                                       continue;
+
+                               if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
+                                   nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
+                                   nf_ct_zone_id(zone, zone->dir))
+                                       *addit = false;
+                       }
                        continue;
                }
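
The find_or_evict() age test above stores only the low 32 bits of jiffies and subtracts them as unsigned values, so the comparison stays correct across counter wraparound. A minimal userspace sketch of the same arithmetic (names are illustrative, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Evict a stale entry if this cpu created it, or if it is at least
     * two ticks old; unsigned 32-bit subtraction handles wraparound. */
    static int should_evict(int same_cpu, uint32_t now, uint32_t stamp)
    {
        int32_t age = (int32_t)(now - stamp);  /* signed view of u32 delta */
        return same_cpu || age >= 2;
    }

    int main(void)
    {
        /* stamp just before u32 wraparound, "now" just after it */
        printf("%d\n", should_evict(0, 1u, 0xFFFFFFFFu)); /* age 2: evict */
        printf("%d\n", should_evict(0, 0u, 0xFFFFFFFFu)); /* age 1: keep  */
        return 0;
    }
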
 
index 3465da2a98bd4ff68fc8e52935aad047c69855e8..3d52804250274602c521f3cfe6c0c3b8fa9e78e9 100644 (file)
@@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
                return -EOPNOTSUPP;
 
        /* On boot, we can set this without any fancy locking. */
-       if (!nf_conntrack_htable_size)
+       if (!nf_conntrack_hash)
                return param_set_uint(val, kp);
 
        rc = kstrtouint(val, 0, &hashsize);
index 551a1eddf0fab75eccf803b9711e069e61e60d5d..a75b11c393128d79107fc447c5109b7d0a786ea5 100644 (file)
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 
        nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
        nf_ct_iterate_destroy(unhelp, me);
+
+       /* Someone may have looked up the helper during the unhelp
+        * iteration above and still be using it, so wait for an RCU
+        * grace period before the caller frees it.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
 
index abe647d5b8c63256da8895388363ad9dde11edc0..9ce6336d1e559459235f755be7db4a7adb9ebfc1 100644 (file)
@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
                 * We currently ignore Sync packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
                [DCCP_PKT_SYNCACK] = {
                /*
                 * We currently ignore SyncAck packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
        },
        [CT_DCCP_ROLE_SERVER] = {
@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
                 * We currently ignore Sync packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
                [DCCP_PKT_SYNCACK] = {
                /*
                 * We currently ignore SyncAck packets
                 *
                 *      sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
-                       sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+                       sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
                },
        },
 };
index 4264570475788be388e603c1bc70330c812d0eb3..a61d6df6e5f64f5b2086d14f35c88a0491f77ce6 100644 (file)
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
        if (write) {
                struct ctl_table tmp = *table;
 
+               /* proc_dostring() can append to existing strings, so we need to
+                * initialize it as an empty string.
+                */
+               buf[0] = '\0';
                tmp.data = buf;
                r = proc_dostring(&tmp, write, buffer, lenp, ppos);
                if (r)
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
                rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
                mutex_unlock(&nf_log_mutex);
        } else {
+               struct ctl_table tmp = *table;
+
+               tmp.data = buf;
                mutex_lock(&nf_log_mutex);
                logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
                if (!logger)
-                       table->data = "NONE";
+                       strlcpy(buf, "NONE", sizeof(buf));
                else
-                       table->data = logger->name;
-               r = proc_dostring(table, write, buffer, lenp, ppos);
+                       strlcpy(buf, logger->name, sizeof(buf));
                mutex_unlock(&nf_log_mutex);
+               r = proc_dostring(&tmp, write, buffer, lenp, ppos);
        }
 
        return r;
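
Both branches above now point proc_dostring() at a stack buffer: the write path must zero-terminate it first because proc_dostring() can append to existing contents, and the read path copies the logger name in rather than aliasing table->data to shared strings. A small sketch of why the initial '\0' matters for any appending writer (append_str is a hypothetical stand-in):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical appending writer: it concatenates onto whatever the
     * buffer already holds, so the caller must start from a valid
     * empty string. */
    static void append_str(char *buf, size_t len, const char *src)
    {
        strncat(buf, src, len - strlen(buf) - 1);
    }

    int main(void)
    {
        char buf[16];

        buf[0] = '\0';   /* without this, strlen(buf) reads garbage */
        append_str(buf, sizeof(buf), "nf_log");
        printf("%s\n", buf);
        return 0;
    }
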
index 896d4a36081d4bb527b10c5db27df1a4dab32df8..f5745e4c6513e7a6bc8d1814e6efb3f497f76870 100644 (file)
@@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
 {
        ctx->net        = net;
        ctx->family     = family;
+       ctx->level      = 0;
        ctx->table      = table;
        ctx->chain      = chain;
        ctx->nla        = nla;
@@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
        struct nft_base_chain *basechain;
        struct nft_stats *stats = NULL;
        struct nft_chain_hook hook;
-       const struct nlattr *name;
        struct nf_hook_ops *ops;
        struct nft_trans *trans;
        int err;
@@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
                        return PTR_ERR(stats);
        }
 
+       err = -ENOMEM;
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
                                sizeof(struct nft_trans_chain));
-       if (trans == NULL) {
-               free_percpu(stats);
-               return -ENOMEM;
-       }
+       if (trans == NULL)
+               goto err;
 
        nft_trans_chain_stats(trans) = stats;
        nft_trans_chain_update(trans) = true;
@@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
        else
                nft_trans_chain_policy(trans) = -1;
 
-       name = nla[NFTA_CHAIN_NAME];
-       if (nla[NFTA_CHAIN_HANDLE] && name) {
-               nft_trans_chain_name(trans) =
-                       nla_strdup(name, GFP_KERNEL);
-               if (!nft_trans_chain_name(trans)) {
-                       kfree(trans);
-                       free_percpu(stats);
-                       return -ENOMEM;
+       if (nla[NFTA_CHAIN_HANDLE] &&
+           nla[NFTA_CHAIN_NAME]) {
+               struct nft_trans *tmp;
+               char *name;
+
+               err = -ENOMEM;
+               name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+               if (!name)
+                       goto err;
+
+               err = -EEXIST;
+               list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
+                       if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
+                           tmp->ctx.table == table &&
+                           nft_trans_chain_update(tmp) &&
+                           nft_trans_chain_name(tmp) &&
+                           strcmp(name, nft_trans_chain_name(tmp)) == 0) {
+                               kfree(name);
+                               goto err;
+                       }
                }
+
+               nft_trans_chain_name(trans) = name;
        }
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
        return 0;
+err:
+       free_percpu(stats);
+       kfree(trans);
+       return err;
 }
 
 static int nf_tables_newchain(struct net *net, struct sock *nlsk,
@@ -2254,6 +2271,39 @@ done:
        return skb->len;
 }
 
+static int nf_tables_dump_rules_start(struct netlink_callback *cb)
+{
+       const struct nlattr * const *nla = cb->data;
+       struct nft_rule_dump_ctx *ctx = NULL;
+
+       if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
+               ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+               if (!ctx)
+                       return -ENOMEM;
+
+               if (nla[NFTA_RULE_TABLE]) {
+                       ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
+                                                       GFP_ATOMIC);
+                       if (!ctx->table) {
+                               kfree(ctx);
+                               return -ENOMEM;
+                       }
+               }
+               if (nla[NFTA_RULE_CHAIN]) {
+                       ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
+                                               GFP_ATOMIC);
+                       if (!ctx->chain) {
+                               kfree(ctx->table);
+                               kfree(ctx);
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       cb->data = ctx;
+       return 0;
+}
+
 static int nf_tables_dump_rules_done(struct netlink_callback *cb)
 {
        struct nft_rule_dump_ctx *ctx = cb->data;
@@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_rules_start,
                        .dump = nf_tables_dump_rules,
                        .done = nf_tables_dump_rules_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
-                       struct nft_rule_dump_ctx *ctx;
-
-                       ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
-                       if (!ctx)
-                               return -ENOMEM;
-
-                       if (nla[NFTA_RULE_TABLE]) {
-                               ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
-                                                       GFP_ATOMIC);
-                               if (!ctx->table) {
-                                       kfree(ctx);
-                                       return -ENOMEM;
-                               }
-                       }
-                       if (nla[NFTA_RULE_CHAIN]) {
-                               ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
-                                                       GFP_ATOMIC);
-                               if (!ctx->chain) {
-                                       kfree(ctx->table);
-                                       kfree(ctx);
-                                       return -ENOMEM;
-                               }
-                       }
-                       c.data = ctx;
-               }
-
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
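
This hunk, and the similar ones that follow for sets, set elements, objects and flowtables, moves the per-dump allocation out of the request handler and into a netlink .start callback: cb->data initially carries the request attributes, and .start replaces it with a freshly allocated context that .done later frees, so a failed dump start can no longer leak or double-free that context. A userspace model of the ownership handoff, under the assumption that the core calls done() only after start() has succeeded:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Userspace model of the dump-control contract assumed here. */
    struct dump_ctl {
        void *data;                        /* request attrs in, dump ctx out */
        int (*start)(struct dump_ctl *);
        int (*done)(struct dump_ctl *);
    };

    static int my_start(struct dump_ctl *c)
    {
        char *ctx = strdup((const char *)c->data); /* copy request state */
        if (!ctx)
            return -1;
        c->data = ctx;                     /* ownership passes to the dump */
        return 0;
    }

    static int my_done(struct dump_ctl *c)
    {
        free(c->data);
        return 0;
    }

    static int dump_start(struct dump_ctl *c)
    {
        if (c->start(c))
            return -1;                     /* nothing allocated escapes */
        /* ... dump iterations would run here ... */
        return c->done(c);
    }

    int main(void)
    {
        struct dump_ctl c = { .data = (void *)"table0",
                              .start = my_start, .done = my_done };
        return dump_start(&c) ? 1 : 0;
    }
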
 
@@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
        struct nft_rule *rule;
        int err;
 
+       if (ctx->level == NFT_JUMP_STACK_SIZE)
+               return -EMLINK;
+
        list_for_each_entry(rule, &chain->rules, list) {
                if (!nft_is_active_next(ctx->net, rule))
                        continue;
@@ -3161,6 +3189,18 @@ done:
        return skb->len;
 }
 
+static int nf_tables_dump_sets_start(struct netlink_callback *cb)
+{
+       struct nft_ctx *ctx_dump = NULL;
+
+       ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
+       if (ctx_dump == NULL)
+               return -ENOMEM;
+
+       cb->data = ctx_dump;
+       return 0;
+}
+
 static int nf_tables_dump_sets_done(struct netlink_callback *cb)
 {
        kfree(cb->data);
@@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_sets_start,
                        .dump = nf_tables_dump_sets,
                        .done = nf_tables_dump_sets_done,
+                       .data = &ctx,
                        .module = THIS_MODULE,
                };
-               struct nft_ctx *ctx_dump;
-
-               ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC);
-               if (ctx_dump == NULL)
-                       return -ENOMEM;
-
-               *ctx_dump = ctx;
-               c.data = ctx_dump;
 
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
@@ -3849,6 +3883,15 @@ nla_put_failure:
        return -ENOSPC;
 }
 
+static int nf_tables_dump_set_start(struct netlink_callback *cb)
+{
+       struct nft_set_dump_ctx *dump_ctx = cb->data;
+
+       cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
+
+       return cb->data ? 0 : -ENOMEM;
+}
+
 static int nf_tables_dump_set_done(struct netlink_callback *cb)
 {
        kfree(cb->data);
@@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_set_start,
                        .dump = nf_tables_dump_set,
                        .done = nf_tables_dump_set_done,
                        .module = THIS_MODULE,
                };
-               struct nft_set_dump_ctx *dump_ctx;
-
-               dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC);
-               if (!dump_ctx)
-                       return -ENOMEM;
-
-               dump_ctx->set = set;
-               dump_ctx->ctx = ctx;
+               struct nft_set_dump_ctx dump_ctx = {
+                       .set = set,
+                       .ctx = ctx,
+               };
 
-               c.data = dump_ctx;
+               c.data = &dump_ctx;
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -4975,38 +5015,42 @@ done:
        return skb->len;
 }
 
-static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+static int nf_tables_dump_obj_start(struct netlink_callback *cb)
 {
-       struct nft_obj_filter *filter = cb->data;
+       const struct nlattr * const *nla = cb->data;
+       struct nft_obj_filter *filter = NULL;
 
-       if (filter) {
-               kfree(filter->table);
-               kfree(filter);
+       if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
+               filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+               if (!filter)
+                       return -ENOMEM;
+
+               if (nla[NFTA_OBJ_TABLE]) {
+                       filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
+                       if (!filter->table) {
+                               kfree(filter);
+                               return -ENOMEM;
+                       }
+               }
+
+               if (nla[NFTA_OBJ_TYPE])
+                       filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
        }
 
+       cb->data = filter;
        return 0;
 }
 
-static struct nft_obj_filter *
-nft_obj_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_obj_done(struct netlink_callback *cb)
 {
-       struct nft_obj_filter *filter;
-
-       filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
-       if (!filter)
-               return ERR_PTR(-ENOMEM);
+       struct nft_obj_filter *filter = cb->data;
 
-       if (nla[NFTA_OBJ_TABLE]) {
-               filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
-               if (!filter->table) {
-                       kfree(filter);
-                       return ERR_PTR(-ENOMEM);
-               }
+       if (filter) {
+               kfree(filter->table);
+               kfree(filter);
        }
-       if (nla[NFTA_OBJ_TYPE])
-               filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
 
-       return filter;
+       return 0;
 }
 
 /* called with rcu_read_lock held */
@@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_obj_start,
                        .dump = nf_tables_dump_obj,
                        .done = nf_tables_dump_obj_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_OBJ_TABLE] ||
-                   nla[NFTA_OBJ_TYPE]) {
-                       struct nft_obj_filter *filter;
-
-                       filter = nft_obj_filter_alloc(nla);
-                       if (IS_ERR(filter))
-                               return -ENOMEM;
-
-                       c.data = filter;
-               }
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
                flowtable->ops[i].priv          = &flowtable->data;
                flowtable->ops[i].hook          = flowtable->data.type->hook;
                flowtable->ops[i].dev           = dev_array[i];
-               flowtable->dev_name[i]          = kstrdup(dev_array[i]->name,
-                                                         GFP_KERNEL);
        }
 
        return err;
@@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
 err6:
        i = flowtable->ops_len;
 err5:
-       for (k = i - 1; k >= 0; k--) {
-               kfree(flowtable->dev_name[k]);
+       for (k = i - 1; k >= 0; k--)
                nf_unregister_net_hook(net, &flowtable->ops[k]);
-       }
 
        kfree(flowtable->ops);
 err4:
@@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        for (i = 0; i < flowtable->ops_len; i++) {
-               if (flowtable->dev_name[i][0] &&
-                   nla_put_string(skb, NFTA_DEVICE_NAME,
-                                  flowtable->dev_name[i]))
+               const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
+
+               if (dev &&
+                   nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
                        goto nla_put_failure;
        }
        nla_nest_end(skb, nest_devs);
@@ -5650,37 +5683,39 @@ done:
        return skb->len;
 }
 
-static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
 {
-       struct nft_flowtable_filter *filter = cb->data;
+       const struct nlattr * const *nla = cb->data;
+       struct nft_flowtable_filter *filter = NULL;
 
-       if (!filter)
-               return 0;
+       if (nla[NFTA_FLOWTABLE_TABLE]) {
+               filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+               if (!filter)
+                       return -ENOMEM;
 
-       kfree(filter->table);
-       kfree(filter);
+               filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+                                          GFP_ATOMIC);
+               if (!filter->table) {
+                       kfree(filter);
+                       return -ENOMEM;
+               }
+       }
 
+       cb->data = filter;
        return 0;
 }
 
-static struct nft_flowtable_filter *
-nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
 {
-       struct nft_flowtable_filter *filter;
+       struct nft_flowtable_filter *filter = cb->data;
 
-       filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
        if (!filter)
-               return ERR_PTR(-ENOMEM);
+               return 0;
 
-       if (nla[NFTA_FLOWTABLE_TABLE]) {
-               filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
-                                          GFP_ATOMIC);
-               if (!filter->table) {
-                       kfree(filter);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       return filter;
+       kfree(filter->table);
+       kfree(filter);
+
+       return 0;
 }
 
 /* called with rcu_read_lock held */
@@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = nf_tables_dump_flowtable_start,
                        .dump = nf_tables_dump_flowtable,
                        .done = nf_tables_dump_flowtable_done,
                        .module = THIS_MODULE,
+                       .data = (void *)nla,
                };
 
-               if (nla[NFTA_FLOWTABLE_TABLE]) {
-                       struct nft_flowtable_filter *filter;
-
-                       filter = nft_flowtable_filter_alloc(nla);
-                       if (IS_ERR(filter))
-                               return -ENOMEM;
-
-                       c.data = filter;
-               }
                return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
        }
 
@@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
        kfree(flowtable->name);
        flowtable->data.type->free(&flowtable->data);
        module_put(flowtable->data.type->owner);
+       kfree(flowtable);
 }
 
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
                        continue;
 
                nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
-               flowtable->dev_name[i][0] = '\0';
                flowtable->ops[i].dev = NULL;
                break;
        }
@@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans)
        case NFT_MSG_DELTABLE:
                nf_tables_table_destroy(&trans->ctx);
                break;
+       case NFT_MSG_NEWCHAIN:
+               kfree(nft_trans_chain_name(trans));
+               break;
        case NFT_MSG_DELCHAIN:
                nf_tables_chain_destroy(&trans->ctx);
                break;
@@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
                        break;
                case NFT_MSG_NEWCHAIN:
-                       if (nft_trans_chain_update(trans))
+                       if (nft_trans_chain_update(trans)) {
                                nft_chain_commit_update(trans);
-                       else
+                               nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                               /* trans destroyed after rcu grace period */
+                       } else {
                                nft_clear(net, trans->ctx.chain);
-
-                       nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
-                       nft_trans_destroy(trans);
+                               nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+                               nft_trans_destroy(trans);
+                       }
                        break;
                case NFT_MSG_DELCHAIN:
                        nft_chain_del(trans->ctx.chain);
@@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net)
                case NFT_MSG_NEWCHAIN:
                        if (nft_trans_chain_update(trans)) {
                                free_percpu(nft_trans_chain_stats(trans));
-
+                               kfree(nft_trans_chain_name(trans));
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
@@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
                        err = nf_tables_check_loops(ctx, data->verdict.chain);
                        if (err < 0)
                                return err;
-
-                       if (ctx->chain->level + 1 >
-                           data->verdict.chain->level) {
-                               if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
-                                       return -EMLINK;
-                               data->verdict.chain->level = ctx->chain->level + 1;
-                       }
                }
 
                return 0;
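
The hunk above removes the old per-chain level propagation; together with the ctx->level bound added to nft_chain_validate() earlier and the increments added in nft_immediate and nft_lookup below, the jump depth is now tracked on the validation context during recursion. A minimal sketch of that depth-bounded recursion (JUMP_STACK_SIZE stands in for NFT_JUMP_STACK_SIZE):

    #include <errno.h>
    #include <stdio.h>

    #define JUMP_STACK_SIZE 16   /* stand-in for NFT_JUMP_STACK_SIZE */

    struct ctx { int level; };

    /* The recursion itself carries the depth, so no per-chain level
     * needs to be maintained or kept consistent across updates. */
    static int chain_validate(struct ctx *ctx, int jumps_remaining)
    {
        if (ctx->level == JUMP_STACK_SIZE)
            return -EMLINK;
        if (jumps_remaining == 0)
            return 0;            /* leaf chain: no further jumps */
        ctx->level++;            /* what nft_immediate/nft_lookup do */
        int err = chain_validate(ctx, jumps_remaining - 1);
        if (err < 0)
            return err;
        ctx->level--;
        return 0;
    }

    int main(void)
    {
        struct ctx ctx = { 0 };
        printf("depth 15: %d\n", chain_validate(&ctx, 15)); /* ok */
        ctx.level = 0;
        printf("depth 17: %d\n", chain_validate(&ctx, 17)); /* -EMLINK */
        return 0;
    }
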
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
new file mode 100644 (file)
index 0000000..8147896
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netfilter/nf_tables_core.h>
+
+static int __init nf_tables_set_module_init(void)
+{
+       nft_register_set(&nft_set_hash_fast_type);
+       nft_register_set(&nft_set_hash_type);
+       nft_register_set(&nft_set_rhash_type);
+       nft_register_set(&nft_set_bitmap_type);
+       nft_register_set(&nft_set_rbtree_type);
+
+       return 0;
+}
+
+static void __exit nf_tables_set_module_exit(void)
+{
+       nft_unregister_set(&nft_set_rbtree_type);
+       nft_unregister_set(&nft_set_bitmap_type);
+       nft_unregister_set(&nft_set_rhash_type);
+       nft_unregister_set(&nft_set_hash_type);
+       nft_unregister_set(&nft_set_hash_fast_type);
+}
+
+module_init(nf_tables_set_module_init);
+module_exit(nf_tables_set_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFT_SET();
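
The new nf_tables_set_core.c folds the hash, bitmap and rbtree backends into a single nf_tables_set module, registering the five set types at init and unregistering them in reverse order at exit; as written, the init returns 0 without checking the registration results. A generic sketch of the stricter register/unwind idiom (reg()/unreg() are hypothetical stand-ins for nft_register_set()/nft_unregister_set()):

    #include <stdio.h>

    static int reg(int i)    { printf("register %d\n", i);   return 0; }
    static void unreg(int i) { printf("unregister %d\n", i); }

    #define NTYPES 5

    /* Register in order; on failure, roll back in reverse so the
     * module leaves no partially registered state behind. */
    static int module_init_checked(void)
    {
        int i, err;

        for (i = 0; i < NTYPES; i++) {
            err = reg(i);
            if (err) {
                while (--i >= 0)
                    unreg(i);
                return err;
            }
        }
        return 0;
    }

    int main(void) { return module_init_checked(); }
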
index 4ccd2988f9db637166358335d8e26299c7237bec..ea4ba551abb28cb25c833dc408e23d1313b21bb4 100644 (file)
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
+       [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+       [NFQA_CFG_MASK]         = { .type = NLA_U32 },
+       [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static const struct nf_queue_handler nfqh = {
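
The three nla_policy entries added above make nfnetlink verify that these u32 attributes carry at least four bytes of payload before the handlers read them. A toy sketch of that kind of length policy (the struct and helper are illustrative, not the netlink API):

    #include <stdint.h>
    #include <stdio.h>

    struct attr { uint16_t len; const void *data; };

    /* A u32 attribute must be long enough before it is dereferenced. */
    static int get_u32_checked(const struct attr *a, uint32_t *out)
    {
        if (a->len < sizeof(uint32_t))
            return -1;           /* reject short attribute */
        *out = *(const uint32_t *)a->data;
        return 0;
    }

    int main(void)
    {
        uint32_t v, payload = 42;
        struct attr ok  = { sizeof(payload), &payload };
        struct attr bad = { 2, &payload };

        printf("ok:  %d\n", get_u32_checked(&ok, &v));
        printf("bad: %d\n", get_u32_checked(&bad, &v));
        return 0;
    }
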
index 8d1ff654e5aff1dfd5c2ace7693876568ea3377a..32535eea51b296ab1f2cb5bdd06972497f380a78 100644 (file)
@@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
        family = ctx->family;
 
+       if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
+           strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
+           strcmp(tg_name, "standard") == 0)
+               return ERR_PTR(-EINVAL);
+
        /* Re-use the existing target if it's already loaded. */
        list_for_each_entry(nft_target, &nft_target_list, head) {
                struct xt_target *target = nft_target->ops.data;
 
+               if (!target->target)
+                       continue;
+
                if (nft_target_cmp(target, tg_name, rev, family))
                        return &nft_target->ops;
        }
@@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        if (IS_ERR(target))
                return ERR_PTR(-ENOENT);
 
+       if (!target->target) {
+               err = -EINVAL;
+               goto err;
+       }
+
        if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
                err = -EINVAL;
                goto err;
index 15adf8ca82c3783efcb510efa85aa894afd1c2da..0777a93211e2b576e57eec2f4aaec71d57f3700d 100644 (file)
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
                                  const struct nft_data **d)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
        const struct nft_data *data;
        int err;
 
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
+               pctx->level++;
                err = nft_chain_validate(ctx, data->verdict.chain);
                if (err < 0)
                        return err;
+               pctx->level--;
                break;
        default:
                break;
index 42e6fadf1417eba7ce4512d43cce339fc627e204..c2a1d84cdfc460d86b50ae6d28dde2653f1666dd 100644 (file)
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+       struct nft_ctx *pctx = (struct nft_ctx *)ctx;
        const struct nft_data *data;
+       int err;
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
            *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
-               return nft_chain_validate(ctx, data->verdict.chain);
+               pctx->level++;
+               err = nft_chain_validate(ctx, data->verdict.chain);
+               if (err < 0)
+                       return err;
+               pctx->level--;
+               break;
        default:
-               return 0;
+               break;
        }
+
+       return 0;
 }
 
 static int nft_lookup_validate(const struct nft_ctx *ctx,
index d6626e01c7ee6b0c25a2197f75309030edca34c6..128bc16f52dd436aa78ac21ae45be4cf69a70f00 100644 (file)
@@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
 }
 
-static struct nft_set_type nft_bitmap_type __read_mostly = {
+struct nft_set_type nft_set_bitmap_type __read_mostly = {
        .owner          = THIS_MODULE,
        .ops            = {
                .privsize       = nft_bitmap_privsize,
@@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = {
                .get            = nft_bitmap_get,
        },
 };
-
-static int __init nft_bitmap_module_init(void)
-{
-       return nft_register_set(&nft_bitmap_type);
-}
-
-static void __exit nft_bitmap_module_exit(void)
-{
-       nft_unregister_set(&nft_bitmap_type);
-}
-
-module_init(nft_bitmap_module_init);
-module_exit(nft_bitmap_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_SET();
index 6f9a1365a09f07c517804cec45e31cd657f93337..90c3e7e6cacba2878d36209c0b2cf76b9d2f5c82 100644 (file)
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
        struct nft_rhash *priv = nft_set_priv(set);
 
        cancel_delayed_work_sync(&priv->gc_work);
+       rcu_barrier();
        rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
                                    (void *)set);
 }
@@ -654,7 +655,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
        return true;
 }
 
-static struct nft_set_type nft_rhash_type __read_mostly = {
+struct nft_set_type nft_set_rhash_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT |
                          NFT_SET_TIMEOUT | NFT_SET_EVAL,
@@ -677,7 +678,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = {
        },
 };
 
-static struct nft_set_type nft_hash_type __read_mostly = {
+struct nft_set_type nft_set_hash_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
        .ops            = {
@@ -697,7 +698,7 @@ static struct nft_set_type nft_hash_type __read_mostly = {
        },
 };
 
-static struct nft_set_type nft_hash_fast_type __read_mostly = {
+struct nft_set_type nft_set_hash_fast_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
        .ops            = {
@@ -716,26 +717,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = {
                .get            = nft_hash_get,
        },
 };
-
-static int __init nft_hash_module_init(void)
-{
-       if (nft_register_set(&nft_hash_fast_type) ||
-           nft_register_set(&nft_hash_type) ||
-           nft_register_set(&nft_rhash_type))
-               return 1;
-       return 0;
-}
-
-static void __exit nft_hash_module_exit(void)
-{
-       nft_unregister_set(&nft_rhash_type);
-       nft_unregister_set(&nft_hash_type);
-       nft_unregister_set(&nft_hash_fast_type);
-}
-
-module_init(nft_hash_module_init);
-module_exit(nft_hash_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
index 7f3a9a211034b2dee751dd776e1b5f59db6c6b61..9873d734b49480ff0722ca73d18cf7ab774e98fb 100644 (file)
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work)
 
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
-                       goto out;
+                       break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work)
                        rbe = rb_entry(prev, struct nft_rbtree_elem, node);
                        atomic_dec(&set->nelems);
                        nft_set_gc_batch_add(gcb, rbe);
+                       prev = NULL;
                }
                node = rb_next(node);
+               if (!node)
+                       break;
        }
-out:
        if (gcb) {
                for (i = 0; i < gcb->head.cnt; i++) {
                        rbe = gcb->elems[i];
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
        struct rb_node *node;
 
        cancel_delayed_work_sync(&priv->gc_work);
+       rcu_barrier();
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
@@ -462,7 +465,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
 }
 
-static struct nft_set_type nft_rbtree_type __read_mostly = {
+struct nft_set_type nft_set_rbtree_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .ops            = {
@@ -481,20 +484,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = {
                .get            = nft_rbtree_get,
        },
 };
-
-static int __init nft_rbtree_module_init(void)
-{
-       return nft_register_set(&nft_rbtree_type);
-}
-
-static void __exit nft_rbtree_module_exit(void)
-{
-       nft_unregister_set(&nft_rbtree_type);
-}
-
-module_init(nft_rbtree_module_init);
-module_exit(nft_rbtree_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
index 58fce4e749a97deb7f50ee96cb328d45624ccc8c..d76550a8b642aafd96853332d18db898e43ff587 100644 (file)
@@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
         * addresses, this happens if the redirect already happened
         * and the current packet belongs to an already established
         * connection */
-       sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+       sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                   iph->saddr, iph->daddr,
                                   hp->source, hp->dest,
                                   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
        else if (!sk)
                /* no, there's no established connection, check if
                 * there's a listener on the redirected addr/port */
-               sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+               sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                           iph->saddr, laddr,
                                           hp->source, lport,
                                           skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
         * addresses, this happens if the redirect already happened
         * and the current packet belongs to an already established
         * connection */
-       sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto,
+       sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
                                   &iph->saddr, &iph->daddr,
                                   hp->source, hp->dest,
                                   xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
        else if (!sk)
                /* no, there's no established connection, check if
                 * there's a listener on the redirected addr/port */
-               sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp,
+               sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
                                           tproto, &iph->saddr, laddr,
                                           hp->source, lport,
                                           xt_in(par), NF_TPROXY_LOOKUP_LISTENER);
index 1189b84413d5a8236f878a9cc99bcfa09368ec69..393573a99a5a34d3ebaad3a71b36293b6c2fb19f 100644 (file)
@@ -2658,7 +2658,7 @@ static const struct proto_ops netlink_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        netlink_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 93fbcafbf3886d34b0be87244c405b8319df89dd..03f37c4e64fe44cd822952225736084ad151b2e8 100644 (file)
@@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       nr_accept,
        .getname        =       nr_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       nr_ioctl,
        .listen         =       nr_listen,
        .shutdown       =       sock_no_shutdown,
index 2ceefa183ceed6ba3d06f2aae958104a514f2146..6a196e438b6c03d4c86e0a8a78af1c496a7e599b 100644 (file)
@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
                pr_debug("Fragment %zd bytes remaining %zd",
                         frag_len, remaining_len);
 
-               pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+               pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
                                         frag_len + LLCP_HEADER_SIZE, &err);
                if (pdu == NULL) {
-                       pr_err("Could not allocate PDU\n");
-                       continue;
+                       pr_err("Could not allocate PDU (error=%d)\n", err);
+                       len -= remaining_len;
+                       if (len == 0)
+                               len = err;
+                       break;
                }
 
                pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
index ab5bb14b49af92241b12584925983de43b143bb7..ea0c0c6f187429426f4849347c09b847f0111fff 100644 (file)
@@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
+                                  poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        pr_debug("%p\n", sk);
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
@@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = llcp_sock_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = llcp_sock_listen,
        .shutdown       = sock_no_shutdown,
@@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
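
The ->poll conversions throughout this patch restore the classic signature: the method receives the file and a poll_table, registers the wait queue itself via sock_poll_wait()/poll_wait(), and only then computes the readiness mask, where the removed ->poll_mask variant had split those steps between driver and VFS. A userspace sketch of the mask-computation half, with a fake socket state in place of struct sock:

    #include <stdio.h>
    #include <sys/epoll.h>

    struct fake_sock { int rx_queued; int writable; int err; };

    /* After the (elided) wait-queue registration, derive a readiness
     * mask from socket state, as llcp_sock_poll does. */
    static unsigned int sock_poll_mask(const struct fake_sock *sk)
    {
        unsigned int mask = 0;

        if (sk->err)
            mask |= EPOLLERR;
        if (sk->rx_queued)
            mask |= EPOLLIN | EPOLLRDNORM;
        if (sk->writable)
            mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
    }

    int main(void)
    {
        struct fake_sock sk = { .rx_queued = 1, .writable = 1 };
        printf("mask=0x%x\n", sock_poll_mask(&sk));
        return 0;
    }
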
index 60c322531c498f1d43582be5b76f3a2f575ed5bc..e2188deb08dc3bb16e2a60808b274a4a092fd2ee 100644 (file)
@@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 9696ef96b719bf24625adea2a959deac1d2a975f..1a30e165eeb4fd1b884a0d5cd79c6823a5de9feb 100644 (file)
@@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        __skb_pull(skb, nsh_len);
 
        skb_reset_mac_header(skb);
-       skb_reset_mac_len(skb);
+       skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
        skb->protocol = proto;
 
        features &= NETIF_F_SG;
index 50809748c1279ea17b7499acbec5699443804f64..9b27d0cd766d560fdb67ee2e3bbfc415963db8c6 100644 (file)
@@ -2262,6 +2262,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                if (po->stats.stats1.tp_drops)
                        status |= TP_STATUS_LOSING;
        }
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
@@ -2269,15 +2276,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
-       if (do_vnet) {
-               if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                           sizeof(struct virtio_net_hdr),
-                                           vio_le(), true, 0)) {
-                       spin_lock(&sk->sk_receive_queue.lock);
-                       goto drop_n_account;
-               }
-       }
-
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2880,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out_free;
        } else if (reserve) {
                skb_reserve(skb, -reserve);
+               if (len < reserve)
+                       skb_reset_network_header(skb);
        }
 
        /* Returns -EFAULT on error */
@@ -4078,11 +4078,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
        return 0;
 }
 
-static __poll_t packet_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t packet_poll(struct file *file, struct socket *sock,
+                               poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (po->rx_ring.pg_vec) {
@@ -4424,7 +4425,7 @@ static const struct proto_ops packet_ops_spkt = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname_spkt,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -4445,7 +4446,7 @@ static const struct proto_ops packet_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname,
-       .poll_mask =    packet_poll_mask,
+       .poll =         packet_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
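
The tpacket_rcv() hunks above move the fallible virtio-net header write before the statistics are bumped, while the receive-queue lock is still held, so the drop_n_account error path no longer has to re-take the lock it had just released. A stripped-down user-space model of that ordering (a pthread mutex plays the sk_receive_queue lock; names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long tp_packets, tp_drops;

    static int write_vnet_hdr(int ok) { return ok ? 0 : -1; }

    static int rcv_one(int hdr_ok)
    {
        pthread_mutex_lock(&queue_lock);
        if (write_vnet_hdr(hdr_ok)) {   /* fallible step moved up */
            tp_drops++;                 /* drop_n_account, lock held */
            pthread_mutex_unlock(&queue_lock);
            return -1;
        }
        tp_packets++;                   /* commit stats only on success */
        pthread_mutex_unlock(&queue_lock);
        return 0;
    }

    int main(void)
    {
        rcv_one(1);
        rcv_one(0);
        printf("packets=%lu drops=%lu\n", tp_packets, tp_drops);
        return 0;
    }
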
index c295c4e20f012f31c1b443c5f859969caf412cec..30187990257fdb07a57c03707d6e1af0740b42f0 100644 (file)
@@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
        return sizeof(struct sockaddr_pn);
 }
 
-static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
+                                       poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct pep_sock *pn = pep_sk(sk);
        __poll_t mask = 0;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = pn_socket_poll_mask,
+       .poll           = pn_socket_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = pn_socket_listen,
        .shutdown       = sock_no_shutdown,
index 1b5025ea5b0426272145b56fa42e21d908612243..86e1e37eb4e8a68beeecd3bfeeb597951259ea81 100644 (file)
@@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
        hdr->type = cpu_to_le32(type);
        hdr->src_node_id = cpu_to_le32(from->sq_node);
        hdr->src_port_id = cpu_to_le32(from->sq_port);
-       hdr->dst_node_id = cpu_to_le32(to->sq_node);
-       hdr->dst_port_id = cpu_to_le32(to->sq_port);
+       if (to->sq_port == QRTR_PORT_CTRL) {
+               hdr->dst_node_id = cpu_to_le32(node->nid);
+               hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
+       } else {
+               hdr->dst_node_id = cpu_to_le32(to->sq_node);
+               hdr->dst_port_id = cpu_to_le32(to->sq_port);
+       }
 
        hdr->size = cpu_to_le32(len);
        hdr->confirm_rx = 0;
@@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        node = NULL;
        if (addr->sq_node == QRTR_NODE_BCAST) {
                enqueue_fn = qrtr_bcast_enqueue;
+               if (addr->sq_port != QRTR_PORT_CTRL) {
+                       release_sock(sk);
+                       return -ENOTCONN;
+               }
        } else if (addr->sq_node == ipc->us.sq_node) {
                enqueue_fn = qrtr_local_enqueue;
        } else {
@@ -1023,7 +1032,7 @@ static const struct proto_ops qrtr_proto_ops = {
        .recvmsg        = qrtr_recvmsg,
        .getname        = qrtr_getname,
        .ioctl          = qrtr_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
index abef75da89a7450092aefc46ed902e6602fba7a6..cfb05953b0e57afad21fd708f0df42d63c77cd55 100644 (file)
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
 
 int rds_conn_init(void)
 {
+       int ret;
+
+       ret = rds_loop_net_init(); /* register pernet callback */
+       if (ret)
+               return ret;
+
        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
-       if (!rds_conn_slab)
+       if (!rds_conn_slab) {
+               rds_loop_net_exit();
                return -ENOMEM;
+       }
 
        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
@@ -676,6 +684,7 @@ int rds_conn_init(void)
 
 void rds_conn_exit(void)
 {
+       rds_loop_net_exit(); /* unregister pernet callback */
        rds_loop_exit();
 
        WARN_ON(!hlist_empty(rds_conn_hash));
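
rds_conn_init() now registers the loopback pernet callback first and unwinds it if the slab allocation fails, and rds_conn_exit() tears things down in the reverse order. A tiny sketch of that pairing, with stubs standing in for the RDS internals:

    #include <stdio.h>

    static int  register_pernet(void)   { return 0; }
    static void unregister_pernet(void) { }
    static void *create_slab(void)      { static int slab; return &slab; }

    static int conn_init(void)
    {
        if (register_pernet())
            return -1;
        if (!create_slab()) {
            unregister_pernet();   /* undo in reverse order on failure */
            return -1;             /* -ENOMEM in the real code */
        }
        return 0;
    }

    int main(void) { printf("init=%d\n", conn_init()); return 0; }
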
index dac6218a460ed4d4a5b7b03ad4f6056a68784a16..feea1f96ee2ad582dce8f815442da1bbf6e0508a 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/in.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "rds_single_path.h"
 #include "rds.h"
 
 static DEFINE_SPINLOCK(loop_conns_lock);
 static LIST_HEAD(loop_conns);
+static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
+
+static void rds_loop_set_unloading(void)
+{
+       atomic_set(&rds_loop_unloading, 1);
+}
+
+static bool rds_loop_is_unloading(struct rds_connection *conn)
+{
+       return atomic_read(&rds_loop_unloading) != 0;
+}
 
 /*
  * This 'loopback' transport is a special case for flows that originate
@@ -165,6 +178,8 @@ void rds_loop_exit(void)
        struct rds_loop_connection *lc, *_lc;
        LIST_HEAD(tmp_list);
 
+       rds_loop_set_unloading();
+       synchronize_rcu();
        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&loop_conns_lock);
        list_splice(&loop_conns, &tmp_list);
@@ -177,6 +192,46 @@ void rds_loop_exit(void)
        }
 }
 
+static void rds_loop_kill_conns(struct net *net)
+{
+       struct rds_loop_connection *lc, *_lc;
+       LIST_HEAD(tmp_list);
+
+       spin_lock_irq(&loop_conns_lock);
+       list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node)  {
+               struct net *c_net = read_pnet(&lc->conn->c_net);
+
+               if (net != c_net)
+                       continue;
+               list_move_tail(&lc->loop_node, &tmp_list);
+       }
+       spin_unlock_irq(&loop_conns_lock);
+
+       list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+               WARN_ON(lc->conn->c_passive);
+               rds_conn_destroy(lc->conn);
+       }
+}
+
+static void __net_exit rds_loop_exit_net(struct net *net)
+{
+       rds_loop_kill_conns(net);
+}
+
+static struct pernet_operations rds_loop_net_ops = {
+       .exit = rds_loop_exit_net,
+};
+
+int rds_loop_net_init(void)
+{
+       return register_pernet_device(&rds_loop_net_ops);
+}
+
+void rds_loop_net_exit(void)
+{
+       unregister_pernet_device(&rds_loop_net_ops);
+}
+
 /*
  * This is missing .xmit_* because loop doesn't go through generic
  * rds_send_xmit() and doesn't call rds_recv_incoming().  .listen_stop and
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = {
        .inc_free               = rds_loop_inc_free,
        .t_name                 = "loopback",
        .t_type                 = RDS_TRANS_LOOP,
+       .t_unloading            = rds_loop_is_unloading,
 };
index 469fa4b2da4f38b5fb62358507cb9d9ca62aa825..bbc8cdd030df3137ea250578cb3d429a86fd68f2 100644 (file)
@@ -5,6 +5,8 @@
 /* loop.c */
 extern struct rds_transport rds_loop_transport;
 
+int rds_loop_net_init(void);
+void rds_loop_net_exit(void);
 void rds_loop_exit(void);
 
 #endif
index ebe42e7eb45697030367c4baba455b50c973c409..d00a0ef39a56b38cae4114654c44a3bddccb35ba 100644 (file)
@@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       rose_accept,
        .getname        =       rose_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       rose_ioctl,
        .listen         =       rose_listen,
        .shutdown       =       sock_no_shutdown,
index 3b1ac93efee22248ab01c3c8a610e874e99356b5..2b463047dd7ba93267feb584e1ffda280449a0b3 100644 (file)
@@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
 /*
  * permit an RxRPC socket to be polled
  */
-static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
@@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = rxrpc_poll_mask,
+       .poll           = rxrpc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = rxrpc_listen,
        .shutdown       = rxrpc_shutdown,
index 526a8e491626efb65fcda10d875e6f55ca2168e8..6e7124e57918e98433f0d3302565ae4e0b9eaaf4 100644 (file)
@@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
        }
        params_old = rtnl_dereference(p->params);
 
-       params_new->action = parm->action;
+       p->tcf_action = parm->action;
        params_new->update_flags = parm->update_flags;
        rcu_assign_pointer(p->params, params_new);
        if (params_old)
@@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&p->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
 
-       action = params->action;
+       action = READ_ONCE(p->tcf_action);
        if (unlikely(action == TC_ACT_SHOT))
                goto drop_stats;
 
@@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                .index   = p->tcf_index,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
+               .action  = p->tcf_action,
        };
        struct tcf_t t;
 
        params = rtnl_dereference(p->params);
-       opt.action = params->action;
        opt.update_flags = params->update_flags;
 
        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
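
The act_csum change moves the action verdict out of the RCU-managed params and into p->tcf_action, written by the control path and read on the datapath with READ_ONCE(). In portable C the same single-word publish/read can be modeled with relaxed C11 atomics (a rough analogue, not the kernel macros):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int tcf_action;

    static void control_path_update(int new_action)
    {
        /* WRITE_ONCE() analogue */
        atomic_store_explicit(&tcf_action, new_action,
                              memory_order_relaxed);
    }

    static int datapath_read(void)
    {
        /* READ_ONCE() analogue on the fast path */
        return atomic_load_explicit(&tcf_action, memory_order_relaxed);
    }

    int main(void)
    {
        control_path_update(2 /* e.g. TC_ACT_SHOT */);
        printf("action=%d\n", datapath_read());
        return 0;
    }
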
index 8527cfdc446d9bb82e8fa9fe1364dc13249b1e03..20d7d36b2fc9b9d3af256f48795da6e387f7f781 100644 (file)
@@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a)
        spin_unlock_bh(&ife->tcf_lock);
 
        p = rcu_dereference_protected(ife->params, 1);
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 /* under ife->tcf_lock for existing action */
@@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }
 
-       ife->tcf_action = parm->action;
-
        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(p->eth_dst, daddr);
@@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL, NULL);
                if (err) {
 metadata_parse_err:
-                       if (exists)
-                               tcf_idr_release(*a, bind);
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +564,7 @@ metadata_parse_err:
                err = use_all_metadata(ife);
                if (err) {
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -576,6 +573,7 @@ metadata_parse_err:
                }
        }
 
+       ife->tcf_action = parm->action;
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
 
index 626dac81a48a6b2ab97e9d0c786b08989f693288..9bc6c2ae98a56ceb2a4719be91a1937b5441a58d 100644 (file)
@@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
 
        tcf_lastuse_update(&t->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
-       action = params->action;
+       action = READ_ONCE(t->tcf_action);
 
        switch (params->tcft_action) {
        case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
        params_old = rtnl_dereference(t->params);
 
-       params_new->action = parm->action;
+       t->tcf_action = parm->action;
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
 
@@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                .index    = t->tcf_index,
                .refcnt   = t->tcf_refcnt - ref,
                .bindcnt  = t->tcf_bindcnt - bind,
+               .action   = t->tcf_action,
        };
        struct tcf_t tm;
 
        params = rtnl_dereference(t->params);
 
        opt.t_action = params->tcft_action;
-       opt.action = params->action;
 
        if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
index cdc3c87c53e62d4db4bb18fa5f59d7889b9866cb..f74513a7c7a8ed179bfbeabb17fe60dd2f9b6eb2 100644 (file)
@@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
        for (tp = rtnl_dereference(chain->filter_chain);
             tp; tp = rtnl_dereference(tp->next))
                tfilter_notify(net, oskb, n, tp, block,
-                              q, parent, 0, event, false);
+                              q, parent, NULL, event, false);
 }
 
 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
                        memset(&cb->args[1], 0,
                               sizeof(cb->args) - sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
-                       if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
+                       if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
                                          NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
index 2b5be42a9f1ca8e63952158ed2b9339e1a308d0b..9e8b26a80fb3ea9e57b6b22d259eaefe171eca09 100644 (file)
@@ -66,7 +66,7 @@ struct fl_flow_mask {
        struct rhashtable_params filter_ht_params;
        struct flow_dissector dissector;
        struct list_head filters;
-       struct rcu_head rcu;
+       struct rcu_work rwork;
        struct list_head list;
 };
 
@@ -203,6 +203,20 @@ static int fl_init(struct tcf_proto *tp)
        return rhashtable_init(&head->ht, &mask_ht_params);
 }
 
+static void fl_mask_free(struct fl_flow_mask *mask)
+{
+       rhashtable_destroy(&mask->ht);
+       kfree(mask);
+}
+
+static void fl_mask_free_work(struct work_struct *work)
+{
+       struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+                                                struct fl_flow_mask, rwork);
+
+       fl_mask_free(mask);
+}
+
 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                        bool async)
 {
@@ -210,12 +224,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                return false;
 
        rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
-       rhashtable_destroy(&mask->ht);
        list_del_rcu(&mask->list);
        if (async)
-               kfree_rcu(mask, rcu);
+               tcf_queue_work(&mask->rwork, fl_mask_free_work);
        else
-               kfree(mask);
+               fl_mask_free(mask);
 
        return true;
 }
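
cls_flower swaps kfree_rcu() for tcf_queue_work() on an rcu_work because the mask teardown now includes rhashtable_destroy(), which may sleep and so cannot run from an RCU callback; the work item defers it to process context. A rough user-space analogue, with a pthread playing the deferred-work role (illustrative only):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct mask { int dummy; };

    static void mask_free(struct mask *m)
    {
        /* stands in for rhashtable_destroy() + kfree(), which may
           sleep and therefore cannot run in atomic context */
        free(m);
    }

    static void *mask_free_work(void *arg)
    {
        mask_free(arg);          /* deferred, sleepable context */
        return NULL;
    }

    int main(void)
    {
        pthread_t work;
        struct mask *m = calloc(1, sizeof(*m));

        /* tcf_queue_work() analogue: hand the object to a worker
           instead of freeing it in the caller's context */
        pthread_create(&work, NULL, mask_free_work, m);
        pthread_join(work, NULL);

        mask_free(calloc(1, sizeof(*m)));   /* synchronous path */
        return 0;
    }
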
index c98a61e980baa68931f7e974582eb1c43ed60cf5..9c4c2bb547d7ea1da26e956a77b23592d467365b 100644 (file)
@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        qdisc_drop(skb, sch, to_free);
-       return NET_XMIT_SUCCESS;
+       return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
index cd2e0e342fb6235840860ff15ceaeb73eddaa492..6c0a9d5dbf9441d00a832915e23d6b82bd8ab313 100644 (file)
@@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = fq_codel_change(sch, opt, extack);
+               err = fq_codel_change(sch, opt, extack);
                if (err)
-                       return err;
+                       goto init_failure;
        }
 
        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
-               return err;
+               goto init_failure;
 
        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
-               if (!q->flows)
-                       return -ENOMEM;
+               if (!q->flows) {
+                       err = -ENOMEM;
+                       goto init_failure;
+               }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
-               if (!q->backlogs)
-                       return -ENOMEM;
+               if (!q->backlogs) {
+                       err = -ENOMEM;
+                       goto alloc_failure;
+               }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;
 
@@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
+
+alloc_failure:
+       kvfree(q->flows);
+       q->flows = NULL;
+init_failure:
+       q->flows_cnt = 0;
+       return err;
 }
 
 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
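
fq_codel_init() previously returned early on a failed backlog allocation, leaking q->flows and leaving flows_cnt describing memory that was never fully allocated; the new goto ladder unwinds the partial allocation and zeroes flows_cnt so later paths cannot index freed arrays. The pattern in miniature:

    #include <stdlib.h>
    #include <stdio.h>

    struct q { void *flows; unsigned *backlogs; unsigned flows_cnt; };

    static int init(struct q *q, unsigned n)
    {
        int err;

        q->flows_cnt = n;
        q->flows = calloc(n, 64);
        if (!q->flows) { err = -12; goto init_failure; }  /* -ENOMEM */
        q->backlogs = calloc(n, sizeof(*q->backlogs));
        if (!q->backlogs) { err = -12; goto alloc_failure; }
        return 0;

    alloc_failure:
        free(q->flows);
        q->flows = NULL;
    init_failure:
        q->flows_cnt = 0;   /* nothing may touch the freed arrays */
        return err;
    }

    int main(void)
    {
        struct q q = { 0 };
        printf("init=%d\n", init(&q, 1024));
        free(q.flows);
        free(q.backlogs);
        return 0;
    }
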
index 3ae9877ea2057d0ba517c84d38f6ba6a79ff6ef8..3278a76f6861576ba7e42cf9f91a62f96443cb3a 100644 (file)
@@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
-       WARN_ON(next_time == 0);
-       qdisc_watchdog_schedule(&q->watchdog, next_time);
+       if (next_time)
+               qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int
index 79daa98208c391c780440144d69bc7be875c3476..bfb9f812e2ef9fa605b08dc1f534781573c3abf8 100644 (file)
@@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
                msg->can_delay = 0;
-               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+               if (msg_len > first_len)
+                       SCTP_INC_STATS(sock_net(asoc->base.sk),
+                                      SCTP_MIB_FRAGUSRMSGS);
        } else {
                /* Which may be the only one... */
                first_len = msg_len;
index 7339918a805d93db8a94fed627f99962e07e3267..0cd2e764f47ff0874438301324de25e4bf33dd95 100644 (file)
@@ -1010,7 +1010,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = sctp_getname,
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,
index 5dffbc4930086699cefa10f704de5fd2068169c8..67f73d3a1356b93d3896b6985a65e70615902b18 100644 (file)
@@ -1016,7 +1016,7 @@ static const struct proto_ops inet_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,      /* Semantics are different.  */
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,     /* Looks harmless.  */
index d20f7addee19ecb794fa85f9ed73e8b40784a095..ce620e878538be99e1f79784582d0da48ba292ea 100644 (file)
@@ -7717,12 +7717,14 @@ out:
  * here, again, by modeling the current TCP/UDP code.  We don't have
  * a good way to test with it yet.
  */
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct sctp_sock *sp = sctp_sk(sk);
        __poll_t mask;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        sock_rps_record_flow(sk);
 
        /* A TCP-style listening socket becomes readable when the accept queue
index 445b7ef61677cfdb1172486e432b9bd6a0f853d5..12cac85da994356ef24cf264e1fb8451f2e303dc 100644 (file)
@@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 
        if (dst) {
                /* Re-fetch, as under layers may have a higher minimum size */
-               pmtu = SCTP_TRUNC4(dst_mtu(dst));
+               pmtu = sctp_dst_mtu(dst);
                change = t->pathmtu != pmtu;
        }
        t->pathmtu = pmtu;
index da7f02edcd374c44437e34a2705f410317ea536d..05e4ffe5aabde6baa711b1396484cf037fbccaee 100644 (file)
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending);  /* serialize link group
                                                 */
 
 static void smc_tcp_listen_work(struct work_struct *);
+static void smc_connect_work(struct work_struct *);
 
 static void smc_set_keepalive(struct sock *sk, int val)
 {
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock)
                goto out;
 
        smc = smc_sk(sk);
+
+       /* cleanup for a dangling non-blocking connect */
+       flush_work(&smc->connect_work);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
                 * sock lock for child sockets again
@@ -140,7 +147,8 @@ static int smc_release(struct socket *sock)
                smc->clcsock = NULL;
        }
        if (smc->use_fallback) {
-               sock_put(sk); /* passive closing */
+               if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+                       sock_put(sk); /* passive closing */
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);
        }
@@ -186,6 +194,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        sk->sk_protocol = protocol;
        smc = smc_sk(sk);
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       INIT_WORK(&smc->connect_work, smc_connect_work);
        INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        INIT_LIST_HEAD(&smc->accept_q);
        spin_lock_init(&smc->accept_q_lock);
@@ -409,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
 {
        int rc;
 
-       if (reason_code < 0) /* error, fallback is not possible */
+       if (reason_code < 0) { /* error, fallback is not possible */
+               if (smc->sk.sk_state == SMC_INIT)
+                       sock_put(&smc->sk); /* passive closing */
                return reason_code;
+       }
        if (reason_code != SMC_CLC_DECL_REPLY) {
                rc = smc_clc_send_decline(smc, reason_code);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (smc->sk.sk_state == SMC_INIT)
+                               sock_put(&smc->sk); /* passive closing */
                        return rc;
+               }
        }
        return smc_connect_fallback(smc);
 }
@@ -427,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
                smc_lgr_forget(smc->conn.lgr);
        mutex_unlock(&smc_create_lgr_pending);
        smc_conn_free(&smc->conn);
-       if (reason_code < 0 && smc->sk.sk_state == SMC_INIT)
-               sock_put(&smc->sk); /* passive closing */
        return reason_code;
 }
 
@@ -576,6 +589,35 @@ static int __smc_connect(struct smc_sock *smc)
        return 0;
 }
 
+static void smc_connect_work(struct work_struct *work)
+{
+       struct smc_sock *smc = container_of(work, struct smc_sock,
+                                           connect_work);
+       int rc;
+
+       lock_sock(&smc->sk);
+       rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
+                           smc->connect_info->alen, smc->connect_info->flags);
+       if (smc->clcsock->sk->sk_err) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               goto out;
+       }
+       if (rc < 0) {
+               smc->sk.sk_err = -rc;
+               goto out;
+       }
+
+       rc = __smc_connect(smc);
+       if (rc < 0)
+               smc->sk.sk_err = -rc;
+
+out:
+       smc->sk.sk_state_change(&smc->sk);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+       release_sock(&smc->sk);
+}
+
 static int smc_connect(struct socket *sock, struct sockaddr *addr,
                       int alen, int flags)
 {
@@ -605,15 +647,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
-       rc = kernel_connect(smc->clcsock, addr, alen, flags);
-       if (rc)
-               goto out;
+       if (flags & O_NONBLOCK) {
+               if (smc->connect_info) {
+                       rc = -EALREADY;
+                       goto out;
+               }
+               smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
+               if (!smc->connect_info) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               smc->connect_info->alen = alen;
+               smc->connect_info->flags = flags ^ O_NONBLOCK;
+               memcpy(&smc->connect_info->addr, addr, alen);
+               schedule_work(&smc->connect_work);
+               rc = -EINPROGRESS;
+       } else {
+               rc = kernel_connect(smc->clcsock, addr, alen, flags);
+               if (rc)
+                       goto out;
 
-       rc = __smc_connect(smc);
-       if (rc < 0)
-               goto out;
-       else
-               rc = 0; /* success cases including fallback */
+               rc = __smc_connect(smc);
+               if (rc < 0)
+                       goto out;
+               else
+                       rc = 0; /* success cases including fallback */
+       }
 
 out:
        release_sock(sk);
@@ -1273,40 +1332,26 @@ static __poll_t smc_accept_poll(struct sock *parent)
        return mask;
 }
 
-static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t smc_poll(struct file *file, struct socket *sock,
+                            poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
        struct smc_sock *smc;
-       int rc;
 
        if (!sk)
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       sock_hold(sk);
-       lock_sock(sk);
        if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
                /* delegate to CLC child sock */
-               release_sock(sk);
-               mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
-               lock_sock(sk);
+               mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
-               if (sk->sk_err) {
+               if (sk->sk_err)
                        mask |= EPOLLERR;
-               } else {
-                       /* if non-blocking connect finished ... */
-                       if (sk->sk_state == SMC_INIT &&
-                           mask & EPOLLOUT &&
-                           smc->clcsock->sk->sk_state != TCP_CLOSE) {
-                               rc = __smc_connect(smc);
-                               if (rc < 0)
-                                       mask |= EPOLLERR;
-                               /* success cases including fallback */
-                               mask |= EPOLLOUT | EPOLLWRNORM;
-                       }
-               }
        } else {
+               if (sk->sk_state != SMC_CLOSED)
+                       sock_poll_wait(file, sk_sleep(sk), wait);
                if (sk->sk_err)
                        mask |= EPOLLERR;
                if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1332,10 +1377,7 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
                }
                if (smc->conn.urg_state == SMC_URG_VALID)
                        mask |= EPOLLPRI;
-
        }
-       release_sock(sk);
-       sock_put(sk);
 
        return mask;
 }
@@ -1415,7 +1457,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       get_user(val, (int __user *)optval);
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
 
        lock_sock(sk);
        switch (optname) {
@@ -1483,10 +1526,13 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                        return -EBADF;
                return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
        }
+       lock_sock(&smc->sk);
        switch (cmd) {
        case SIOCINQ: /* same as FIONREAD */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1495,8 +1541,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCOUTQ:
                /* output queue size (not send + not acked) */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1506,8 +1554,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCOUTQNSD:
                /* output queue size (not send only) */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1515,8 +1565,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                        answ = smc_tx_prepared_sends(&smc->conn);
                break;
        case SIOCATMARK:
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED) {
                        answ = 0;
@@ -1532,8 +1584,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                }
                break;
        default:
+               release_sock(&smc->sk);
                return -ENOIOCTLCMD;
        }
+       release_sock(&smc->sk);
 
        return put_user(answ, (int __user *)arg);
 }
@@ -1619,7 +1673,7 @@ static const struct proto_ops smc_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = smc_accept,
        .getname        = smc_getname,
-       .poll_mask      = smc_poll_mask,
+       .poll           = smc_poll,
        .ioctl          = smc_ioctl,
        .listen         = smc_listen,
        .shutdown       = smc_shutdown,
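
Taken together, the af_smc.c hunks teach SMC a real non-blocking connect: smc_connect() stashes the address and flags in smc->connect_info, schedules connect_work and returns -EINPROGRESS, while smc_connect_work() runs the blocking TCP connect and the CLC handshake and signals completion through sk_state_change(). A compressed user-space analogue with a pthread standing in for the workqueue (names mirror the patch, but the code is illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct connect_info { int flags; int alen; char addr[32]; };

    struct smc_sock_model {
        struct connect_info *connect_info;  /* pending address */
        pthread_t connect_work;             /* workqueue stand-in */
    };

    static void *connect_work_fn(void *arg)
    {
        struct smc_sock_model *smc = arg;

        /* the blocking connect + handshake run here; the real code
           then calls sk_state_change() to wake pollers */
        free(smc->connect_info);
        smc->connect_info = NULL;
        return NULL;
    }

    static int smc_connect_model(struct smc_sock_model *smc,
                                 const char *addr)
    {
        if (smc->connect_info)
            return -114;                    /* -EALREADY */
        smc->connect_info = calloc(1, sizeof(*smc->connect_info));
        if (!smc->connect_info)
            return -12;                     /* -ENOMEM */
        strncpy(smc->connect_info->addr, addr,
                sizeof(smc->connect_info->addr) - 1);
        pthread_create(&smc->connect_work, NULL, connect_work_fn, smc);
        return -115;                        /* -EINPROGRESS */
    }

    int main(void)
    {
        struct smc_sock_model smc = { 0 };
        printf("rc=%d\n", smc_connect_model(&smc, "peer"));
        pthread_join(smc.connect_work, NULL);
        return 0;
    }
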
index 51ae1f10d81aa9390e76e392096e3f93c15b65fe..d7ca265704821a1862f84f209550c4b19fc0db59 100644 (file)
@@ -187,11 +187,19 @@ struct smc_connection {
        struct work_struct      close_work;     /* peer sent some closing */
 };
 
+struct smc_connect_info {
+       int                     flags;
+       int                     alen;
+       struct sockaddr         addr;
+};
+
 struct smc_sock {                              /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
+       struct smc_connect_info *connect_info;  /* connect address & flags */
+       struct work_struct      connect_work;   /* handle non-blocking connect */
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
        struct list_head        accept_q;       /* sockets to be accepted */
index 717449b1da0b73d924488d43cd04ed0871607d1b..ae5d168653cecf804b20e49f27bb39bcf0385081 100644 (file)
@@ -250,6 +250,7 @@ out:
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                     u8 expected_type)
 {
+       long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
        struct sock *clc_sk = smc->clcsock->sk;
        struct smc_clc_msg_hdr *clcm = buf;
        struct msghdr msg = {NULL, 0};
@@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        memset(&msg, 0, sizeof(struct msghdr));
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
        krflags = MSG_WAITALL;
-       smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
        if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
                smc->sk.sk_err = EPROTO;
@@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
 
 out:
+       smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
        return reason_code;
 }
 
index fa41d988174146f6888d29db743b074d7b1ee1db..ac961dfb1ea1b775b666be3fdc0f292545703533 100644 (file)
@@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
        }
        switch (sk->sk_state) {
        case SMC_INIT:
+               sk->sk_state = SMC_PEERABORTWAIT;
+               break;
        case SMC_ACTIVE:
                sk->sk_state = SMC_PEERABORTWAIT;
                release_sock(sk);
index cee66640075242fc7fe863734ebf301d261e02d6..f82886b7d1d8394adada4998159a708c3c897a82 100644 (file)
@@ -495,7 +495,8 @@ out:
 
 void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 {
-       union smc_host_cursor cfed, cons;
+       union smc_host_cursor cfed, cons, prod;
+       int sender_free = conn->rmb_desc->len;
        int to_confirm;
 
        smc_curs_write(&cons,
@@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
                       smc_curs_read(&conn->rx_curs_confirmed, conn),
                       conn);
        to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+       if (to_confirm > conn->rmbe_update_limit) {
+               smc_curs_write(&prod,
+                              smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+                              conn);
+               sender_free = conn->rmb_desc->len -
+                             smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+       }
 
        if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
            force ||
            ((to_confirm > conn->rmbe_update_limit) &&
-            ((to_confirm > (conn->rmb_desc->len / 2)) ||
+            ((sender_free <= (conn->rmb_desc->len / 2)) ||
              conn->local_rx_ctrl.prod_flags.write_blocked))) {
                if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
                    conn->alert_token_local) { /* connection healthy */
index 8a109012608a6132a65293c86cd175426b851cbe..85633622c94d011796517feb4d935b7ccba68445 100644 (file)
@@ -117,10 +117,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events);
-static __poll_t sock_poll_mask(struct file *file, __poll_t);
-static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
+static __poll_t sock_poll(struct file *file,
+                             struct poll_table_struct *wait);
 static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long compat_sock_ioctl(struct file *file,
@@ -143,8 +141,6 @@ static const struct file_operations socket_file_ops = {
        .llseek =       no_llseek,
        .read_iter =    sock_read_iter,
        .write_iter =   sock_write_iter,
-       .get_poll_head = sock_get_poll_head,
-       .poll_mask =    sock_poll_mask,
        .poll =         sock_poll,
        .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -1130,48 +1126,16 @@ out_release:
 }
 EXPORT_SYMBOL(sock_create_lite);
 
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       if (!sock->ops->poll_mask)
-               return NULL;
-       sock_poll_busy_loop(sock, events);
-       return sk_sleep(sock->sk);
-}
-
-static __poll_t sock_poll_mask(struct file *file, __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       /*
-        * We need to be sure we are in sync with the socket flags modification.
-        *
-        * This memory barrier is paired in the wq_has_sleeper.
-        */
-       smp_mb();
-
-       /* this socket can poll_ll so tell the system call */
-       return sock->ops->poll_mask(sock, events) |
-               (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0);
-}
-
 /* No kernel lock held - perfect */
 static __poll_t sock_poll(struct file *file, poll_table *wait)
 {
        struct socket *sock = file->private_data;
-       __poll_t events = poll_requested_events(wait), mask = 0;
-
-       if (sock->ops->poll) {
-               sock_poll_busy_loop(sock, events);
-               mask = sock->ops->poll(file, sock, wait);
-       } else if (sock->ops->poll_mask) {
-               sock_poll_wait(file, sock_get_poll_head(file, events), wait);
-               mask = sock->ops->poll_mask(sock, events);
-       }
+       __poll_t events = poll_requested_events(wait);
 
-       return mask | sock_poll_busy_flag(sock);
+       sock_poll_busy_loop(sock, events);
+       if (!sock->ops->poll)
+               return 0;
+       return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)
index 1a96951835999091c81ba451700f0a74565d9c59..625acb27efcc272ccdc0f60d4d693d6761ed139b 100644 (file)
@@ -35,7 +35,6 @@ struct _strp_msg {
         */
        struct strp_msg strp;
        int accum_len;
-       int early_eaten;
 };
 
 static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
        head = strp->skb_head;
        if (head) {
                /* Message already in progress */
-
-               stm = _strp_msg(head);
-               if (unlikely(stm->early_eaten)) {
-                       /* Already some number of bytes on the receive sock
-                        * data saved in skb_head, just indicate they
-                        * are consumed.
-                        */
-                       eaten = orig_len <= stm->early_eaten ?
-                               orig_len : stm->early_eaten;
-                       stm->early_eaten -= eaten;
-
-                       return eaten;
-               }
-
                if (unlikely(orig_offset)) {
                        /* Getting data with a non-zero offset when a message is
                         * in progress is not expected. If it does happen, we
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                                }
 
                                stm->accum_len += cand_len;
+                               eaten += cand_len;
                                strp->need_bytes = stm->strp.full_len -
                                                       stm->accum_len;
-                               stm->early_eaten = cand_len;
                                STRP_STATS_ADD(strp->stats.bytes, cand_len);
                                desc->count = 0; /* Stop reading socket */
                                break;
@@ -392,7 +377,7 @@ static int strp_read_sock(struct strparser *strp)
 /* Lower sock lock held */
 void strp_data_ready(struct strparser *strp)
 {
-       if (unlikely(strp->stopped))
+       if (unlikely(strp->stopped) || strp->paused)
                return;
 
        /* This check is needed to synchronize with do_strp_work.
@@ -407,9 +392,6 @@ void strp_data_ready(struct strparser *strp)
                return;
        }
 
-       if (strp->paused)
-               return;
-
        if (strp->need_bytes) {
                if (strp_peek_len(strp) < strp->need_bytes)
                        return;
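
The strparser hunks drop the early_eaten replay counter: bytes that extend a partial message are now reported as consumed immediately (eaten += cand_len) rather than being credited back on the next receive, and strp_data_ready() bails out while paused before doing any work. A toy model of the new accounting (greatly simplified, assuming a single in-progress message):

    #include <stdio.h>

    struct msg { int full_len, accum_len; };

    /* returns how many of 'avail' bytes the parser consumed */
    static int strp_recv_model(struct msg *m, int avail)
    {
        int need = m->full_len - m->accum_len;
        int cand = avail < need ? avail : need;

        m->accum_len += cand;
        return cand;            /* accounted right away, no replay */
    }

    int main(void)
    {
        struct msg m = { .full_len = 100, .accum_len = 0 };
        printf("eaten=%d\n", strp_recv_model(&m, 60));  /* 60 */
        printf("eaten=%d\n", strp_recv_model(&m, 60));  /* 40 */
        return 0;
    }
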
index 3c85af058227d14bda8d9f598ec45e7b8db1785e..3fabf9f6a0f9d92eaccbc33a9600ca2d1370aa18 100644 (file)
@@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
                task->tk_status = -EAGAIN;
                goto out_unlock;
        }
-       if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
-               req->rq_xid = xprt_alloc_xid(xprt);
        ret = true;
 out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task)
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
-       return (__force __be32)xprt->xid++;
+       __be32 xid;
+
+       spin_lock(&xprt->reserve_lock);
+       xid = (__force __be32)xprt->xid++;
+       spin_unlock(&xprt->reserve_lock);
+       return xid;
 }
 
 static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task)
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
+       req->rq_xid     = xprt_alloc_xid(xprt);
        req->rq_connect_cookie = xprt->connect_cookie - 1;
        req->rq_bytes_sent = 0;
        req->rq_snd_buf.len = 0;
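
xprt_request_init() now draws the XID once, when the request slot is set up, and xprt_alloc_xid() serializes the counter increment under reserve_lock, so concurrent tasks can no longer race the unlocked increment or re-roll an XID at transmit time. The locking in isolation (pthread mutex in place of the kernel spinlock):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t reserve_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t xid_counter;

    static uint32_t alloc_xid(void)
    {
        uint32_t xid;

        pthread_mutex_lock(&reserve_lock);
        xid = xid_counter++;       /* serialized: no duplicate XIDs */
        pthread_mutex_unlock(&reserve_lock);
        return xid;
    }

    int main(void)
    {
        printf("xid0=%u xid1=%u\n", alloc_xid(), alloc_xid());
        return 0;
    }
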
index 9f666e0650e23c0d4275ae219c23c5e301df5ac4..2830709957bddeb13adf0f352abb9aaacba3ec55 100644 (file)
@@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
 }
 
 /* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer
+ * Returns true if the message should be dropped by the caller, i.e., if it
+ * is a trial message or we are inside the trial period. Otherwise false.
  */
 static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
                                     struct tipc_media_addr *maddr,
@@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
        }
 
+       /* Accept regular link requests/responses only after trial period */
        if (mtyp != DSC_TRIAL_MSG)
-               return false;
+               return trial;
 
        sugg_addr = tipc_node_try_addr(net, peer_id, src);
        if (sugg_addr)
@@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t)
 {
        struct tipc_discoverer *d = from_timer(d, t, timer);
        struct tipc_net *tn = tipc_net(d->net);
-       u32 self = tipc_own_addr(d->net);
        struct tipc_media_addr maddr;
        struct sk_buff *skb = NULL;
        struct net *net = d->net;
@@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t)
                goto exit;
        }
 
-       /* Did we just leave the address trial period ? */
-       if (!self && !time_before(jiffies, tn->addr_trial_end)) {
-               self = tn->trial_addr;
-               tipc_net_finalize(net, self);
-               msg_set_prevnode(buf_msg(d->skb), self);
+       /* Trial period over ? */
+       if (!time_before(jiffies, tn->addr_trial_end)) {
+               /* Did we just leave it ? */
+               if (!tipc_own_addr(net))
+                       tipc_net_finalize(net, tn->trial_addr);
+
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+               msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
        }
 
        /* Adjust timeout interval according to discovery phase */
index 4fbaa0464405370601cb2fd1dd3b03733836d342..a7f6964c3a4b725a7cd06411dc1b5f3d48df778d 100644 (file)
@@ -121,12 +121,17 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
 
 void tipc_net_finalize(struct net *net, u32 addr)
 {
-       tipc_set_node_addr(net, addr);
-       smp_mb();
-       tipc_named_reinit(net);
-       tipc_sk_reinit(net);
-       tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
-                            TIPC_CLUSTER_SCOPE, 0, addr);
+       struct tipc_net *tn = tipc_net(net);
+
+       spin_lock_bh(&tn->node_list_lock);
+       if (!tipc_own_addr(net)) {
+               tipc_set_node_addr(net, addr);
+               tipc_named_reinit(net);
+               tipc_sk_reinit(net);
+               tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+                                    TIPC_CLUSTER_SCOPE, 0, addr);
+       }
+       spin_unlock_bh(&tn->node_list_lock);
 }
 
 void tipc_net_stop(struct net *net)
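
tipc_net_finalize() becomes a check-and-set under node_list_lock: concurrent discoverers may race to finalize, but only the first caller binds the address and publishes; later calls are no-ops. Modeled:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t node_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned own_addr;

    static void net_finalize(unsigned addr)
    {
        pthread_mutex_lock(&node_list_lock);
        if (!own_addr)          /* first finalizer wins */
            own_addr = addr;    /* + reinit/publish in the real code */
        pthread_mutex_unlock(&node_list_lock);
    }

    int main(void)
    {
        net_finalize(0x1001);
        net_finalize(0x2002);   /* no-op: already finalized */
        printf("own_addr=%#x\n", own_addr);
        return 0;
    }
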
index 6a44eb812baf4a2fe31eeb55b04023f9f402666b..0453bd451ce80c1935bb6588facc0f2c23ae8644 100644 (file)
@@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
 }
 
 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
+ * Returns a suggested address if any, otherwise 0
  */
 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
 {
@@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
        if (n) {
                addr = n->addr;
                tipc_node_put(n);
+               return addr;
        }
-       /* Even this node may be in trial phase */
+
+       /* Even this node may be in conflict */
        if (tn->trial_addr == addr)
                return tipc_node_suggest_addr(net, addr);
 
-       return addr;
+       return 0;
 }
 
 void tipc_node_check_dest(struct net *net, u32 addr,
index 14a5d055717d2a7b95ea353b15f53dfb81a39515..930852c54d7a6e97207c61a7c942e487781457e7 100644 (file)
@@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 
 /**
- * tipc_poll - read pollmask
+ * tipc_poll - read and possibly block on pollmask
  * @file: file structure associated with the socket
  * @sock: socket for which to calculate the poll bits
+ * @wait: poll table; used to register the caller on the socket's wait queue
  *
  * Returns pollmask value
  *
@@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
  * imply that the operation will succeed, merely that it should be performed
  * and will not block.
  */
-static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
+                             poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        __poll_t revents = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
@@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = tipc_shutdown,
@@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
@@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
index a127d61e8af984d3aaefde49c94f48a9a9187d53..301f224304698950544088c16518ea2e14ff41a6 100644 (file)
@@ -712,7 +712,7 @@ static int __init tls_register(void)
        build_protos(tls_prots[TLSV4], &tcp_prot);
 
        tls_sw_proto_ops = inet_stream_ops;
-       tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
+       tls_sw_proto_ops.poll = tls_sw_poll;
        tls_sw_proto_ops.splice_read = tls_sw_splice_read;
 
 #ifdef CONFIG_TLS_DEVICE
index f127fac88acfe0046b0a7dd55bab4d6d486de105..1f3d9789af30fb88cf9e7550b40dcda1e897e262 100644 (file)
@@ -440,7 +440,7 @@ alloc_encrypted:
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (!ret)
                                continue;
-                       if (ret == -EAGAIN)
+                       if (ret < 0)
                                goto send_end;
 
                        copied -= try_to_copy;
@@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
                        return NULL;
                }
 
+               if (sk->sk_shutdown & RCV_SHUTDOWN)
+                       return NULL;
+
                if (sock_flag(sk, SOCK_DONE))
                        return NULL;
 
@@ -701,6 +704,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
        nsg = skb_to_sgvec(skb, &sgin[1],
                           rxm->offset + tls_ctx->rx.prepend_size,
                           rxm->full_len - tls_ctx->rx.prepend_size);
+       if (nsg < 0) {
+               ret = nsg;
+               goto out;
+       }
 
        tls_make_aad(ctx->rx_aad_ciphertext,
                     rxm->full_len - tls_ctx->rx.overhead_size,
@@ -712,6 +719,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                                rxm->full_len - tls_ctx->rx.overhead_size,
                                skb, sk->sk_allocation);
 
+out:
        if (sgin != &sgin_arr[0])
                kfree(sgin);
 
@@ -919,22 +927,23 @@ splice_read_end:
        return copied ? : err;
 }
 
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait)
 {
+       unsigned int ret;
        struct sock *sk = sock->sk;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       __poll_t mask;
 
-       /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
-       mask = ctx->sk_poll_mask(sock, events);
+       /* Grab POLLOUT and POLLHUP from the underlying socket */
+       ret = ctx->sk_poll(file, sock, wait);
 
-       /* Clear EPOLLIN bits, and set based on recv_pkt */
-       mask &= ~(EPOLLIN | EPOLLRDNORM);
+       /* Clear POLLIN bits, and set based on recv_pkt */
+       ret &= ~(POLLIN | POLLRDNORM);
        if (ctx->recv_pkt)
-               mask |= EPOLLIN | EPOLLRDNORM;
+               ret |= POLLIN | POLLRDNORM;
 
-       return mask;
+       return ret;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1191,7 +1200,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                sk->sk_data_ready = tls_data_ready;
                write_unlock_bh(&sk->sk_callback_lock);
 
-               sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+               sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
 
                strp_check_rcv(&sw_ctx_rx->strp);
        }
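
tls_sw_poll() keeps delegating to the saved transport ->poll for the OUT/HUP bits, then rewrites the IN bits from its own state, since readability now means "a decrypted record is queued", not "ciphertext sits on the TCP queue". A small model (mask values defined locally; the stub stands in for ctx->sk_poll):

    #include <stdio.h>

    #define POLLIN     0x001u
    #define POLLOUT    0x004u
    #define POLLRDNORM 0x040u

    /* stands in for the saved transport poll hook (ctx->sk_poll) */
    static unsigned tcp_poll_stub(void) { return POLLOUT | POLLIN; }

    static unsigned tls_poll_model(int have_decrypted_record)
    {
        unsigned mask = tcp_poll_stub();  /* OUT/HUP from transport */

        mask &= ~(POLLIN | POLLRDNORM);   /* IN reflects TLS records */
        if (have_decrypted_record)
            mask |= POLLIN | POLLRDNORM;
        return mask;
    }

    int main(void)
    {
        printf("no record:  %#x\n", tls_poll_model(0));
        printf("record rdy: %#x\n", tls_poll_model(1));
        return 0;
    }
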
index 95b02a71fd47161735c51988463e5f5e4a7d44b3..e5473c03d667ad51308c3e8b705f3b1187f619e8 100644 (file)
@@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
 static int unix_socketpair(struct socket *, struct socket *);
 static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int);
-static __poll_t unix_poll_mask(struct socket *, __poll_t);
-static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t);
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
+                                   poll_table *);
 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
@@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_poll_mask,
+       .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = {
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
@@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        return err;
 }
 
-static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+                                   poll_table *wait)
 {
        struct sock *sk = sock->sk, *other;
-       int writable;
-       __poll_t mask = 0;
+       unsigned int writable;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
        }
 
        /* No write status requested, avoid expensive OUT tests. */
-       if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+       if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
        writable = unix_writable(sk);
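
With ->poll restored, the requested event set is no longer passed as an argument; unix_dgram_poll() recovers it via poll_requested_events(wait) so the expensive peer-writability probe can still be skipped when the caller never asked for write events. A condensed sketch of that optimization, with expensive_writable_check() as a hypothetical stand-in for the unix_writable()/peer logic:

    static __poll_t my_dgram_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
    {
            struct sock *sk = sock->sk;
            __poll_t mask = 0;

            sock_poll_wait(file, sk_sleep(sk), wait);
            /* ... cheap error/readability checks fill in mask ... */

            /* poll_requested_events() yields ~0 when the mask is unknown,
             * so the slow path still runs for plain poll() callers. */
            if (!(poll_requested_events(wait) &
                  (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT)))
                    return mask;

            if (expensive_writable_check(sk))
                    mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
            return mask;
    }
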
index bb5d5fa68c357af4962602b2bced2164c6e5ab44..c1076c19b8580688ff041f71aee0d05ce0906030 100644 (file)
@@ -850,11 +850,18 @@ static int vsock_shutdown(struct socket *sock, int mode)
        return err;
 }
 
-static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
-       struct sock *sk = sock->sk;
-       struct vsock_sock *vsk = vsock_sk(sk);
-       __poll_t mask = 0;
+       struct sock *sk;
+       __poll_t mask;
+       struct vsock_sock *vsk;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
@@ -1084,7 +1091,7 @@ static const struct proto_ops vsock_dgram_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
@@ -1842,7 +1849,7 @@ static const struct proto_ops vsock_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = vsock_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = vsock_listen,
        .shutdown = vsock_shutdown,
index 8e03bd3f3668b573c4d61a786e90a238abe9fe66..5d3cce9e8744d5207753107aeb55518f2848f50a 100644 (file)
@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
                return -ENODEV;
        }
 
-       if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+       if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
                return virtio_transport_send_pkt_loopback(vsock, pkt);
 
        if (pkt->reply)
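
The one-liner above fixes a truncation: virtio_vsock_hdr's dst_cid is a 64-bit little-endian field, so decoding it with le32_to_cpu() silently drops the upper half and loopback detection fails for CIDs above 2^32. A standalone demonstration (plain C; get_le32()/get_le64() mimic the kernel accessors):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static uint64_t get_le64(const uint8_t *p)
    {
            return (uint64_t)get_le32(p) | (uint64_t)get_le32(p + 4) << 32;
    }

    int main(void)
    {
            /* wire encoding of dst_cid = 0x100000002 */
            uint8_t wire[8] = { 2, 0, 0, 0, 1, 0, 0, 0 };

            printf("32-bit decode: 0x%x\n", get_le32(wire));        /* 0x2 */
            printf("64-bit decode: 0x%llx\n",
                   (unsigned long long)get_le64(wire));     /* 0x100000002 */
            return 0;
    }
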
index c7bbe5f0aae8839bdfe5ac7b7bd02c6aad8ac8dc..80bc986c79e5aea8d50121be481833738a1d50b7 100644 (file)
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
                params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
                                         BIT(NL80211_STA_FLAG_MFP) |
                                         BIT(NL80211_STA_FLAG_AUTHORIZED);
+               break;
        default:
                return -EINVAL;
        }
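
The added break matters because, without it, the fully-populated case in parse_station_flags() falls through into default and returns -EINVAL even for valid input. A minimal, runnable reproduction of this class of fall-through bug:

    #include <stdio.h>

    /* Stand-in for the switch in parse_station_flags(). */
    static int classify(int iftype)
    {
            int mask = 0;

            switch (iftype) {
            case 1:
                    mask = 0x7;     /* valid configuration */
                    break;          /* the fix: without it, control falls
                                     * into default and reports an error */
            default:
                    return -22;     /* -EINVAL */
            }
            return mask;
    }

    int main(void)
    {
            printf("classify(1) = %d\n", classify(1));  /* 0x7, not -22 */
            return 0;
    }
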
@@ -6231,7 +6232,7 @@ do {                                                                          \
                                  nl80211_check_s32);
        /*
         * Check HT operation mode based on
-        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        * IEEE 802.11-2016 9.4.2.57 HT Operation element.
         */
        if (tb[NL80211_MESHCONF_HT_OPMODE]) {
                ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -6241,22 +6242,9 @@ do {                                                                         \
                                  IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
                        return -EINVAL;
 
-               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
-                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                       return -EINVAL;
+               /* NON_HT_STA bit is reserved, but some programs set it */
+               ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
 
-               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
-                               return -EINVAL;
-                       break;
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                               return -EINVAL;
-                       break;
-               }
                cfg->ht_opmode = ht_opmode;
                mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
@@ -10962,9 +10950,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                                    rem) {
                        u8 *mask_pat;
 
-                       nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        nl80211_packet_pattern_policy,
-                                        info->extack);
+                       err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                              nl80211_packet_pattern_policy,
+                                              info->extack);
+                       if (err)
+                               goto error;
+
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -11213,8 +11204,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                nl80211_packet_pattern_policy, NULL);
+               err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                      nl80211_packet_pattern_policy, NULL);
+               if (err)
+                       return err;
+
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
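
Both packet-pattern hunks enforce the same rule: nla_parse_nested() validates against a policy and can fail, so its return value must be propagated before the attribute table is trusted. A kernel-style sketch of the checked pattern; MAX_ATTR, ATTR_MASK and ATTR_PATTERN are hypothetical attribute names:

    static int parse_pattern(struct nlattr *pat, struct nlattr **tb,
                             const struct nla_policy *policy,
                             struct netlink_ext_ack *extack)
    {
            int err;

            err = nla_parse_nested(tb, MAX_ATTR, pat, policy, extack);
            if (err)
                    return err;        /* tb[] is unreliable on failure */

            if (!tb[ATTR_MASK] || !tb[ATTR_PATTERN])
                    return -EINVAL;    /* parsed fine, fields missing */
            return 0;
    }
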
@@ -14930,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
 
 static int __nl80211_rx_control_port(struct net_device *dev,
-                                    const u8 *buf, size_t len,
-                                    const u8 *addr, u16 proto,
+                                    struct sk_buff *skb,
                                     bool unencrypted, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+       struct ethhdr *ehdr = eth_hdr(skb);
+       const u8 *addr = ehdr->h_source;
+       u16 proto = be16_to_cpu(skb->protocol);
        struct sk_buff *msg;
        void *hdr;
+       struct nlattr *frame;
+
        u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
 
        if (!nlportid)
                return -ENOENT;
 
-       msg = nlmsg_new(100 + len, gfp);
+       msg = nlmsg_new(100 + skb->len, gfp);
        if (!msg)
                return -ENOMEM;
 
@@ -14957,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
            nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
                              NL80211_ATTR_PAD) ||
-           nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
            nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
            (unencrypted && nla_put_flag(msg,
                                         NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
                goto nla_put_failure;
 
+       frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
+       if (!frame)
+               goto nla_put_failure;
+
+       skb_copy_bits(skb, 0, nla_data(frame), skb->len);
        genlmsg_end(msg, hdr);
 
        return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14974,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev,
 }
 
 bool cfg80211_rx_control_port(struct net_device *dev,
-                             const u8 *buf, size_t len,
-                             const u8 *addr, u16 proto, bool unencrypted)
+                             struct sk_buff *skb, bool unencrypted)
 {
        int ret;
 
-       trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted);
-       ret = __nl80211_rx_control_port(dev, buf, len, addr, proto,
-                                       unencrypted, GFP_ATOMIC);
+       trace_cfg80211_rx_control_port(dev, skb, unencrypted);
+       ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
        trace_cfg80211_return_bool(ret == 0);
        return ret == 0;
 }
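
Handing the skb down to __nl80211_rx_control_port() lets the netlink message be built without linearizing the frame: nla_reserve() leaves a hole for the attribute and skb_copy_bits() fills it even from paged or fragmented data. The core of that technique, as a hedged sketch:

    static int put_frame_attr(struct sk_buff *msg, struct sk_buff *skb)
    {
            struct nlattr *frame;

            frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
            if (!frame)
                    return -ENOBUFS;

            /* handles nonlinear skbs; no skb_linearize() needed */
            return skb_copy_bits(skb, 0, nla_data(frame), skb->len);
    }
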
index bbe6298e4bb9e09bd59a2c602af9f6c360829a3d..4fc66a117b7d74f86a1589a7a02b88f02f203b7d 100644 (file)
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
                 * as some drivers used this to restore its orig_* reg domain.
                 */
                if (initiator == NL80211_REGDOM_SET_BY_CORE &&
-                   wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+                   wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
+                   !(wiphy->regulatory_flags &
+                     REGULATORY_WIPHY_SELF_MANAGED))
                        reg_call_notifier(wiphy, lr);
                return;
        }
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
        }
 }
 
-static bool reg_only_self_managed_wiphys(void)
-{
-       struct cfg80211_registered_device *rdev;
-       struct wiphy *wiphy;
-       bool self_managed_found = false;
-
-       ASSERT_RTNL();
-
-       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-               wiphy = &rdev->wiphy;
-               if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
-                       self_managed_found = true;
-               else
-                       return false;
-       }
-
-       /* make sure at least one self-managed wiphy exists */
-       return self_managed_found;
-}
-
 /*
  * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
  * Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
        spin_unlock(&reg_requests_lock);
 
        notify_self_managed_wiphys(reg_request);
-       if (reg_only_self_managed_wiphys()) {
-               reg_free_request(reg_request);
-               return;
-       }
 
        reg_process_hint(reg_request);
 
index 2b417a2fe63ffb564b744e8e159c3bdbcf43a4ed..7c73510b161f3b84ce3d7d24dd3b9aa3b472823e 100644 (file)
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
 );
 
 TRACE_EVENT(cfg80211_rx_control_port,
-       TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len,
-                const u8 *addr, u16 proto, bool unencrypted),
-       TP_ARGS(netdev, buf, len, addr, proto, unencrypted),
+       TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
+                bool unencrypted),
+       TP_ARGS(netdev, skb, unencrypted),
        TP_STRUCT__entry(
                NETDEV_ENTRY
-               MAC_ENTRY(addr)
+               __field(int, len)
+               MAC_ENTRY(from)
                __field(u16, proto)
                __field(bool, unencrypted)
        ),
        TP_fast_assign(
                NETDEV_ASSIGN;
-               MAC_ASSIGN(addr, addr);
-               __entry->proto = proto;
+               __entry->len = skb->len;
+               MAC_ASSIGN(from, eth_hdr(skb)->h_source);
+               __entry->proto = be16_to_cpu(skb->protocol);
                __entry->unencrypted = unencrypted;
        ),
-       TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s",
-                 NETDEV_PR_ARG, MAC_PR_ARG(addr),
+       TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
+                 NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
                  __entry->proto, BOOL_TO_STR(__entry->unencrypted))
 );
 
index f93365ae0fdd76b6aab9b6227cfcbb96f41eed82..d49aa79b79970d403b5c165d4000b2aa1d493442 100644 (file)
@@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       x25_accept,
        .getname =      x25_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        x25_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = compat_x25_ioctl,
index 36919a254ba370c37b4e199bfd68c285e25fdeb6..72335c2e8108996d07702086f1f1391faa33fd7d 100644 (file)
@@ -118,6 +118,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
        u64 addr;
        int err;
 
+       if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+               return -EINVAL;
+
        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
@@ -196,8 +199,11 @@ static void xsk_destruct_skb(struct sk_buff *skb)
 {
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);
+       unsigned long flags;
 
+       spin_lock_irqsave(&xs->tx_completion_lock, flags);
        WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
+       spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
 
        sock_wfree(skb);
 }
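
xsk_destruct_skb() can run from the driver's TX-completion path, i.e. in softirq or hard-IRQ context, while process context produces to the same completion ring, hence the new spin_lock_irqsave() pair around the ring update. The general pattern (my_sock, to_my_sock() and produce_completion() are illustrative stand-ins):

    static void my_destruct_skb(struct sk_buff *skb)
    {
            struct my_sock *ms = to_my_sock(skb->sk);
            unsigned long flags;

            /* irqsave form: this may already run with IRQs disabled,
             * and process context must exclude the IRQ path too */
            spin_lock_irqsave(&ms->completion_lock, flags);
            produce_completion(ms, skb);
            spin_unlock_irqrestore(&ms->completion_lock, flags);

            sock_wfree(skb);
    }
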
@@ -212,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
        struct sk_buff *skb;
        int err = 0;
 
-       if (unlikely(!xs->tx))
-               return -ENOBUFS;
-
        mutex_lock(&xs->mutex);
 
        while (xskq_peek_desc(xs->tx, &desc)) {
@@ -227,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                        goto out;
                }
 
-               if (xskq_reserve_addr(xs->umem->cq)) {
-                       err = -EAGAIN;
+               if (xskq_reserve_addr(xs->umem->cq))
                        goto out;
-               }
 
-               len = desc.len;
-               if (unlikely(len > xs->dev->mtu)) {
-                       err = -EMSGSIZE;
+               if (xs->queue_id >= xs->dev->real_num_tx_queues)
                        goto out;
-               }
-
-               if (xs->queue_id >= xs->dev->real_num_tx_queues) {
-                       err = -ENXIO;
-                       goto out;
-               }
 
+               len = desc.len;
                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
@@ -265,15 +259,15 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                skb->destructor = xsk_destruct_skb;
 
                err = dev_direct_xmit(skb, xs->queue_id);
+               xskq_discard_desc(xs->tx);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
-                       err = -EAGAIN;
-                       /* SKB consumed by dev_direct_xmit() */
+                       /* SKB completed but not sent */
+                       err = -EBUSY;
                        goto out;
                }
 
                sent_frame = true;
-               xskq_discard_desc(xs->tx);
        }
 
 out:
@@ -294,15 +288,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
+       if (unlikely(!xs->tx))
+               return -ENOBUFS;
        if (need_wait)
                return -EOPNOTSUPP;
 
        return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
 }
 
-static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events)
+static unsigned int xsk_poll(struct file *file, struct socket *sock,
+                            struct poll_table_struct *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
 
@@ -693,7 +690,7 @@ static const struct proto_ops xsk_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = xsk_poll_mask,
+       .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -751,6 +748,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
 
        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);
+       spin_lock_init(&xs->tx_completion_lock);
 
        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
index ef6a6f0ec949049de2fc03d1a675ee0c1f48ba5e..52ecaf770642785140358ea4ff2713ccaca8a489 100644 (file)
@@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
        return (entries > dcnt) ? dcnt : entries;
 }
 
-static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
-{
-       return q->nentries - (producer - q->cons_tail);
-}
-
 static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
 {
-       u32 free_entries = xskq_nb_free_lazy(q, producer);
+       u32 free_entries = q->nentries - (producer - q->cons_tail);
 
        if (free_entries >= dcnt)
                return free_entries;
@@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
 {
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
-       if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
+       if (xskq_nb_free(q, q->prod_tail, 1) == 0)
                return -ENOSPC;
 
        ring->desc[q->prod_tail++ & q->ring_mask] = addr;
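
With the lazy helper folded away, free space is computed inline as nentries - (producer - cons_tail); because the counters are free-running u32 values, unsigned wraparound keeps the formula correct across overflow. A standalone check of that ring arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the xskq_nb_free() computation above: free slots in a ring
     * with free-running 32-bit producer/consumer counters.
     */
    static uint32_t ring_free(uint32_t nentries, uint32_t prod, uint32_t cons)
    {
            return nentries - (prod - cons);   /* wrap-safe in unsigned */
    }

    int main(void)
    {
            /* producer has wrapped past 2^32, consumer has not */
            uint32_t prod = 5, cons = 0xfffffffeu;

            printf("free = %u of 16\n", ring_free(16, prod, cons)); /* 9 */
            return 0;
    }
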
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
new file mode 100644 (file)
index 0000000..8ae4940
--- /dev/null
@@ -0,0 +1,49 @@
+cpustat
+fds_example
+lathist
+load_sock_ops
+lwt_len_hist
+map_perf_test
+offwaketime
+per_socket_stats_example
+sampleip
+sock_example
+sockex1
+sockex2
+sockex3
+spintest
+syscall_nrs.h
+syscall_tp
+task_fd_query
+tc_l2_redirect
+test_cgrp2_array_pin
+test_cgrp2_attach
+test_cgrp2_attach2
+test_cgrp2_sock
+test_cgrp2_sock2
+test_current_task_under_cgroup
+test_lru_dist
+test_map_in_map
+test_overhead
+test_probe_write_user
+trace_event
+trace_output
+tracex1
+tracex2
+tracex3
+tracex4
+tracex5
+tracex6
+tracex7
+xdp1
+xdp2
+xdp_adjust_tail
+xdp_fwd
+xdp_monitor
+xdp_redirect
+xdp_redirect_cpu
+xdp_redirect_map
+xdp_router_ipv4
+xdp_rxq_info
+xdp_tx_iptunnel
+xdpsock
index 95c16324760c0be1af8be927e1adffae0b582525..0b6f22feb2c9ce37787ea5384276c85a4e1171eb 100644 (file)
@@ -6,6 +6,7 @@
  */
 #define KBUILD_MODNAME "foo"
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
@@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
        return 0;
 }
 
-struct vlan_hdr {
-       uint16_t h_vlan_TCI;
-       uint16_t h_vlan_encapsulated_proto;
-};
-
 SEC("varlen")
 int handle_ingress(struct __sk_buff *skb)
 {
index 6caf47afa635ca680bb56b43ef78c7f62b293dd7..9d6dcaa9db9206ebe6a5bb14fe98c7001a543b6d 100644 (file)
@@ -6,6 +6,7 @@
  */
 #define _GNU_SOURCE
 #include <sched.h>
+#include <errno.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <asm/unistd.h>
@@ -44,8 +45,13 @@ static void test_task_rename(int cpu)
                exit(1);
        }
        start_time = time_get_ns();
-       for (i = 0; i < MAX_CNT; i++)
-               write(fd, buf, sizeof(buf));
+       for (i = 0; i < MAX_CNT; i++) {
+               if (write(fd, buf, sizeof(buf)) < 0) {
+                       printf("task rename failed: %s\n", strerror(errno));
+                       close(fd);
+                       return;
+               }
+       }
        printf("task_rename:%d: %lld events per sec\n",
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
        close(fd);
@@ -63,8 +69,13 @@ static void test_urandom_read(int cpu)
                exit(1);
        }
        start_time = time_get_ns();
-       for (i = 0; i < MAX_CNT; i++)
-               read(fd, buf, sizeof(buf));
+       for (i = 0; i < MAX_CNT; i++) {
+               if (read(fd, buf, sizeof(buf)) < 0) {
+                       printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+                       close(fd);
+                       return;
+               }
+       }
        printf("urandom_read:%d: %lld events per sec\n",
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
        close(fd);
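
The benchmark loops previously discarded the read()/write() return values, so a failing descriptor would spin silently (and modern glibc flags the unused results via warn_unused_result). The checked-loop shape used by both fixes, as a self-contained sketch:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Abort the measurement on the first I/O failure instead of
     * timing a loop of failing syscalls.
     */
    static int pump(int fd, long iterations)
    {
            char buf[64];

            for (long i = 0; i < iterations; i++) {
                    if (read(fd, buf, sizeof(buf)) < 0) {
                            fprintf(stderr, "read: %s\n", strerror(errno));
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            int fd = open("/dev/urandom", O_RDONLY);

            if (fd < 0)
                    return 1;
            return pump(fd, 1000) ? 1 : 0;
    }
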
index 1fa1becfa641510ae67db4d0ea64c3971f6d2f4d..d08046ab81f043505e0ea42a2e8c85661ae68f76 100644 (file)
@@ -122,6 +122,16 @@ static void print_stacks(void)
        }
 }
 
+static inline int generate_load(void)
+{
+       if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
+               printf("failed to generate some load with dd: %s\n", strerror(errno));
+               return -1;
+       }
+
+       return 0;
+}
+
 static void test_perf_event_all_cpu(struct perf_event_attr *attr)
 {
        int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
@@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
        }
-       system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+       if (generate_load() < 0) {
+               error = 1;
+               goto all_cpu_err;
+       }
        print_stacks();
 all_cpu_err:
        for (i--; i >= 0; i--) {
@@ -156,7 +170,7 @@ all_cpu_err:
 
 static void test_perf_event_task(struct perf_event_attr *attr)
 {
-       int pmu_fd;
+       int pmu_fd, error = 0;
 
        /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
         * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr)
        }
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
-       system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+       if (generate_load() < 0) {
+               error = 1;
+               goto err;
+       }
        print_stacks();
+err:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
        close(pmu_fd);
+       if (error)
+               int_exit(0);
 }
 
 static void test_bpf_perf_event(void)
index b9c9549c4c272a944d818cef153e496f96dcec84..4bde9d066c4616430533cef02e32d295b0329b11 100755 (executable)
@@ -16,8 +16,8 @@
 BPF_FILE=xdp2skb_meta_kern.o
 DIR=$(dirname $0)
 
-export TC=/usr/sbin/tc
-export IP=/usr/sbin/ip
+[ -z "$TC" ] && TC=tc
+[ -z "$IP" ] && IP=ip
 
 function usage() {
     echo ""
@@ -53,7 +53,7 @@ function _call_cmd() {
     local allow_fail="$2"
     shift 2
     if [[ -n "$VERBOSE" ]]; then
-       echo "$(basename $cmd) $@"
+       echo "$cmd $@"
     fi
     if [[ -n "$DRYRUN" ]]; then
        return
index 6673cdb9f55cab3fb32faaca755f805e8c10ed8f..a7e94e7ff87df5f60f7a57522de77b5929e46029 100644 (file)
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
        struct ethhdr *eth = data;
        struct ipv6hdr *ip6h;
        struct iphdr *iph;
-       int out_index;
        u16 h_proto;
        u64 nh_off;
+       int rc;
 
        nh_off = sizeof(*eth);
        if (data + nh_off > data_end)
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
        fib_params.ifindex = ctx->ingress_ifindex;
 
-       out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+       rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
 
        /* verify egress index has xdp support
         * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
         * NOTE: without verification that egress index supports XDP
         *       forwarding packets are dropped.
         */
-       if (out_index > 0) {
+       if (rc == 0) {
                if (h_proto == htons(ETH_P_IP))
                        ip_decrease_ttl(iph);
                else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
                memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
                memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
-               return bpf_redirect_map(&tx_port, out_index, 0);
+               return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
        }
 
        return XDP_PASS;
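
The sample predated the helper's final semantics: bpf_fib_lookup() returns a status code (0 on a successful lookup, positive "no forward" reasons, negative errors) and writes the nexthop into the fib_params struct, so the egress device must come from fib_params.ifindex rather than the return value. Condensed from the fixed flow above (a BPF program fragment, not a standalone build):

    rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
    if (rc == 0) {
            /* lookup hit: fib_params now holds nexthop MACs + ifindex */
            memcpy(eth->h_dest,   fib_params.dmac, ETH_ALEN);
            memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
            return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
    }
    return XDP_PASS;    /* rc > 0: no route; rc < 0: lookup error */
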
index d69c8d78d3fdef775f27d97b94401fabb5ccfd72..5904b15438313399d8bfa8fe7412ed30b5342556 100644 (file)
@@ -729,7 +729,7 @@ static void kick_tx(int fd)
        int ret;
 
        ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
-       if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
+       if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
                return;
        lassert(0);
 }
index 2960e26c6ea4c756064db41cc7d39c14f058068b..2535c3677c7b66a1650fc3a1a7fb9c99684ae8ea 100644 (file)
@@ -178,6 +178,8 @@ static const char *vbe_name(u32 index)
        return "(invalid)";
 }
 
+static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
+                                     pgoff_t pgoff);
 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
                                    pgoff_t pgoff);
 
@@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
                   MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
                pos -= MBOCHS_MMIO_BAR_OFFSET;
                poff = pos & ~PAGE_MASK;
-               pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
+               pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
                map = kmap(pg);
                if (is_write)
                        memcpy(map + poff, buf, count);
@@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state)
        dev_dbg(dev, "%s: %d pages released\n", __func__, count);
 }
 
-static int mbochs_region_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mdev_state *mdev_state = vma->vm_private_data;
@@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
        return 0;
 }
 
-static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
@@ -803,29 +805,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf)
        mutex_unlock(&mdev_state->ops_lock);
 }
 
-static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf,
-                                      unsigned long page_num)
+static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
 {
        struct mbochs_dmabuf *dmabuf = buf->priv;
        struct page *page = dmabuf->pages[page_num];
 
-       return kmap_atomic(page);
+       return kmap(page);
 }
 
-static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
+static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
+                                void *vaddr)
 {
-       struct mbochs_dmabuf *dmabuf = buf->priv;
-       struct page *page = dmabuf->pages[page_num];
-
-       return kmap(page);
+       kunmap(vaddr);
 }
 
 static struct dma_buf_ops mbochs_dmabuf_ops = {
        .map_dma_buf      = mbochs_map_dmabuf,
        .unmap_dma_buf    = mbochs_unmap_dmabuf,
        .release          = mbochs_release_dmabuf,
-       .map_atomic       = mbochs_kmap_atomic_dmabuf,
        .map              = mbochs_kmap_dmabuf,
+       .unmap            = mbochs_kunmap_dmabuf,
        .mmap             = mbochs_mmap_dmabuf,
 };
 
index c8156d61678cfbc6907a9176efbccb03aa8387ce..86321f06461e9835103950242930187c62837e1d 100644 (file)
@@ -214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
 # Prefix -I with $(srctree) if it is not an absolute path.
 # skip if -I has no parameter
 addtree = $(if $(patsubst -I%,%,$(1)), \
-$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)))
+$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1))
 
 # Find all -I options and call addtree
 flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
index 34d9e9ce97c29c5e0ca78e6b06085a6b29ffd8bd..514ed63ff5710789fda060eb06cf10813a4124ad 100644 (file)
@@ -239,6 +239,7 @@ cmd_record_mcount =                                         \
             "$(CC_FLAGS_FTRACE)" ]; then                       \
                $(sub_cmd_record_mcount)                        \
        fi;
+endif # -record-mcount
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
 ifdef CONFIG_STACK_VALIDATION
@@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),)
   objtool_args += --retpoline
 endif
 endif
-endif
 
 
 ifdef CONFIG_MODVERSIONS
@@ -590,7 +590,4 @@ endif
 # We never want them to be removed automatically.
 .SECONDARY: $(targets)
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 808d09f27ad4063424211a8264297a0b3945d3c5..17ef94c635cd5dcfd23c355576a351730076dd73 100644 (file)
@@ -88,7 +88,4 @@ PHONY += $(subdir-ymn)
 $(subdir-ymn):
        $(Q)$(MAKE) $(clean)=$@
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index a763b4775d062965a82af7761aaa8cb5a28aeb8e..40867a41615ba812987100133793183e6f82a1d5 100644 (file)
@@ -54,8 +54,4 @@ PHONY += $(subdir-ym)
 $(subdir-ym):
        $(Q)$(MAKE) $(modbuiltin)=$@
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 51ca0244fc8ac4f8e2981fb9dbc9df3efe507861..ff5ca9817a85ab394740c7ec8f8459f02a9656f9 100644 (file)
@@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 $(modules):
        $(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable so we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index df4174405feb331a772abe871046d9260c43c690..dd92dbbbaa687b73f31b922187c0da15f66266e0 100644 (file)
@@ -149,8 +149,4 @@ ifneq ($(cmd_files),)
   include $(cmd_files)
 endif
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 171483bc0538d7faa5e4a34c5804a2f8e721f2ea..da56aa78d245da2835d7714d6bb81e15cb1cf3f4 100644 (file)
@@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 $(modules):
        $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir))
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 208eb2825dab017a9d3fdc0bdb8beef053b5626d..6efcead3198989d2ab2ab6772c72d8bb61c89c4e 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
 #include <stdio.h>
 int main(void)
 {
index e3b7362b0ee457b9601a8628609d8ebf78fcb09a..447857ffaf6be157841f8b0283d8ac67cb37dc5e 100755 (executable)
@@ -2606,12 +2606,6 @@ sub process {
                             "A patch subject line should describe the change not the tool that found it\n" . $herecurr);
                }
 
-# Check for old stable address
-               if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
-                       ERROR("STABLE_ADDRESS",
-                             "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr);
-               }
-
 # Check for unwanted Gerrit info
                if ($in_commit_log && $line =~ /^\s*change-id:/i) {
                        ERROR("GERRIT_CHANGE_ID",
@@ -5819,14 +5813,14 @@ sub process {
                    defined $stat &&
                    $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
                    $1 !~ /^_*volatile_*$/) {
-                       my $specifier;
-                       my $extension;
-                       my $bad_specifier = "";
                        my $stat_real;
 
                        my $lc = $stat =~ tr@\n@@;
                        $lc = $lc + $linenr;
                        for (my $count = $linenr; $count <= $lc; $count++) {
+                               my $specifier;
+                               my $extension;
+                               my $bad_specifier = "";
                                my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
                                $fmt =~ s/%%//g;
 
index 5061abcc25409c4e095e8c5b25d3a8de3fb4db68..e6239f39abadd480f285e4b541b21141337f9497 100755 (executable)
@@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz
 try_decompress 'BZh'          xy    bunzip2
 try_decompress '\135\0\0\0'   xxx   unlzma
 try_decompress '\211\114\132' xy    'lzop -d'
+try_decompress '\002!L\030'   xxx   'lz4 -d'
+try_decompress '(\265/\375'   xxx   unzstd
 
 # Bail out:
 echo "$me: Cannot find vmlinux." >&2
index 3755af0cd9f7f24c1942fd9df216c525f79d04b3..75e4e22b986adcfd07197777c5d59d5601d3c920 100755 (executable)
@@ -1,4 +1,4 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
index 94a383b21df6405f4a9f6b6c08758d6c822381d8..f63b41b0dd498d23b65b3c12fe47e3b2c87e148d 100644 (file)
@@ -171,6 +171,9 @@ struct symbol {
  * config BAZ
  *         int "BAZ Value"
  *         range 1..255
+ *
+ * Please, also check zconf.y:print_symbol() when modifying the
+ * list of property types!
  */
 enum prop_type {
        P_UNKNOWN,
index 65da87fce907ad2bc7b52adba4651dc2c32786be..5ca2df790d3cfa5f4253a33a303219aaa8fc4394 100644 (file)
@@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[])
                nread--;
 
        /* remove trailing new lines */
-       while (buf[nread - 1] == '\n')
+       while (nread > 0 && buf[nread - 1] == '\n')
                nread--;
 
        buf[nread] = 0;
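
The added nread > 0 guard stops the trim loop from walking in front of the buffer when the shell output is empty or consists only of newlines. A standalone version showing the bounded loop:

    #include <stdio.h>

    /* Trailing-newline trim as in the fixed do_shell(): without the
     * nread > 0 test, all-newline input indexes buf[-1].
     */
    static int trim_newlines(char *buf, int nread)
    {
            while (nread > 0 && buf[nread - 1] == '\n')
                    nread--;
            buf[nread] = 0;
            return nread;
    }

    int main(void)
    {
            char all_nl[] = "\n\n\n";

            printf("len = %d\n", trim_newlines(all_nl, 3));  /* 0, no UB */
            return 0;
    }
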
index 6f9b0aa32a82239b2bc1540d1b75949859ea48e9..4b68272ebdb96cb25e8d91de0d4cbc792a3079b6 100644 (file)
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
 static struct menu *current_menu, *current_entry;
 
 %}
-%expect 32
+%expect 31
 
 %union
 {
@@ -337,7 +337,7 @@ choice_block:
 
 /* if entry */
 
-if_entry: T_IF expr nl
+if_entry: T_IF expr T_EOL
 {
        printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
        menu_add_entry(NULL);
@@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu)
                        print_quoted_string(out, prop->text);
                        fputc('\n', out);
                        break;
+               case P_SYMBOL:
+                       fputs( "  symbol ", out);
+                       fprintf(out, "%s\n", prop->sym->name);
+                       break;
                default:
                        fprintf(out, "  unknown prop %d!\n", prop->type);
                        break;
index 66f08bb1cce978a1074141cc90adc7255c317ede..26de7d5aa5c89a5fd4f051c67029341f4f8849fa 100755 (executable)
@@ -152,6 +152,7 @@ regex_asm=(
 )
 regex_c=(
        '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/'
+       '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/'
        '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/'
        '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/'
        '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
@@ -245,7 +246,7 @@ exuberant()
 {
        setup_regex exuberant asm c
        all_target_sources | xargs $1 -a                        \
-       -I __initdata,__exitdata,__initconst,                   \
+       -I __initdata,__exitdata,__initconst,__ro_after_init    \
        -I __initdata_memblock                                  \
        -I __refdata,__attribute,__maybe_unused,__always_unused \
        -I __acquires,__releases,__deprecated                   \
index f7403821db7f0aafdec4a2e9a6804b1b8c2a599b..b203f7758f9765f056c3e0d07e0286c49b181253 100644 (file)
@@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
  * The src pointer is defined as Z || other info where Z is the shared secret
  * from DH and other info is an arbitrary string (see SP800-56A section
  * 5.8.1.2).
+ *
+ * 'dlen' must be a multiple of the digest size.
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
                   u8 *dst, unsigned int dlen, unsigned int zlen)
@@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 {
        uint8_t *outbuf = NULL;
        int ret;
-       size_t outbuf_len = round_up(buflen,
-                                    crypto_shash_digestsize(sdesc->shash.tfm));
+       size_t outbuf_len = roundup(buflen,
+                                   crypto_shash_digestsize(sdesc->shash.tfm));
 
        outbuf = kmalloc(outbuf_len, GFP_KERNEL);
        if (!outbuf) {
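
The switch from round_up() to roundup() is not cosmetic: the kernel's round_up() uses a bitmask trick that is only valid for power-of-two alignment, while roundup() divides and therefore works for any divisor, such as SHA-384's 48-byte digest. A standalone comparison using simplified forms of the two macros:

    #include <stdio.h>

    /* Simplified versions of the kernel macros: round_up() assumes the
     * divisor is a power of two; roundup() does not.
     */
    #define round_up(x, y)  ((((x) - 1) | ((y) - 1)) + 1)
    #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            /* pad a 100-byte request to a 48-byte digest multiple */
            printf("round_up(100, 48) = %d (not a multiple of 48)\n",
                   round_up(100, 48));          /* 112 */
            printf("roundup(100, 48)  = %d (correct)\n",
                   roundup(100, 48));           /* 144 */
            return 0;
    }
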
index f3d374d2ca045ce7325b20ad3cecb6304418d1b3..79d3709b06717a1f6452fe85b9922244b9f70381 100644 (file)
@@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
 static ssize_t sel_read_policy(struct file *filp, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
        struct policy_load_memory *plm = filp->private_data;
        int ret;
 
-       mutex_lock(&fsi->mutex);
-
        ret = avc_has_perm(&selinux_state,
                           current_sid(), SECINITSID_SECURITY,
                          SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
        if (ret)
-               goto out;
+               return ret;
 
-       ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
-out:
-       mutex_unlock(&fsi->mutex);
-       return ret;
+       return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
 }
 
 static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
@@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
        ret = -EINVAL;
        if (index >= fsi->bool_num || strcmp(name,
                                             fsi->bool_pending_names[index]))
-               goto out;
+               goto out_unlock;
 
        ret = -ENOMEM;
        page = (char *)get_zeroed_page(GFP_KERNEL);
        if (!page)
-               goto out;
+               goto out_unlock;
 
        cur_enforcing = security_get_bool_value(fsi->state, index);
        if (cur_enforcing < 0) {
                ret = cur_enforcing;
-               goto out;
+               goto out_unlock;
        }
        length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
                          fsi->bool_pending_values[index]);
-       ret = simple_read_from_buffer(buf, count, ppos, page, length);
-out:
        mutex_unlock(&fsi->mutex);
+       ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out_free:
        free_page((unsigned long)page);
        return ret;
+
+out_unlock:
+       mutex_unlock(&fsi->mutex);
+       goto out_free;
 }
 
 static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
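
Both read-side hunks restructure sel_read_policy() and sel_read_bool() so that fsi->mutex is dropped before simple_read_from_buffer(), which ends in copy_to_user() and can fault and sleep while the page is brought in. The resulting shape, as a minimal sketch (state_mutex and current_value are hypothetical):

    static ssize_t my_read(struct file *filp, char __user *ubuf,
                           size_t count, loff_t *ppos)
    {
            char *page;
            ssize_t len, ret;

            page = (char *)get_zeroed_page(GFP_KERNEL);
            if (!page)
                    return -ENOMEM;

            mutex_lock(&state_mutex);
            len = scnprintf(page, PAGE_SIZE, "%d\n", current_value);
            mutex_unlock(&state_mutex);

            /* user access may fault; no subsystem lock held here */
            ret = simple_read_from_buffer(ubuf, count, ppos, page, len);
            free_page((unsigned long)page);
            return ret;
    }
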
@@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
        unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
        const char *name = filep->f_path.dentry->d_name.name;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
                                             fsi->bool_pending_names[index]))
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
@@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        ssize_t length;
        int new_value;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        if (length)
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
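
The write-side hunks apply the mirror-image rule: memdup_user_nul() can fault as well, so the user buffer is copied and sanity-checked before fsi->mutex is taken, leaving the locked region to touch only kernel memory. A sketch of that ordering (state_mutex and current_value again hypothetical):

    static ssize_t my_write(struct file *filp, const char __user *ubuf,
                            size_t count, loff_t *ppos)
    {
            char *page;
            int new_value;
            ssize_t ret = -EINVAL;

            if (count >= PAGE_SIZE)
                    return -ENOMEM;
            if (*ppos != 0)
                    return -EINVAL;    /* no partial writes */

            page = memdup_user_nul(ubuf, count);   /* may fault: lock-free */
            if (IS_ERR(page))
                    return PTR_ERR(page);

            if (sscanf(page, "%d", &new_value) == 1) {
                    mutex_lock(&state_mutex);
                    current_value = new_value;
                    mutex_unlock(&state_mutex);
                    ret = count;
            }
            kfree(page);
            return ret;
    }
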
index 7ad226018f51674b8e97a5d6ff2aaeabd9163bbd..19de675d4504501f8a48c28302dd9ad552b70877 100644 (file)
@@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
        struct smack_known *skp = smk_of_task_struct(p);
 
        isp->smk_inode = skp;
+       isp->smk_flags |= SMK_INODE_INSTANT;
 }
 
 /*
index 69616d00481c2cdff6331d8406bb18550db7824d..b53026a72e734e29f63a76eb711458013fa70290 100644 (file)
@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
 int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                              struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        
        if (substream->append && substream->use_count > 1)
@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
                runtime->avail = runtime->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        substream->active_sensing = !params->no_active_sensing;
@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
 int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                             struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
        snd_rawmidi_drain_input(substream);
@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        return 0;
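
krealloc() may free the old buffer while the interrupt handler is still reading it, so both rawmidi paths now do a use-after-free-safe resize: allocate first, swap pointers and reset the ring indices under runtime->lock, and free the old buffer only after unlocking. The pattern in isolation (my_runtime and its fields are stand-ins):

    static int resize_buffer(struct my_runtime *rt, size_t new_size)
    {
            char *newbuf, *oldbuf;

            newbuf = kmalloc(new_size, GFP_KERNEL);
            if (!newbuf)
                    return -ENOMEM;

            spin_lock_irq(&rt->lock);
            oldbuf = rt->buffer;
            rt->buffer = newbuf;
            rt->size = new_size;
            rt->head = rt->tail = 0;   /* old indices are meaningless now */
            spin_unlock_irq(&rt->lock);

            kfree(oldbuf);             /* nobody can still reference it */
            return 0;
    }
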
index 61a07fe34cd271e60dc0c31a7dddae750c2532b1..56ca78423040f09e6d0569b651ba631105b8bd02 100644 (file)
@@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
        struct snd_seq_client *cptr = NULL;
 
        /* search for next client */
-       info->client++;
+       if (info->client < INT_MAX)
+               info->client++;
        if (info->client < 0)
                info->client = 0;
        for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
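
info->client comes straight from userspace, and incrementing it past INT_MAX is signed overflow; the guard saturates instead, and the timer hunk below applies the same fix to id.subdevice. A standalone version of the saturating cursor:

    #include <limits.h>
    #include <stdio.h>

    /* Saturating advance of an attacker-controlled search cursor, as in
     * snd_seq_ioctl_query_next_client().
     */
    static int next_cursor(int cur)
    {
            if (cur < INT_MAX)
                    cur++;
            if (cur < 0)
                    cur = 0;
            return cur;
    }

    int main(void)
    {
            printf("%d\n", next_cursor(INT_MAX));   /* stays INT_MAX */
            return 0;
    }
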
index 665089c455603c0c683144c419981f8215780e85..b6f076bbc72d14be37893e20b19cbfaedf2f728b 100644 (file)
@@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
                                } else {
                                        if (id.subdevice < 0)
                                                id.subdevice = 0;
-                                       else
+                                       else if (id.subdevice < INT_MAX)
                                                id.subdevice++;
                                }
                        }
index d91c87e41756ea5fceaee73d84e211b9ebba929d..20a171ac4bb2f7cff9715122c4e959c60fc9b485 100644 (file)
@@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
        list_for_each_entry(pcm, &codec->pcm_list_head, list)
                snd_pcm_suspend_all(pcm->pcm);
        state = hda_call_codec_suspend(codec);
-       if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
-           (state & AC_PWRST_CLK_STOP_OK))
+       if (codec->link_down_at_suspend ||
+           (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+            (state & AC_PWRST_CLK_STOP_OK)))
                snd_hdac_codec_link_down(&codec->core);
        snd_hdac_link_power(&codec->core, false);
        return 0;
index 681c360f29f9d628cf4462c9bb7ef92879f27d91..a8b1b31f161c26f739892ea6b52e79ba2ebca291 100644 (file)
@@ -258,6 +258,7 @@ struct hda_codec {
        unsigned int power_save_node:1; /* advanced PM for each widget */
        unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
        unsigned int force_pin_prefix:1; /* Add location prefix */
+       unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
 #ifdef CONFIG_PM
        unsigned long power_on_acct;
        unsigned long power_off_acct;
index 04e949aa01ada5492765cd313608624f3a42c7b9..321e95c409c1427ddd5a56f802101fc9fe248bff 100644 (file)
@@ -991,6 +991,7 @@ struct ca0132_spec {
 enum {
        QUIRK_NONE,
        QUIRK_ALIENWARE,
+       QUIRK_ALIENWARE_M17XR4,
        QUIRK_SBZ,
        QUIRK_R3DI,
 };
@@ -1040,13 +1041,15 @@ static const struct hda_pintbl r3di_pincfgs[] = {
 };
 
 static const struct snd_pci_quirk ca0132_quirks[] = {
+       SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
        SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
-       SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
        {}
 };
 
@@ -5663,7 +5666,7 @@ static const char * const ca0132_alt_slave_pfxs[] = {
  * I think this has to do with the pin for rear surround being 0x11,
  * and the center/lfe being 0x10. Usually the pin order is the opposite.
  */
-const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
+static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
        { .channels = 2,
          .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
        { .channels = 4,
@@ -5966,7 +5969,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
        info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
 
        /* With the DSP enabled, desktops don't use this ADC. */
-       if (spec->use_alt_functions) {
+       if (!spec->use_alt_functions) {
                info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
                if (!info)
                        return -ENOMEM;
@@ -6130,7 +6133,10 @@ static void ca0132_init_dmic(struct hda_codec *codec)
         * Bit   6: set to select Data2, clear for Data1
         * Bit   7: set to enable DMic, clear for AMic
         */
-       val = 0x23;
+       if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+               val = 0x33;
+       else
+               val = 0x23;
        /* keep a copy of dmic ctl val for enable/disable dmic purpose */
        spec->dmic_ctl = val;
        snd_hda_codec_write(codec, spec->input_pins[0], 0,
@@ -7223,7 +7229,7 @@ static int ca0132_init(struct hda_codec *codec)
 
        snd_hda_sequence_write(codec, spec->base_init_verbs);
 
-       if (spec->quirk != QUIRK_NONE)
+       if (spec->use_alt_functions)
                ca0132_alt_init(codec);
 
        ca0132_download_dsp(codec);
@@ -7237,8 +7243,9 @@ static int ca0132_init(struct hda_codec *codec)
        case QUIRK_R3DI:
                r3di_setup_defaults(codec);
                break;
-       case QUIRK_NONE:
-       case QUIRK_ALIENWARE:
+       case QUIRK_SBZ:
+               break;
+       default:
                ca0132_setup_defaults(codec);
                ca0132_init_analog_mic2(codec);
                ca0132_init_dmic(codec);
@@ -7343,7 +7350,6 @@ static const struct hda_codec_ops ca0132_patch_ops = {
 static void ca0132_config(struct hda_codec *codec)
 {
        struct ca0132_spec *spec = codec->spec;
-       struct auto_pin_cfg *cfg = &spec->autocfg;
 
        spec->dacs[0] = 0x2;
        spec->dacs[1] = 0x3;
@@ -7405,12 +7411,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        case QUIRK_R3DI:
                codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
@@ -7438,9 +7439,6 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                break;
        default:
                spec->num_outputs = 2;
@@ -7463,12 +7461,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        }
 }
@@ -7476,7 +7469,7 @@ static void ca0132_config(struct hda_codec *codec)
 static int ca0132_prepare_verbs(struct hda_codec *codec)
 {
 /* Verbs + terminator (an empty element) */
-#define NUM_SPEC_VERBS 4
+#define NUM_SPEC_VERBS 2
        struct ca0132_spec *spec = codec->spec;
 
        spec->chip_init_verbs = ca0132_init_verbs0;
@@ -7488,34 +7481,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
        if (!spec->spec_init_verbs)
                return -ENOMEM;
 
-       /* HP jack autodetection */
-       spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
-       spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;
-
-       /* MIC1 jack autodetection */
-       spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
-       spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;
-
        /* config EAPD */
-       spec->spec_init_verbs[2].nid = 0x0b;
-       spec->spec_init_verbs[2].param = 0x78D;
-       spec->spec_init_verbs[2].verb = 0x00;
+       spec->spec_init_verbs[0].nid = 0x0b;
+       spec->spec_init_verbs[0].param = 0x78D;
+       spec->spec_init_verbs[0].verb = 0x00;
 
        /* Previously commented configuration */
        /*
-       spec->spec_init_verbs[3].nid = 0x0b;
-       spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].nid = 0x0b;
+       spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].verb = 0x02;
+
+       spec->spec_init_verbs[3].nid = 0x10;
+       spec->spec_init_verbs[3].param = 0x78D;
        spec->spec_init_verbs[3].verb = 0x02;
 
        spec->spec_init_verbs[4].nid = 0x10;
-       spec->spec_init_verbs[4].param = 0x78D;
+       spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;
        spec->spec_init_verbs[4].verb = 0x02;
-
-       spec->spec_init_verbs[5].nid = 0x10;
-       spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE;
-       spec->spec_init_verbs[5].verb = 0x02;
        */
 
        /* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */
index e7fcfc3b8885fb7470dc1b10a49f305f7bca323d..f641c20095f71bb93edef945be21cbc141de280f 100644 (file)
@@ -964,6 +964,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index 8840daf9c6a300899efaf02898430d158d5b972a..8a49415aebacb79cd3da6b90b1a34e041da02bbd 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <sound/core.h>
 #include <sound/jack.h>
 #include <sound/asoundef.h>
@@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
 
        if (pin_idx < 0)
                return;
+       mutex_lock(&spec->pcm_lock);
        if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
                snd_hda_jack_report_sync(codec);
+       mutex_unlock(&spec->pcm_lock);
 }
 
 static void jack_callback(struct hda_codec *codec,
@@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
        struct hda_codec *codec = per_pin->codec;
-       struct hdmi_spec *spec = codec->spec;
        int ret;
 
        /* no temporary power up/down needed for component notifier */
-       if (!codec_has_acomp(codec))
-               snd_hda_power_up_pm(codec);
+       if (!codec_has_acomp(codec)) {
+               ret = snd_hda_power_up_pm(codec);
+               if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
+                       snd_hda_power_down_pm(codec);
+                       return false;
+               }
+       }
 
-       mutex_lock(&spec->pcm_lock);
        if (codec_has_acomp(codec)) {
                sync_eld_via_acomp(codec, per_pin);
                ret = false; /* don't call snd_hda_jack_report_sync() */
        } else {
                ret = hdmi_present_sense_via_verbs(per_pin, repoll);
        }
-       mutex_unlock(&spec->pcm_lock);
 
        if (!codec_has_acomp(codec))
                snd_hda_power_down_pm(codec);
@@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work)
 {
        struct hdmi_spec_per_pin *per_pin =
        container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+       struct hda_codec *codec = per_pin->codec;
+       struct hdmi_spec *spec = codec->spec;
 
        if (per_pin->repoll_count++ > 6)
                per_pin->repoll_count = 0;
 
+       mutex_lock(&spec->pcm_lock);
        if (hdmi_present_sense(per_pin, per_pin->repoll_count))
                snd_hda_jack_report_sync(per_pin->codec);
+       mutex_unlock(&spec->pcm_lock);
 }
 
 static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
@@ -3741,6 +3750,11 @@ static int patch_atihdmi(struct hda_codec *codec)
 
        spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
 
+       /* AMD GPUs have neither EPSS nor CLKSTOP bits, so the link cannot
+        * be powered down at suspend as is.  Tell the core to allow it.
+        */
+       codec->link_down_at_suspend = 1;
+
        return 0;
 }
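The earlier hunks in this file move spec->pcm_lock out of hdmi_present_sense() and into both of its callers, check_presence_and_report() and hdmi_repoll_eld(), and let the presence check bail out when the runtime-PM power-up fails. A minimal sketch of the lock-to-caller refactor, using pthreads and hypothetical names rather than the HDA locking primitives:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pcm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the presence check; it no longer takes the lock itself. */
static bool present_sense(void)
{
        return true;
}

/* Each caller brackets the call explicitly, so the check runs under
 * exactly one well-defined critical section per call path. */
static void check_and_report(void)
{
        pthread_mutex_lock(&pcm_lock);
        if (present_sense())
                puts("jack state changed"); /* report while serialized */
        pthread_mutex_unlock(&pcm_lock);
}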
 
index e9bd33ea538f239891c031a1a81e075a35c75043..f6af3e1c2b932d34c1de567b4229b0eb686af637 100644 (file)
@@ -2366,6 +2366,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2545,6 +2546,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
        SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
        SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+       SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
        SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4995,7 +4997,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->shutup = alc_no_shutup; /* reduce click noise */
                spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
@@ -5394,6 +5395,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
+static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
+       hda_fixup_thinkpad_acpi(codec, fix, action);
+}
+
 /* for dell wmi mic mute led */
 #include "dell_wmi_helper.c"
 
@@ -5946,7 +5954,7 @@ static const struct hda_fixup alc269_fixups[] = {
        },
        [ALC269_FIXUP_THINKPAD_ACPI] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = hda_fixup_thinkpad_acpi,
+               .v.func = alc_fixup_thinkpad_acpi,
                .chained = true,
                .chain_id = ALC269_FIXUP_SKU_IGNORE,
        },
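Instead of assigning spec->shutup directly (the line removed above), the patch introduces alc_fixup_thinkpad_acpi() as a wrapper that chains alc_fixup_no_shutup() in front of hda_fixup_thinkpad_acpi(), and the fixup table entry now points at the wrapper. The composition pattern in isolation, with placeholder function names:

/* Compose two fixups behind one entry point so quirk tables keep a
 * single function pointer. */
static void no_shutup_fixup(void *codec, int action)
{
        (void)codec; (void)action;      /* e.g. suppress shutdown clicks */
}

static void acpi_fixup(void *codec, int action)
{
        (void)codec; (void)action;      /* e.g. platform ACPI hooks */
}

static void combined_fixup(void *codec, int action)
{
        no_shutup_fixup(codec, action); /* reduce click noise first */
        acpi_fixup(codec, action);      /* then the original fixup */
}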
@@ -6562,6 +6570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+       SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
@@ -6603,8 +6612,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
-       SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6782,6 +6791,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x19, 0x02a11030},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11030},
+               {0x1a, 0x02a11040},
+               {0x1b, 0x01014020},
+               {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11020},
+               {0x1a, 0x02a11030},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60140},
                {0x14, 0x90170110},
index 6c85f13ab23f17f7ef4031f5f73797fb383584a7..54f6252faca684b23ef91617318c8ae11e3de04e 100644 (file)
@@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card,
        chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
        if (!chip->port_dsp_bar) {
                dev_err(card->dev, "cannot remap PCI memory region\n");
+               err = -ENOMEM;
                goto remap_pci_failed;
        }
 
index caae4843cb7001fbee1fa9b222850df7006850fb..16e006f708ca0cbd44a63135bb996b8db7c3ba9e 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 04b3256f8e6d5f8e3e368b043f0fdcfeb7c23164..4e76630dd6554673d71ad647c1108bb54f1bcea2 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 833ed9a16adfd03e0b6cb70adc19fe03055f7344..1b32b56a03d34ce2a5f0b7f79c621f87d8c89dbf 100644 (file)
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_PSSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
 #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
index 389c36fd82990f3f6b390342f56375ac0067054a..ac5ba55066dd76a26f133d91623309036bcad4c8 100644 (file)
 #define __NR_pkey_alloc                384
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
+#define __NR_rseq              387
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index fb00a2fca9901eb02ea7b730ddbac957e8ecc947..5701f5cecd3125fbce64ead21d89d02fc8fa25af 100644 (file)
 #define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD           (13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
index 32f9e397a6c07a988edd80a3338fd2a29930d3e0..3f140eff039fc699f99a30f231211120fa88ed53 100644 (file)
@@ -217,6 +217,14 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
        int err;
        int fd;
 
+       if (argc < 3) {
+               p_err("too few arguments, id ID and FILE path are required");
+               return -1;
+       } else if (argc > 3) {
+               p_err("too many arguments");
+               return -1;
+       }
+
        if (!is_prefix(*argv, "id")) {
                p_err("expected 'id' got %s", *argv);
                return -1;
@@ -230,9 +238,6 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
        }
        NEXT_ARG();
 
-       if (argc != 1)
-               usage();
-
        fd = get_fd_by_id(id);
        if (fd < 0) {
                p_err("can't get prog by id (%u): %s", id, strerror(errno));
index ac6b1a12c9b7cd6319dc3697500d56c938de8d90..b76b77dcfd1fcc52ded0a99b7e7f3cec8cde90ce 100644 (file)
@@ -29,9 +29,10 @@ static bool has_perf_query_support(void)
        if (perf_query_supported)
                goto out;
 
-       fd = open(bin_name, O_RDONLY);
+       fd = open("/", O_RDONLY);
        if (fd < 0) {
-               p_err("perf_query_support: %s", strerror(errno));
+               p_err("perf_query_support: cannot open directory \"/\" (%s)",
+                     strerror(errno));
                goto out;
        }
 
index a4f435203feff52f9d7c9a04bf8d5c10c31d2c73..959aa53ab6789f839442326359701b17ba9e337c 100644 (file)
@@ -90,7 +90,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
        }
 
        wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
-               nsecs / 1000000000;
+               (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+               1000000000;
+
 
        if (!localtime_r(&wallclock_secs, &load_tm)) {
                snprintf(buf, size, "%llu", nsecs / 1000000000);
@@ -692,15 +694,19 @@ static int do_load(int argc, char **argv)
                return -1;
        }
 
-       if (do_pin_fd(prog_fd, argv[1])) {
-               p_err("failed to pin program");
-               return -1;
-       }
+       if (do_pin_fd(prog_fd, argv[1]))
+               goto err_close_obj;
 
        if (json_output)
                jsonw_null(json_wtr);
 
+       bpf_object__close(obj);
+
        return 0;
+
+err_close_obj:
+       bpf_object__close(obj);
+       return -1;
 }
 
 static int do_help(int argc, char **argv)
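The first hunk of this file folds the sub-second parts of both timestamps into the division, so the computed load time no longer drops up to a second of nanosecond remainders. A self-contained illustration of the corrected arithmetic; the sample values are made up:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec real = { 1000, 900000000 }; /* CLOCK_REALTIME now */
        struct timespec boot = { 400, 100000000 };  /* CLOCK_BOOTTIME now */
        long long nsecs = 250000000;                /* load time, ns since boot */

        /* Carry all three nanosecond quantities into one division
         * instead of truncating nsecs on its own. */
        time_t wallclock_secs = (real.tv_sec - boot.tv_sec) +
                (real.tv_nsec - boot.tv_nsec + nsecs) / 1000000000LL;

        printf("loaded at %lld\n", (long long)wallclock_secs); /* 601 */
        return 0;
}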
index a4bbb984941df2c150ec7209e4905ac1baacbf81..950c1504ca37ecda43542a46730fe805a0f05866 100644 (file)
@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
            $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;           \
            rm -f $(depfile);                                                    \
            mv -f $(dot-target).tmp $(dot-target).cmd,                           \
-           printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
-           printf '\# using basic dep data\n\n' >> $(dot-target).cmd;           \
+           printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+           printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd;           \
            cat $(depfile) >> $(dot-target).cmd;                                 \
            printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
 
@@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
 ###
 ## HOSTCC C flags
 
-host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
index 5eb4b5ad79cb778f0e949a07f719743d7a62d3c7..5edf65e684ab70bb65bfd0e8dc821a61b605be6f 100644 (file)
@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE
        $(Q)$(MAKE) $(build)=fixdep
 
 $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
-       $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
+       $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
 
 FORCE:
 
index 6fdff5945c8a08f27af713f6b59cb27b315da447..9c660e1688abe1cd6bf0e22bf709515e8a463e0d 100644 (file)
@@ -680,6 +680,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ATOMIC  3
 
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO    4
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index e0b06784f2279d9f428da4a0bce526721a452570..59b19b6a40d73ea6575f8810a6f4345a931c5a01 100644 (file)
@@ -2630,7 +2630,7 @@ struct bpf_fib_lookup {
        union {
                /* inputs to lookup */
                __u8    tos;            /* AF_INET  */
-               __be32  flowlabel;      /* AF_INET6 */
+               __be32  flowinfo;       /* AF_INET6, flow_label + priority */
 
                /* output: metric of fib result (IPv4/IPv6 only) */
                __u32   rt_metric;
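The rename reflects that for AF_INET6 the field carries the whole flow information word, i.e. the 20-bit flow label plus the priority/traffic-class bits above it. A hedged sketch of splitting the two, assuming the RFC 8200 layout of the lower 28 bits; the helper names are illustrative:

#include <stdint.h>
#include <arpa/inet.h>

/* Lower 20 bits: flow label; bits 20-27: traffic class / priority. */
static inline uint32_t flow_label(uint32_t flowinfo_be)
{
        return ntohl(flowinfo_be) & 0x000FFFFF;
}

static inline uint8_t flow_priority(uint32_t flowinfo_be)
{
        return (ntohl(flowinfo_be) >> 20) & 0xFF;
}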
index 68699f654118592527096dc26336f57da6a01cdc..cf01b68242448512416c1b1aa25f0904915aad0a 100644 (file)
@@ -333,6 +333,7 @@ enum {
        IFLA_BRPORT_BCAST_FLOOD,
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
+       IFLA_BRPORT_ISOLATED,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -516,6 +517,7 @@ enum {
        IFLA_VXLAN_COLLECT_METADATA,
        IFLA_VXLAN_LABEL,
        IFLA_VXLAN_GPE,
+       IFLA_VXLAN_TTL_INHERIT,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
index 39e364c70caf780312808e179a1bb234aa45460e..b6270a3b38e9f3fb410e8c80d8658b2c01a8ef96 100644 (file)
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 38047c6aa57576d170b3281eb0de0376894e9f41..f4a25bd1871fb856a8295b77c825323cd3f7fc25 100644 (file)
@@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "lbug_with_loc",
                "fortify_panic",
                "usercopy_abort",
+               "machine_real_restart",
        };
 
        if (func->bind == STB_WEAK)
index 4e60e105583ee803916589ca56df0e81e12b8fb3..7ec85d567598c5047fbe00b9660c9e7fc76870cf 100644 (file)
@@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf)
                                continue;
                        sym->pfunc = sym->cfunc = sym;
                        coldstr = strstr(sym->name, ".cold.");
-                       if (coldstr) {
-                               coldstr[0] = '\0';
-                               pfunc = find_symbol_by_name(elf, sym->name);
-                               coldstr[0] = '.';
-
-                               if (!pfunc) {
-                                       WARN("%s(): can't find parent function",
-                                            sym->name);
-                                       goto err;
-                               }
-
-                               sym->pfunc = pfunc;
-                               pfunc->cfunc = sym;
+                       if (!coldstr)
+                               continue;
+
+                       coldstr[0] = '\0';
+                       pfunc = find_symbol_by_name(elf, sym->name);
+                       coldstr[0] = '.';
+
+                       if (!pfunc) {
+                               WARN("%s(): can't find parent function",
+                                    sym->name);
+                               goto err;
+                       }
+
+                       sym->pfunc = pfunc;
+                       pfunc->cfunc = sym;
+
+                       /*
+                        * Unfortunately, -fno-reorder-functions puts the child
+                        * inside the parent.  Remove the overlap so we can
+                        * have sane assumptions.
+                        *
+                        * Note that pfunc->len now no longer matches
+                        * pfunc->sym.st_size.
+                        */
+                       if (sym->sec == pfunc->sec &&
+                           sym->offset >= pfunc->offset &&
+                           sym->offset + sym->len == pfunc->offset + pfunc->len) {
+                               pfunc->len -= sym->len;
                        }
                }
        }
@@ -504,10 +519,12 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        sec->sh.sh_flags = SHF_ALLOC;
 
 
-       /* Add section name to .shstrtab */
+       /* Add section name to .shstrtab (or .strtab for Clang) */
        shstrtab = find_section_by_name(elf, ".shstrtab");
+       if (!shstrtab)
+               shstrtab = find_section_by_name(elf, ".strtab");
        if (!shstrtab) {
-               WARN("can't find .shstrtab section");
+               WARN("can't find .shstrtab or .strtab section");
                return NULL;
        }
 
index 5dfe102fb5b533979a2fb726b621cedd398d0935..b10a90b6a7181f8968420a875a2b2fc2b3919321 100644 (file)
@@ -178,6 +178,9 @@ Print count deltas for fixed number of times.
 This option should be used together with "-I" option.
        example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
 
+--interval-clear::
+Clear the screen before the next interval.
+
 --timeout msecs::
 Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
 This option is not supported with the "-I" option.
index b5ac356ba323c8a363b96e10082205078f12a3f8..f5a3b402589eacc6c8a9789b099c86bd3b946587 100644 (file)
@@ -207,8 +207,7 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
   PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
-  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
-  PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
+  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
 endif
 
index 3598b8b75d274c8ebcc6fc0452091dec34c797b7..ef5d59a5742e2467fc409d52aebbfe4111ce0710 100644 (file)
@@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
        u64 ip;
        u64 skip_slot = -1;
 
-       if (chain->nr < 3)
+       if (!chain || chain->nr < 3)
                return skip_slot;
 
        ip = chain->ips[2];
index 4dfe42666d0ce6e20214e70f0c2a6a3884106290..f0b1709a5ffb2b0901d7f2492252876d17bc25a0 100644 (file)
 330    common  pkey_alloc              __x64_sys_pkey_alloc
 331    common  pkey_free               __x64_sys_pkey_free
 332    common  statx                   __x64_sys_statx
+333    common  io_pgetevents           __x64_sys_io_pgetevents
+334    common  rseq                    __x64_sys_rseq
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 4b2caf6d48e794d3cd5a88aaa978db10aabb3185..fead6b3b4206e409fc4042ce5d850cae2629bae9 100644 (file)
@@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
                else if (rm[2].rm_so != rm[2].rm_eo)
                        prefix[0] = '+';
                else
-                       strncpy(prefix, "+0", 2);
+                       scnprintf(prefix, sizeof(prefix), "+0");
        }
 
        /* Rename register */
index 63eb49082774c94dfbabe7a18db73bdc2403fb6a..44195514b19e65a5ee0287b48fa0ab25fa44d66f 100644 (file)
@@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata)
        u8 *global_data;
        u8 *process_data;
        u8 *thread_data;
-       u64 bytes_done;
+       u64 bytes_done, secs;
        long work_done;
        u32 l;
        struct rusage rusage;
@@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata)
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
        td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
-       td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
+       secs = td->runtime_ns / NSEC_PER_SEC;
+       td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
 
        getrusage(RUSAGE_THREAD, &rusage);
        td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
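The hunk above guards against a divide-by-zero when a worker's runtime truncates to zero whole seconds. Reduced to its essentials:

#include <stdint.h>

/* Report 0 GB/s for sub-second runs instead of faulting on /0. */
static double speed_gbs(uint64_t bytes_done, uint64_t runtime_ns)
{
        uint64_t secs = runtime_ns / 1000000000ULL;

        return secs ? (double)bytes_done / secs / 1e9 : 0.0;
}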
index 5eb22cc563636c11d4e12bf42c71f00b3e1255db..8180319285af3377810c30c0298f37c73cb9bb8d 100644 (file)
@@ -283,6 +283,15 @@ out_put:
        return ret;
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 static int hist_entry__tty_annotate(struct hist_entry *he,
                                    struct perf_evsel *evsel,
                                    struct perf_annotate *ann)
@@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv)
                        .attr   = perf_event__process_attr,
                        .build_id = perf_event__process_build_id,
                        .tracing_data   = perf_event__process_tracing_data,
-                       .feature        = perf_event__process_feature,
+                       .feature        = process_feature_event,
                        .ordered_events = true,
                        .ordering_requires_timestamps = true,
                },
index 307b3594525f34cc9e14d758b71bcdf266414c3e..6a8738f7ead3613e691a7dc6aac523d585de39b2 100644 (file)
@@ -56,16 +56,16 @@ struct c2c_hist_entry {
 
        struct compute_stats     cstats;
 
+       unsigned long            paddr;
+       unsigned long            paddr_cnt;
+       bool                     paddr_zero;
+       char                    *nodestr;
+
        /*
         * must be at the end,
         * because of its callchain dynamic entry
         */
        struct hist_entry       he;
-
-       unsigned long            paddr;
-       unsigned long            paddr_cnt;
-       bool                     paddr_zero;
-       char                    *nodestr;
 };
 
 static char const *coalesce_default = "pid,iaddr";
index cdb5b694983273de734fa1f45848c49eeac239d2..c04dc7b537971a07801153db1f9cdaf5da8a2c34 100644 (file)
@@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool,
        }
 
        /*
-        * All features are received, we can force the
+        * (feat_id = HEADER_LAST_FEATURE) is the end marker, which
+        * means all features have been received; now we can force the
         * group if needed.
         */
        setup_forced_leader(rep, session->evlist);
index b3bf35512d2198a94e46a7ecaf6052bca616ee7d..568ddfac3213e084c1f4c6077cd73943bf0644b9 100644 (file)
@@ -180,6 +180,18 @@ static struct {
                                  PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
        },
 
+       [PERF_TYPE_HW_CACHE] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
+
        [PERF_TYPE_RAW] = {
                .user_set = false,
 
@@ -1822,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        struct perf_evlist *evlist;
        struct perf_evsel *evsel, *pos;
        int err;
+       static struct perf_evsel_script *es;
 
        err = perf_event__process_attr(tool, event, pevlist);
        if (err)
@@ -1830,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        evlist = *pevlist;
        evsel = perf_evlist__last(*pevlist);
 
+       if (!evsel->priv) {
+               if (scr->per_event_dump) {
+                       evsel->priv = perf_evsel_script__new(evsel,
+                                               scr->session->data);
+               } else {
+                       es = zalloc(sizeof(*es));
+                       if (!es)
+                               return -ENOMEM;
+                       es->fp = stdout;
+                       evsel->priv = es;
+               }
+       }
+
        if (evsel->attr.type >= PERF_TYPE_MAX &&
            evsel->attr.type != PERF_TYPE_SYNTH)
                return 0;
@@ -3018,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
        return set_maps(script);
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 #ifdef HAVE_AUXTRACE_SUPPORT
 static int perf_script__process_auxtrace_info(struct perf_tool *tool,
                                              union perf_event *event,
@@ -3062,7 +3097,7 @@ int cmd_script(int argc, const char **argv)
                        .attr            = process_attr,
                        .event_update   = perf_event__process_event_update,
                        .tracing_data    = perf_event__process_tracing_data,
-                       .feature         = perf_event__process_feature,
+                       .feature         = process_feature_event,
                        .build_id        = perf_event__process_build_id,
                        .id_index        = perf_event__process_id_index,
                        .auxtrace_info   = perf_script__process_auxtrace_info,
@@ -3113,8 +3148,9 @@ int cmd_script(int argc, const char **argv)
                     "+field to add and -field to remove."
                     "Valid types: hw,sw,trace,raw,synth. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
-                    "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
+                    "addr,symoff,srcline,period,iregs,uregs,brstack,"
+                    "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
+                    "callindent,insn,insnlen,synth,phys_addr,metric,misc",
                     parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
index 096ccb25c11ff7786c6df0c24b695cd0eff5bec0..05be023c3f0eda0066394651602f5be4ba6823ca 100644 (file)
@@ -65,6 +65,7 @@
 #include "util/tool.h"
 #include "util/string2.h"
 #include "util/metricgroup.h"
+#include "util/top.h"
 #include "asm/bug.h"
 
 #include <linux/time64.h>
@@ -144,6 +145,8 @@ static struct target target = {
 
 typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
 
+#define METRIC_ONLY_LEN 20
+
 static int                     run_count                       =  1;
 static bool                    no_inherit                      = false;
 static volatile pid_t          child_pid                       = -1;
@@ -173,6 +176,7 @@ static struct cpu_map               *aggr_map;
 static aggr_get_id_t           aggr_get_id;
 static bool                    append_file;
 static bool                    interval_count;
+static bool                    interval_clear;
 static const char              *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
@@ -180,6 +184,7 @@ static int                  print_mixed_hw_group_error;
 static u64                     *walltime_run;
 static bool                    ru_display                      = false;
 static struct rusage           ru_data;
+static unsigned int            metric_only_len                 = METRIC_ONLY_LEN;
 
 struct perf_stat {
        bool                     record;
@@ -967,8 +972,6 @@ static void print_metric_csv(void *ctx,
        fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
 }
 
-#define METRIC_ONLY_LEN 20
-
 /* Filter out some columns that don't work well in metrics only mode */
 
 static bool valid_only_metric(const char *unit)
@@ -999,22 +1002,20 @@ static void print_metric_only(void *ctx, const char *color, const char *fmt,
 {
        struct outstate *os = ctx;
        FILE *out = os->fh;
-       int n;
-       char buf[1024];
-       unsigned mlen = METRIC_ONLY_LEN;
+       char buf[1024], str[1024];
+       unsigned mlen = metric_only_len;
 
        if (!valid_only_metric(unit))
                return;
        unit = fixunit(buf, os->evsel, unit);
-       if (color)
-               n = color_fprintf(out, color, fmt, val);
-       else
-               n = fprintf(out, fmt, val);
-       if (n > METRIC_ONLY_LEN)
-               n = METRIC_ONLY_LEN;
        if (mlen < strlen(unit))
                mlen = strlen(unit) + 1;
-       fprintf(out, "%*s", mlen - n, "");
+
+       if (color)
+               mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+       color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+       fprintf(out, "%*s ", mlen, str);
 }
 
 static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
@@ -1054,7 +1055,7 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
        if (csv_output)
                fprintf(os->fh, "%s%s", unit, csv_sep);
        else
-               fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
+               fprintf(os->fh, "%*s ", metric_only_len, unit);
 }
 
 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
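print_metric_only() now renders the value into a scratch buffer and widens the printf field by the byte length of the color escape sequences, which occupy bytes but no visible columns; that keeps colored and plain cells aligned. The width trick in isolation (the reset sequence and helper name are stand-ins):

#include <stdio.h>
#include <string.h>

#define COLOR_RESET "\033[m"

static void print_cell(FILE *out, const char *color, const char *text,
                       int width)
{
        char str[1024];

        /* Escape codes add bytes, not columns, so grow the field
         * width by exactly their length. */
        if (color)
                width += strlen(color) + sizeof(COLOR_RESET) - 1;

        snprintf(str, sizeof(str), "%s%s%s",
                 color ? color : "", text, color ? COLOR_RESET : "");
        fprintf(out, "%*s ", width, str);
}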
@@ -1704,9 +1705,12 @@ static void print_interval(char *prefix, struct timespec *ts)
        FILE *output = stat_config.output;
        static int num_print_interval;
 
+       if (interval_clear)
+               puts(CONSOLE_CLEAR);
+
        sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
-       if (num_print_interval == 0 && !csv_output) {
+       if ((num_print_interval == 0 && !csv_output) || interval_clear) {
                switch (stat_config.aggr_mode) {
                case AGGR_SOCKET:
                        fprintf(output, "#           time socket cpus");
@@ -1719,7 +1723,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                                fprintf(output, "             counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_NONE:
-                       fprintf(output, "#           time CPU");
+                       fprintf(output, "#           time CPU    ");
                        if (!metric_only)
                                fprintf(output, "                counts %*s events\n", unit_width, "unit");
                        break;
@@ -1738,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                }
        }
 
-       if (num_print_interval == 0 && metric_only)
+       if ((num_print_interval == 0 || interval_clear) && metric_only)
                print_metric_headers(" ", true);
        if (++num_print_interval == 25)
                num_print_interval = 0;
@@ -2057,6 +2061,8 @@ static const struct option stat_options[] = {
                    "(overhead is possible for values <= 100ms)"),
        OPT_INTEGER(0, "interval-count", &stat_config.times,
                    "print counts for fixed number of times"),
+       OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+                   "clear the screen between intervals"),
        OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
@@ -2436,14 +2442,13 @@ static int add_default_attributes(void)
        (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
 };
+       struct parse_events_error errinfo;
 
        /* Set attrs if no event is selected and !null_run: */
        if (null_run)
                return 0;
 
        if (transaction_run) {
-               struct parse_events_error errinfo;
-
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
                        err = parse_events(evsel_list, transaction_attrs,
@@ -2454,6 +2459,7 @@ static int add_default_attributes(void)
                                           &errinfo);
                if (err) {
                        fprintf(stderr, "Cannot set up transaction events\n");
+                       parse_events_print_error(&errinfo, transaction_attrs);
                        return -1;
                }
                return 0;
@@ -2479,10 +2485,11 @@ static int add_default_attributes(void)
                    pmu_have_event("msr", "smi")) {
                        if (!force_metric_only)
                                metric_only = true;
-                       err = parse_events(evsel_list, smi_cost_attrs, NULL);
+                       err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
                } else {
                        fprintf(stderr, "To measure SMI cost, it needs "
                                "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+                       parse_events_print_error(&errinfo, smi_cost_attrs);
                        return -1;
                }
                if (err) {
@@ -2517,12 +2524,13 @@ static int add_default_attributes(void)
                if (topdown_attrs[0] && str) {
                        if (warn)
                                arch_topdown_group_warn();
-                       err = parse_events(evsel_list, str, NULL);
+                       err = parse_events(evsel_list, str, &errinfo);
                        if (err) {
                                fprintf(stderr,
                                        "Cannot set up top down events %s: %d\n",
                                        str, err);
                                free(str);
+                               parse_events_print_error(&errinfo, str);
                                return -1;
                        }
                } else {
index 0c6d1002b524eaf62ef62cc32763b041b2f33ba1..ac1bcdc17dae7554f51a780b843605c441c6abbf 100644 (file)
@@ -35,6 +35,7 @@
 #include <sys/mman.h>
 #include <syscall.h> /* for gettid() */
 #include <err.h>
+#include <linux/kernel.h>
 
 #include "jvmti_agent.h"
 #include "../util/jitdump.h"
@@ -249,7 +250,7 @@ void *jvmti_open(void)
        /*
         * jitdump file name
         */
-       snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
 
        fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
        if (fd == -1)
index 17783913d3306a15c13d45745f447aa5ebafb487..215ba30b85343ad1874b1fc52c05fccbd8948bb3 100644 (file)
@@ -1,7 +1,7 @@
 hostprogs := jevents
 
 jevents-y      += json.o jsmn.o jevents.o
-CHOSTFLAGS_jevents.o   = -I$(srctree)/tools/include
+HOSTCFLAGS_jevents.o   = -I$(srctree)/tools/include
 pmu-events-y   += pmu-events.o
 JDIR           =  pmu-events/arch/$(SRCARCH)
 JSON           =  $(shell [ -d $(JDIR) ] &&                            \
index 38dfb720fb6f78757dfe3e7aa8d01014ffdc7927..54ace2f6bc3650388ec1f7e96c7c0b1ea2495232 100644 (file)
@@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value):
     string = ""
 
     if flag_fields[event_name][field_name]:
-       print_delim = 0
-        keys = flag_fields[event_name][field_name]['values'].keys()
-        keys.sort()
-        for idx in keys:
+        print_delim = 0
+        for idx in sorted(flag_fields[event_name][field_name]['values']):
             if not value and not idx:
                 string += flag_fields[event_name][field_name]['values'][idx]
                 break
@@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value):
     string = ""
 
     if symbolic_fields[event_name][field_name]:
-        keys = symbolic_fields[event_name][field_name]['values'].keys()
-        keys.sort()
-        for idx in keys:
+        for idx in sorted(symbolic_fields[event_name][field_name]['values']):
             if not value and not idx:
-               string = symbolic_fields[event_name][field_name]['values'][idx]
+                string = symbolic_fields[event_name][field_name]['values'][idx]
                 break
-           if (value == idx):
-               string = symbolic_fields[event_name][field_name]['values'][idx]
+            if (value == idx):
+                string = symbolic_fields[event_name][field_name]['values'][idx]
                 break
 
     return string
@@ -74,19 +70,17 @@ def trace_flag_str(value):
     string = ""
     print_delim = 0
 
-    keys = trace_flags.keys()
-
-    for idx in keys:
-       if not value and not idx:
-           string += "NONE"
-           break
-
-       if idx and (value & idx) == idx:
-           if print_delim:
-               string += " | ";
-           string += trace_flags[idx]
-           print_delim = 1
-           value &= ~idx
+    for idx in trace_flags:
+        if not value and not idx:
+            string += "NONE"
+            break
+
+        if idx and (value & idx) == idx:
+            if print_delim:
+                string += " | ";
+            string += trace_flags[idx]
+            print_delim = 1
+            value &= ~idx
 
     return string
 
index 81a56cd2b3c166315bfb376d93265bcdf0463e66..21a7a129809443a9231019d955fb5d945278cd46 100755 (executable)
@@ -8,6 +8,7 @@
 # PerfEvent is the base class for all perf event sample, PebsEvent
 # is a HW base Intel x86 PEBS event, and user could add more SW/HW
 # event classes based on requirements.
+from __future__ import print_function
 
 import struct
 
@@ -44,7 +45,8 @@ class PerfEvent(object):
                 PerfEvent.event_num += 1
 
         def show(self):
-                print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
+                print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+                      (self.name, self.symbol, self.comm, self.dso))
 
 #
 # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
index fdd92f699055713e2d1fec1c99a61489e5812a64..cac7b2542ee8c99b814b9a0baddeb60ba6727c0b 100644 (file)
@@ -11,7 +11,7 @@
 try:
        import wx
 except ImportError:
-       raise ImportError, "You need to install the wxpython lib for this script"
+       raise ImportError("You need to install the wxpython lib for this script")
 
 
 class RootFrame(wx.Frame):
index f6c84966e4f89cb2ffcd0ad8d4b94a34d134811c..7384dcb628c4326c3d98dca18639b8439fc5c8ac 100644 (file)
@@ -5,6 +5,7 @@
 # This software may be distributed under the terms of the GNU General
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
+from __future__ import print_function
 
 import errno, os
 
@@ -33,7 +34,7 @@ def nsecs_str(nsecs):
     return str
 
 def add_stats(dict, key, value):
-       if not dict.has_key(key):
+       if key not in dict:
                dict[key] = (value, value, value, 1)
        else:
                min, max, avg, count = dict[key]
@@ -72,10 +73,10 @@ try:
 except:
        if not audit_package_warned:
                audit_package_warned = True
-               print "Install the audit-libs-python package to get syscall names.\n" \
-                    "For example:\n  # apt-get install python-audit (Ubuntu)" \
-                    "\n  # yum install audit-libs-python (Fedora)" \
-                    "\n  etc.\n"
+               print("Install the audit-libs-python package to get syscall names.\n"
+                    "For example:\n  # apt-get install python-audit (Ubuntu)"
+                    "\n  # yum install audit-libs-python (Fedora)"
+                    "\n  etc.\n")
 
 def syscall_name(id):
        try:
index de66cb3b72c9e6be9dc5d884611e0522def92631..3473e7f66081c93104e951afb4553f5b59e03b6d 100644 (file)
@@ -9,13 +9,17 @@
 # This software is distributed under the terms of the GNU General
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
-
+from __future__ import print_function
 
 import os
 import sys
 
 from collections import defaultdict
-from UserList import UserList
+try:
+    from UserList import UserList
+except ImportError:
+    # Python 3: UserList moved to the collections package
+    from collections import UserList
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -300,7 +304,7 @@ class TimeSliceList(UserList):
                if i == -1:
                        return
 
-               for i in xrange(i, len(self.data)):
+               for i in range(i, len(self.data)):
                        timeslice = self.data[i]
                        if timeslice.start > end:
                                return
@@ -336,8 +340,8 @@ class SchedEventProxy:
                on_cpu_task = self.current_tsk[headers.cpu]
 
                if on_cpu_task != -1 and on_cpu_task != prev_pid:
-                       print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
-                               (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+                       print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+                               (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))
 
                threads[prev_pid] = prev_comm
                threads[next_pid] = next_comm
index 2bde505e2e7ea0c2b1157e2734be89d291351e66..dd850a26d579914fde328d16cbb3ade2d3a8b992 100644 (file)
@@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size,
 
 #define for_each_shell_test(dir, base, ent)    \
        while ((ent = readdir(dir)) != NULL)    \
-               if (!is_directory(base, ent))
+               if (!is_directory(base, ent) && ent->d_name[0] != '.')
 
 static const char *shell_tests__dir(char *path, size_t size)
 {
index 7d40770684549d8691d63a403816b76b5bb7c3ad..61211918bfbaa5eaaba1b90c6664ebbe506da98b 100644 (file)
@@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
        return 0;
 }
 
+static bool test__intel_pt_valid(void)
+{
+       return !!perf_pmu__find("intel_pt");
+}
+
 static int test__intel_pt(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1375,6 +1380,7 @@ struct evlist_test {
        const char *name;
        __u32 type;
        const int id;
+       bool (*valid)(void);
        int (*check)(struct perf_evlist *evlist);
 };
 
@@ -1648,6 +1654,7 @@ static struct evlist_test test__events[] = {
        },
        {
                .name  = "intel_pt//u",
+               .valid = test__intel_pt_valid,
                .check = test__intel_pt,
                .id    = 52,
        },
@@ -1686,17 +1693,24 @@ static struct terms_test test__terms[] = {
 
 static int test_event(struct evlist_test *e)
 {
+       struct parse_events_error err = { .idx = 0, };
        struct perf_evlist *evlist;
        int ret;
 
+       if (e->valid && !e->valid()) {
+               pr_debug("... SKIP");
+               return 0;
+       }
+
        evlist = perf_evlist__new();
        if (evlist == NULL)
                return -ENOMEM;
 
-       ret = parse_events(evlist, e->name, NULL);
+       ret = parse_events(evlist, e->name, &err);
        if (ret) {
-               pr_debug("failed to parse event '%s', err %d\n",
-                        e->name, ret);
+               pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+                        e->name, ret, err.str);
+               parse_events_print_error(&err, e->name);
        } else {
                ret = e->check(evlist);
        }
@@ -1714,10 +1728,11 @@ static int test_events(struct evlist_test *events, unsigned cnt)
        for (i = 0; i < cnt; i++) {
                struct evlist_test *e = &events[i];
 
-               pr_debug("running test %d '%s'\n", e->id, e->name);
+               pr_debug("running test %d '%s'", e->id, e->name);
                ret1 = test_event(e);
                if (ret1)
                        ret2 = ret1;
+               pr_debug("\n");
        }
 
        return ret2;
@@ -1799,7 +1814,7 @@ static int test_pmu_events(void)
        }
 
        while (!ret && (ent = readdir(dir))) {
-               struct evlist_test e;
+               struct evlist_test e = { .id = 0, };
                char name[2 * NAME_MAX + 1 + 12 + 3];
 
                /* Names containing . are special and cannot be used directly */
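The new .valid hook above lets a table-driven test declare a runtime precondition, here the presence of the intel_pt PMU, and be reported as a skip instead of a failure. The pattern, generically:

#include <stdbool.h>
#include <stdio.h>

struct test_case {
        const char *name;
        bool (*valid)(void); /* optional precondition; NULL means always run */
        int (*run)(void);
};

static int run_one(const struct test_case *t)
{
        if (t->valid && !t->valid()) {
                printf("%s ... SKIP\n", t->name);
                return 0; /* a skip is not a failure */
        }
        return t->run();
}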
index 2630570396937fc4b26b844e5bfefc0e74d16409..94e513e62b34f378827cd5a2bdf4984cc00a33f0 100755 (executable)
@@ -14,35 +14,40 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1
 nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
 
 trace_libc_inet_pton_backtrace() {
-       idx=0
-       expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
-       expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
+
+       expected=`mktemp -u /tmp/expected.XXX`
+
+       echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected
+       echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
        case "$(uname -m)" in
        s390x)
                eventattr='call-graph=dwarf,max-stack=4'
-               expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
-               expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
-               expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+               echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+               echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+               echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        *)
                eventattr='max-stack=3'
-               expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$"
-               expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+               echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        esac
 
-       file=`mktemp -u /tmp/perf.data.XXX`
+       perf_data=`mktemp -u /tmp/perf.data.XXX`
+       perf_script=`mktemp -u /tmp/perf.script.XXX`
+       perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+       perf script -i $perf_data > $perf_script
 
-       perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1
-       perf script -i $file | while read line ; do
+       exec 3<$perf_script
+       exec 4<$expected
+       while read line <&3 && read -r pattern <&4; do
+               [ -z "$pattern" ] && break
                echo $line
-               echo "$line" | egrep -q "${expected[$idx]}"
+               echo "$line" | egrep -q "$pattern"
                if [ $? -ne 0 ] ; then
-                       printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line"
+                       printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
                        exit 1
                fi
-               let idx+=1
-               [ -z "${expected[$idx]}" ] && break
        done
 
        # If any statements are executed from this point onwards,
@@ -58,6 +63,6 @@ skip_if_no_perf_probe && \
 perf probe -q $libc inet_pton && \
 trace_libc_inet_pton_backtrace
 err=$?
-rm -f ${file}
+rm -f ${perf_data} ${perf_script} ${expected}
 perf probe -q -d probe_libc:inet_pton
 exit $err
index 55ad9793d5443da34ee4c6c76ea5d7fcba80f6fd..4ce276efe6b4c1855e904a30a2e9efcb6ed01a4e 100755 (executable)
@@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2
 file=$(mktemp /tmp/temporary_file.XXXXX)
 
 trace_open_vfs_getname() {
-       evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+       evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
        perf trace -e $evts touch $file 2>&1 | \
        egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
 }
index 40e30a26b23cc260536977fb9a0b17db54aa207a..9497d02f69e6669d8ca19ed753beb1a8477f4006 100644 (file)
@@ -45,6 +45,7 @@ static int session_write_header(char *path)
 
        perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_NRCPUS);
+       perf_header__set_feat(&session->header, HEADER_ARCH);
 
        session->header.data_size += DATA_SIZE;
 
index b085f1b3e34dacdd4764d0704e0c65c6644debf1..4ab663ec3e5ea108ee7df9a189ecc2bc4e996843 100644 (file)
@@ -382,7 +382,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
                        gtk_tree_store_set(store, &iter, col_idx++, s, -1);
                }
 
-               if (hists__has_callchains(hists) &&
+               if (hist_entry__has_callchains(h) &&
                    symbol_conf.use_callchain && hists__has(hists, sym)) {
                        if (callchain_param.mode == CHAIN_GRAPH_REL)
                                total = symbol_conf.cumulate_callchain ?
index bf31ceab33bd487d0021ccd9818384dde51fd371..89512504551b0b198a44ebe30e81d0972b86ce77 100644 (file)
@@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module)
        raw_svector_ostream ostream(*Buffer);
 
        legacy::PassManager PM;
-       if (TargetMachine->addPassesToEmitFile(PM, ostream,
-                                              TargetMachine::CGFT_ObjectFile)) {
+       bool NotAdded;
+#if CLANG_VERSION_MAJOR < 7
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream,
+                                                     TargetMachine::CGFT_ObjectFile);
+#else
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr,
+                                                     TargetMachine::CGFT_ObjectFile);
+#endif
+       if (NotAdded) {
                llvm::errs() << "TargetMachine can't emit a file of this type\n";
                return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
        }
index 540cd2dcd3e7098b7335c534a0aa7534ba87c889..653ff65aa2c37991763045c1c1bdb9f76f5d473f 100644 (file)
@@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
        int cpu_nr = ff->ph->env.nr_cpus_avail;
        u64 size = 0;
        struct perf_header *ph = ff->ph;
+       bool do_core_id_test = true;
 
        ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
        if (!ph->env.cpu)
@@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                return 0;
        }
 
+       /* On s390 the socket_id number is not related to the number of CPUs.
+        * The socket_id number might be higher than the number of CPUs.
+        * This depends on the configuration.
+        */
+       if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+               do_core_id_test = false;
+
        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
@@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
 
-               if (nr != (u32)-1 && nr > (u32)cpu_nr) {
+               if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
                        pr_debug("socket_id number is too big."
                                 "You may need to upgrade the perf tool.\n");
                        goto free_cpu;
@@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool,
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return 0;
        }
-       if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) {
+       if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return -1;
        }
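This hunk tightens the pipe-mode check from > to >=: HEADER_LAST_FEATURE is a count-style sentinel, not a real feature, so it must itself be rejected, matching the feat_id < HEADER_LAST_FEATURE guards added in builtin-annotate.c and builtin-script.c above. The off-by-one in miniature:

enum feature {
        FEAT_RESERVED,
        FEAT_A,
        FEAT_B,
        FEAT_LAST, /* sentinel: one past the highest valid ID */
};

/* Valid IDs lie strictly between the reserved slot and the sentinel,
 * hence < FEAT_LAST (i.e. reject feat >= FEAT_LAST, not just >). */
static int feature_valid(int feat)
{
        return feat > FEAT_RESERVED && feat < FEAT_LAST;
}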
index 52e8fda93a4723f8b19b8fcaf7e6635aae505a6e..828cb9794c7668c9e48d3b9fa527cff394580923 100644 (file)
@@ -370,9 +370,11 @@ void hists__delete_entries(struct hists *hists)
 
 static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
-                           bool sample_self)
+                           bool sample_self,
+                           size_t callchain_size)
 {
        *he = *template;
+       he->callchain_size = callchain_size;
 
        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
@@ -473,7 +475,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 
        he = ops->new(callchain_size);
        if (he) {
-               err = hist_entry__init(he, template, sample_self);
+               err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
@@ -619,9 +621,11 @@ __hists__add_entry(struct hists *hists,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
-       };
+       }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
-       return hists__findnew_entry(hists, &entry, al, sample_self);
+       if (!hists->has_callchains && he && he->callchain_size != 0)
+               hists->has_callchains = true;
+       return he;
 }
 
 struct hist_entry *hists__add_entry(struct hists *hists,
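
The hunks above replace a per-query evsel lookup with a cached flag:
each new entry records its callchain_size, and the owning hists is
marked the first time an entry with a non-zero size is added, so
hists__has_callchains() becomes a plain field read. A simplified
sketch of the caching pattern, with stand-in types:

    #include <stdio.h>

    struct entry { unsigned short callchain_size; };
    struct table { int has_callchains; };

    static void table_insert(struct table *t, struct entry *e)
    {
            if (!t->has_callchains && e->callchain_size != 0)
                    t->has_callchains = 1;
            /* ...the actual rbtree insertion is elided... */
    }

    int main(void)
    {
            struct table t = { 0 };
            struct entry e = { .callchain_size = 8 };

            table_insert(&t, &e);
            printf("has_callchains=%d\n", t.has_callchains); /* 1 */
            return 0;
    }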
index 06607c434949da48b53099d6129a53e71037711a..73049f7f0f6039e551daedb234d0f1d0a24b544e 100644 (file)
@@ -85,6 +85,7 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
+       bool                    has_callchains;
        int                     socket_filter;
        struct perf_hpp_list    *hpp_list;
        struct list_head        hpp_formats;
@@ -222,8 +223,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
 
 static __pure inline bool hists__has_callchains(struct hists *hists)
 {
-       const struct perf_evsel *evsel = hists_to_evsel(hists);
-       return evsel__has_callchain(evsel);
+       return hists->has_callchains;
 }
 
 int hists__init(void);
index ba4c9dd186434a33c8c33a59ab8884fd7c679dd3..d426761a549d02d67756c541ea7ab0b2a0495e68 100644 (file)
@@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
                if (len < offs)
                        return INTEL_PT_NEED_MORE_BYTES;
                byte = buf[offs++];
-               payload |= (byte >> 1) << shift;
+               payload |= ((uint64_t)byte >> 1) << shift;
        }
 
        packet->type = INTEL_PT_CYC;
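
The one-line fix above cures an integer-promotion bug: 'byte' promotes
to 32-bit int, so for CYC shift counts of 32 and up the payload bits
are shifted away (formally undefined behaviour) before the OR into the
64-bit accumulator; casting to uint64_t first widens the arithmetic.
A small self-contained illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char byte = 0x81;
            unsigned int shift = 36;
            uint64_t payload = 0;

            /* Buggy form: (byte >> 1) << shift. The left operand is a
             * 32-bit int, so a shift of 36 is undefined and the high
             * payload bits are lost. The cast widens first. */
            payload |= ((uint64_t)byte >> 1) << shift;

            printf("payload = %#llx\n", (unsigned long long)payload); /* 0x4000000000 */
            return 0;
    }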
index 976e658e38dce762163bb583f1ab02b39231a742..5e94857dfca2c8c47ae289b79dd2a717ae4a82b5 100644 (file)
@@ -266,16 +266,16 @@ static const char *kinc_fetch_script =
 "#!/usr/bin/env sh\n"
 "if ! test -d \"$KBUILD_DIR\"\n"
 "then\n"
-"      exit -1\n"
+"      exit 1\n"
 "fi\n"
 "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
 "then\n"
-"      exit -1\n"
+"      exit 1\n"
 "fi\n"
 "TMPDIR=`mktemp -d`\n"
 "if test -z \"$TMPDIR\"\n"
 "then\n"
-"    exit -1\n"
+"    exit 1\n"
 "fi\n"
 "cat << EOF > $TMPDIR/Makefile\n"
 "obj-y := dummy.o\n"
index 155d2570274fdae6fbe7caea7bfa1e07953f7948..da8fe57691b8cd0d4c1c22d0cf0fa62595385ac5 100644 (file)
@@ -227,11 +227,16 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME opt_pmu_config
 {
+       struct parse_events_state *parse_state = _parse_state;
+       struct parse_events_error *error = parse_state->error;
        struct list_head *list, *orig_terms, *terms;
 
        if (parse_events_copy_term_list($2, &orig_terms))
                YYABORT;
 
+       if (error)
+               error->idx = @1.first_column;
+
        ALLOC_LIST(list);
        if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
                struct perf_pmu *pmu = NULL;
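
The action above records the first column of the PMU name token in the
shared error struct before the terms are parsed, so a later failure can
point at the offending spot on the command line. A sketch of the
pattern with simplified types:

    #include <stdio.h>

    struct parse_error { int idx; const char *str; };

    static void note_location(struct parse_error *err, int first_column)
    {
            if (err)
                    err->idx = first_column; /* where the bad token starts */
    }

    int main(void)
    {
            struct parse_error err = { -1, NULL };

            note_location(&err, 12);
            printf("error at column %d\n", err.idx);
            return 0;
    }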
index d2fb597c9a8c78d8e8fd8a9890e67f8b8f4432d7..3ba6a1742f9198b2b5279511939d42658ba8a184 100644 (file)
@@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
        return 0;
 }
 
+static void perf_pmu_assign_str(char *name, const char *field, char **old_str,
+                               char **new_str)
+{
+       if (!*old_str)
+               goto set_new;
+
+       if (*new_str) { /* Have new string, check with old */
+               if (strcasecmp(*old_str, *new_str))
+                       pr_debug("alias %s differs in field '%s'\n",
+                                name, field);
+               zfree(old_str);
+       } else          /* Nothing new --> keep old string */
+               return;
+set_new:
+       *old_str = *new_str;
+       *new_str = NULL;
+}
+
+static void perf_pmu_update_alias(struct perf_pmu_alias *old,
+                                 struct perf_pmu_alias *newalias)
+{
+       perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc);
+       perf_pmu_assign_str(old->name, "long_desc", &old->long_desc,
+                           &newalias->long_desc);
+       perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic);
+       perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr,
+                           &newalias->metric_expr);
+       perf_pmu_assign_str(old->name, "metric_name", &old->metric_name,
+                           &newalias->metric_name);
+       perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str);
+       old->scale = newalias->scale;
+       old->per_pkg = newalias->per_pkg;
+       old->snapshot = newalias->snapshot;
+       memcpy(old->unit, newalias->unit, sizeof(old->unit));
+}
+
+/* Delete an alias entry. */
+static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+{
+       zfree(&newalias->name);
+       zfree(&newalias->desc);
+       zfree(&newalias->long_desc);
+       zfree(&newalias->topic);
+       zfree(&newalias->str);
+       zfree(&newalias->metric_expr);
+       zfree(&newalias->metric_name);
+       parse_events_terms__purge(&newalias->terms);
+       free(newalias);
+}
+
+/* Merge an alias: search the alias list, and if an alias with this name
+ * is already present, merge the two entries to combine all information.
+ */
+static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
+                                struct list_head *alist)
+{
+       struct perf_pmu_alias *a;
+
+       list_for_each_entry(a, alist, list) {
+               if (!strcasecmp(newalias->name, a->name)) {
+                       perf_pmu_update_alias(a, newalias);
+                       perf_pmu_free_alias(newalias);
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *desc, char *val,
                                 char *long_desc, char *topic,
@@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *metric_expr,
                                 char *metric_name)
 {
+       struct parse_events_term *term;
        struct perf_pmu_alias *alias;
        int ret;
        int num;
+       char newval[256];
 
        alias = malloc(sizeof(*alias));
        if (!alias)
@@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                return ret;
        }
 
+       /* Scan the event and remove leading zeroes, spaces and newlines;
+        * some platforms have terms specified as
+        * event=0x0091 (read from sysfs files ../<PMU>/events/<FILE>)
+        * and others as event=0x91 (read from JSON files).
+        *
+        * Rebuild the string to make the alias->str member comparable.
+        */
+       memset(newval, 0, sizeof(newval));
+       ret = 0;
+       list_for_each_entry(term, &alias->terms, list) {
+               if (ret)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        ",");
+               if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%#x", term->config, term->val.num);
+               else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%s", term->config, term->val.str);
+       }
+
        alias->name = strdup(name);
        if (dir) {
                /*
@@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
        }
        alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
-       alias->str = strdup(val);
+       alias->str = strdup(newval);
 
-       list_add_tail(&alias->list, list);
+       if (!perf_pmu_merge_alias(alias, list))
+               list_add_tail(&alias->list, list);
 
        return 0;
 }
@@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
 
        buf[ret] = 0;
 
+       /* Remove trailing newline from sysfs file */
+       rtrim(buf);
+
        return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
                                     NULL, NULL, NULL);
 }
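
Two techniques combine in this file: numeric event terms are re-printed
in a canonical %#x format so that a sysfs value like event=0x0091 and a
JSON value like event=0x91 rebuild to the same string, and any alias
whose name already exists is then merged into the old entry (matched
case-insensitively) instead of being appended as a duplicate. A minimal
sketch of the normalization step:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char a[64], b[64];

            /* both rebuild to "event=0x91", so the aliases compare equal */
            snprintf(a, sizeof(a), "event=%#x", 0x0091);
            snprintf(b, sizeof(b), "event=%#x", 0x91);
            printf("%s %s -> %s\n", a, b, strcmp(a, b) ? "different" : "equal");
            return 0;
    }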
index 46e9e19ab1ac43a9bee5349c8826d4c990c976ad..bc32e57d17be76bddbc561bcbafe3b06e5295461 100644 (file)
@@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
        if (_PyTuple_Resize(&t, n) == -1)
                Py_FatalError("error resizing Python tuple");
 
-       if (!dict) {
+       if (!dict)
                call_object(handler, t, handler_name);
-       } else {
+       else
                call_object(handler, t, default_handler_name);
-               Py_DECREF(dict);
-       }
 
-       Py_XDECREF(all_entries_dict);
        Py_DECREF(t);
 }
 
@@ -1235,7 +1232,6 @@ static void python_process_general_event(struct perf_sample *sample,
 
        call_object(handler, t, handler_name);
 
-       Py_DECREF(dict);
        Py_DECREF(t);
 }
 
@@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "# See the perf-script-python Documentation for the list "
                "of available functions.\n\n");
 
+       fprintf(ofp, "from __future__ import print_function\n\n");
        fprintf(ofp, "import os\n");
        fprintf(ofp, "import sys\n\n");
 
@@ -1636,10 +1633,10 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "from Core import *\n\n\n");
 
        fprintf(ofp, "def trace_begin():\n");
-       fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+       fprintf(ofp, "\tprint(\"in trace_begin\")\n\n");
 
        fprintf(ofp, "def trace_end():\n");
-       fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+       fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
 
        while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "def %s__%s(", event->system, event->name);
@@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                        "common_secs, common_nsecs,\n\t\t\t"
                        "common_pid, common_comm)\n\n");
 
-               fprintf(ofp, "\t\tprint \"");
+               fprintf(ofp, "\t\tprint(\"");
 
                not_first = 0;
                count = 0;
@@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                                fprintf(ofp, "%s", f->name);
                }
 
-               fprintf(ofp, ")\n\n");
+               fprintf(ofp, "))\n\n");
 
-               fprintf(ofp, "\t\tprint 'Sample: {'+"
-                       "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+               fprintf(ofp, "\t\tprint('Sample: {'+"
+                       "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
 
                fprintf(ofp, "\t\tfor node in common_callchain:");
                fprintf(ofp, "\n\t\t\tif 'sym' in node:");
-               fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
+               fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))");
                fprintf(ofp, "\n\t\t\telse:");
-               fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n");
-               fprintf(ofp, "\t\tprint \"\\n\"\n\n");
+               fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n");
+               fprintf(ofp, "\t\tprint()\n\n");
 
        }
 
        fprintf(ofp, "def trace_unhandled(event_name, context, "
                "event_fields_dict, perf_sample_dict):\n");
 
-       fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n");
-       fprintf(ofp, "\t\tprint 'Sample: {'+"
-               "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+       fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n");
+       fprintf(ofp, "\t\tprint('Sample: {'+"
+               "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
 
        fprintf(ofp, "def print_header("
                "event_name, cpu, secs, nsecs, pid, comm):\n"
-               "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
-               "(event_name, cpu, secs, nsecs, pid, comm),\n\n");
+               "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+               "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n");
 
        fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n"
                "\treturn delimiter.join"
index 7cf2d5cc038ea07accaf5ef631c9b23b7b0c207b..8bf302cafcecd6b285d68e2b2c56130019dea101 100644 (file)
@@ -112,6 +112,8 @@ struct hist_entry {
 
        char                    level;
        u8                      filtered;
+
+       u16                     callchain_size;
        union {
                /*
                 * Since perf diff only supports the stdio output, TUI
@@ -153,7 +155,7 @@ struct hist_entry {
 
 static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
 {
-       return hists__has_callchains(he->hists);
+       return he->callchain_size != 0;
 }
 
 static inline bool hist_entry__has_pairs(struct hist_entry *he)
index ca9ef70176249294644a1beeea2b141086cfe75c..d39e4ff7d0bf9256b4b5ff2c03b2cd4e24307fe4 100644 (file)
@@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary.  Note that option
 .PP
 \fB--hide column\fP do not show the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--hide sysfs" to hide the sysfs statistics columns as a group.
 .PP
-\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default.  Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds".
+\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default.  Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC".
 The column name "all" can be used to enable all disabled-by-default built-in counters.
 .PP
 \fB--show column\fP show only the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--show sysfs" to show the sysfs statistics columns as a group.
index d6cff3070ebde60d2fa9a54deec6c147b6bda484..4d14bbbf9b639b7152b73d75363672826fd274da 100644 (file)
@@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window;       /* IA32_HWP_REQUEST[bits 41:32] */
 unsigned int has_hwp_epp;              /* IA32_HWP_REQUEST[bits 31:24] */
 unsigned int has_hwp_pkg;              /* IA32_HWP_REQUEST_PKG */
 unsigned int has_misc_feature_control;
+unsigned int first_counter_read = 1;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -170,6 +171,8 @@ struct thread_data {
        unsigned long long  irq_count;
        unsigned int smi_count;
        unsigned int cpu_id;
+       unsigned int apic_id;
+       unsigned int x2apic_id;
        unsigned int flags;
 #define CPU_IS_FIRST_THREAD_IN_CORE    0x2
 #define CPU_IS_FIRST_CORE_IN_PACKAGE   0x4
@@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
 }
 
 /*
- * Each string in this array is compared in --show and --hide cmdline.
- * Thus, strings that are proper sub-sets must follow their more specific peers.
+ * This list matches the column headers, except:
+ * 1. built-in only; the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, so that strings containing them
+ *    do not falsely match on them for --show and --hide.
  */
 struct msr_counter bic[] = {
        { 0x0, "usec" },
        { 0x0, "Time_Of_Day_Seconds" },
        { 0x0, "Package" },
+       { 0x0, "Node" },
        { 0x0, "Avg_MHz" },
+       { 0x0, "Busy%" },
        { 0x0, "Bzy_MHz" },
        { 0x0, "TSC_MHz" },
        { 0x0, "IRQ" },
        { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
-       { 0x0, "Busy%" },
+       { 0x0, "sysfs" },
        { 0x0, "CPU%c1" },
        { 0x0, "CPU%c3" },
        { 0x0, "CPU%c6" },
@@ -424,73 +431,73 @@ struct msr_counter bic[] = {
        { 0x0, "Cor_J" },
        { 0x0, "GFX_J" },
        { 0x0, "RAM_J" },
-       { 0x0, "Core" },
-       { 0x0, "CPU" },
        { 0x0, "Mod%c6" },
-       { 0x0, "sysfs" },
        { 0x0, "Totl%C0" },
        { 0x0, "Any%C0" },
        { 0x0, "GFX%C0" },
        { 0x0, "CPUGFX%" },
-       { 0x0, "Node%" },
+       { 0x0, "Core" },
+       { 0x0, "CPU" },
+       { 0x0, "APIC" },
+       { 0x0, "X2APIC" },
 };
 
-
-
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
 #define        BIC_USEC        (1ULL << 0)
 #define        BIC_TOD         (1ULL << 1)
 #define        BIC_Package     (1ULL << 2)
-#define        BIC_Avg_MHz     (1ULL << 3)
-#define        BIC_Bzy_MHz     (1ULL << 4)
-#define        BIC_TSC_MHz     (1ULL << 5)
-#define        BIC_IRQ         (1ULL << 6)
-#define        BIC_SMI         (1ULL << 7)
-#define        BIC_Busy        (1ULL << 8)
-#define        BIC_CPU_c1      (1ULL << 9)
-#define        BIC_CPU_c3      (1ULL << 10)
-#define        BIC_CPU_c6      (1ULL << 11)
-#define        BIC_CPU_c7      (1ULL << 12)
-#define        BIC_ThreadC     (1ULL << 13)
-#define        BIC_CoreTmp     (1ULL << 14)
-#define        BIC_CoreCnt     (1ULL << 15)
-#define        BIC_PkgTmp      (1ULL << 16)
-#define        BIC_GFX_rc6     (1ULL << 17)
-#define        BIC_GFXMHz      (1ULL << 18)
-#define        BIC_Pkgpc2      (1ULL << 19)
-#define        BIC_Pkgpc3      (1ULL << 20)
-#define        BIC_Pkgpc6      (1ULL << 21)
-#define        BIC_Pkgpc7      (1ULL << 22)
-#define        BIC_Pkgpc8      (1ULL << 23)
-#define        BIC_Pkgpc9      (1ULL << 24)
-#define        BIC_Pkgpc10     (1ULL << 25)
-#define BIC_CPU_LPI    (1ULL << 26)
-#define BIC_SYS_LPI    (1ULL << 27)
-#define        BIC_PkgWatt     (1ULL << 26)
-#define        BIC_CorWatt     (1ULL << 27)
-#define        BIC_GFXWatt     (1ULL << 28)
-#define        BIC_PkgCnt      (1ULL << 29)
-#define        BIC_RAMWatt     (1ULL << 30)
-#define        BIC_PKG__       (1ULL << 31)
-#define        BIC_RAM__       (1ULL << 32)
-#define        BIC_Pkg_J       (1ULL << 33)
-#define        BIC_Cor_J       (1ULL << 34)
-#define        BIC_GFX_J       (1ULL << 35)
-#define        BIC_RAM_J       (1ULL << 36)
-#define        BIC_Core        (1ULL << 37)
-#define        BIC_CPU         (1ULL << 38)
-#define        BIC_Mod_c6      (1ULL << 39)
-#define        BIC_sysfs       (1ULL << 40)
-#define        BIC_Totl_c0     (1ULL << 41)
-#define        BIC_Any_c0      (1ULL << 42)
-#define        BIC_GFX_c0      (1ULL << 43)
-#define        BIC_CPUGFX      (1ULL << 44)
-#define        BIC_Node        (1ULL << 45)
-
-#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD)
+#define        BIC_Node        (1ULL << 3)
+#define        BIC_Avg_MHz     (1ULL << 4)
+#define        BIC_Busy        (1ULL << 5)
+#define        BIC_Bzy_MHz     (1ULL << 6)
+#define        BIC_TSC_MHz     (1ULL << 7)
+#define        BIC_IRQ         (1ULL << 8)
+#define        BIC_SMI         (1ULL << 9)
+#define        BIC_sysfs       (1ULL << 10)
+#define        BIC_CPU_c1      (1ULL << 11)
+#define        BIC_CPU_c3      (1ULL << 12)
+#define        BIC_CPU_c6      (1ULL << 13)
+#define        BIC_CPU_c7      (1ULL << 14)
+#define        BIC_ThreadC     (1ULL << 15)
+#define        BIC_CoreTmp     (1ULL << 16)
+#define        BIC_CoreCnt     (1ULL << 17)
+#define        BIC_PkgTmp      (1ULL << 18)
+#define        BIC_GFX_rc6     (1ULL << 19)
+#define        BIC_GFXMHz      (1ULL << 20)
+#define        BIC_Pkgpc2      (1ULL << 21)
+#define        BIC_Pkgpc3      (1ULL << 22)
+#define        BIC_Pkgpc6      (1ULL << 23)
+#define        BIC_Pkgpc7      (1ULL << 24)
+#define        BIC_Pkgpc8      (1ULL << 25)
+#define        BIC_Pkgpc9      (1ULL << 26)
+#define        BIC_Pkgpc10     (1ULL << 27)
+#define BIC_CPU_LPI    (1ULL << 28)
+#define BIC_SYS_LPI    (1ULL << 29)
+#define        BIC_PkgWatt     (1ULL << 30)
+#define        BIC_CorWatt     (1ULL << 31)
+#define        BIC_GFXWatt     (1ULL << 32)
+#define        BIC_PkgCnt      (1ULL << 33)
+#define        BIC_RAMWatt     (1ULL << 34)
+#define        BIC_PKG__       (1ULL << 35)
+#define        BIC_RAM__       (1ULL << 36)
+#define        BIC_Pkg_J       (1ULL << 37)
+#define        BIC_Cor_J       (1ULL << 38)
+#define        BIC_GFX_J       (1ULL << 39)
+#define        BIC_RAM_J       (1ULL << 40)
+#define        BIC_Mod_c6      (1ULL << 41)
+#define        BIC_Totl_c0     (1ULL << 42)
+#define        BIC_Any_c0      (1ULL << 43)
+#define        BIC_GFX_c0      (1ULL << 44)
+#define        BIC_CPUGFX      (1ULL << 45)
+#define        BIC_Core        (1ULL << 46)
+#define        BIC_CPU         (1ULL << 47)
+#define        BIC_APIC        (1ULL << 48)
+#define        BIC_X2APIC      (1ULL << 49)
+
+#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
 unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs;
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
 
 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
@@ -517,17 +524,34 @@ void help(void)
        "when COMMAND completes.\n"
        "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
        "to print statistics, until interrupted.\n"
-       "--add          add a counter\n"
-       "               eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
-       "--cpu  cpu-set limit output to summary plus cpu-set:\n"
-       "               {core | package | j,k,l..m,n-p }\n"
-       "--quiet        skip decoding system configuration header\n"
-       "--interval sec.subsec  Override default 5-second measurement interval\n"
-       "--help         print this help message\n"
-       "--list         list column headers only\n"
-       "--num_iterations num   number of the measurement iterations\n"
-       "--out file     create or truncate \"file\" for all output\n"
-       "--version      print version information\n"
+       "  -a, --add    add a counter\n"
+       "                 eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
+       "  -c, --cpu    cpu-set limit output to summary plus cpu-set:\n"
+       "                 {core | package | j,k,l..m,n-p }\n"
+       "  -d, --debug  displays usec, Time_Of_Day_Seconds and more debugging\n"
+       "  -D, --Dump   displays the raw counter values\n"
+       "  -e, --enable [all | column]\n"
+       "               shows all or the specified disabled column\n"
+       "  -H, --hide [column|column,column,...]\n"
+       "               hide the specified column(s)\n"
+       "  -i, --interval sec.subsec\n"
+       "               Override default 5-second measurement interval\n"
+       "  -J, --Joules displays energy in Joules instead of Watts\n"
+       "  -l, --list   list column headers only\n"
+       "  -n, --num_iterations num\n"
+       "               number of the measurement iterations\n"
+       "  -o, --out file\n"
+       "               create or truncate \"file\" for all output\n"
+       "  -q, --quiet  skip decoding system configuration header\n"
+       "  -s, --show [column|column,column,...]\n"
+       "               show only the specified column(s)\n"
+       "  -S, --Summary\n"
+       "               limits output to 1-line system summary per interval\n"
+       "  -T, --TCC temperature\n"
+       "               sets the Thermal Control Circuit temperature in\n"
+       "                 degrees Celsius\n"
+       "  -h, --help   print this help message\n"
+       "  -v, --version        print version information\n"
        "\n"
        "For more help, run \"man turbostat\"\n");
 }
@@ -601,6 +625,10 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU))
                outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_APIC))
+               outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_X2APIC))
+               outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Avg_MHz))
                outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Busy))
@@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
        } else {
                if (DO_BIC(BIC_Package)) {
                        if (p)
@@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
        }
 
        if (DO_BIC(BIC_Avg_MHz))
@@ -1231,6 +1267,12 @@ delta_thread(struct thread_data *new, struct thread_data *old,
        int i;
        struct msr_counter *mp;
 
+       /* we run cpuid just the first time, then copy the results */
+       if (DO_BIC(BIC_APIC))
+               new->apic_id = old->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               new->x2apic_id = old->x2apic_id;
+
        /*
         * the timestamps from start of measurement interval are in "old"
         * the timestamp from end of measurement interval are in "new"
@@ -1393,6 +1435,12 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        int i;
        struct msr_counter *mp;
 
+       /* copy the unchanging apic_ids */
+       if (DO_BIC(BIC_APIC))
+               average.threads.apic_id = t->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               average.threads.x2apic_id = t->x2apic_id;
+
        /* remember first tv_begin */
        if (average.threads.tv_begin.tv_sec == 0)
                average.threads.tv_begin = t->tv_begin;
@@ -1619,6 +1667,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
        return 0;
 }
 
+void get_apic_id(struct thread_data *t)
+{
+       unsigned int eax, ebx, ecx, edx, max_level;
+
+       eax = ebx = ecx = edx = 0;
+
+       if (!genuine_intel)
+               return;
+
+       __cpuid(0, max_level, ebx, ecx, edx);
+
+       __cpuid(1, eax, ebx, ecx, edx);
+       t->apic_id = (ebx >> 24) & 0xff;
+
+       if (max_level < 0xb)
+               return;
+
+       if (!DO_BIC(BIC_X2APIC))
+               return;
+
+       ecx = 0;
+       __cpuid(0xb, eax, ebx, ecx, edx);
+       t->x2apic_id = edx;
+
+       if (debug && (t->apic_id != t->x2apic_id))
+               fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+}
+
 /*
  * get_counters(...)
  * migrate to cpu
@@ -1632,7 +1708,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        struct msr_counter *mp;
        int i;
 
-
        gettimeofday(&t->tv_begin, (struct timezone *)NULL);
 
        if (cpu_migrate(cpu)) {
@@ -1640,6 +1715,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
+       if (first_counter_read)
+               get_apic_id(t);
 retry:
        t->tsc = rdtsc();       /* we are running on local CPU of interest */
 
@@ -2432,6 +2509,12 @@ void set_node_data(void)
                if (pni[pkg].count > topo.nodes_per_pkg)
                        topo.nodes_per_pkg = pni[0].count;
 
+       /* Fake 1 node per pkg for machines that don't
+        * expose nodes and thus avoid -nan results
+        */
+       if (topo.nodes_per_pkg == 0)
+               topo.nodes_per_pkg = 1;
+
        for (cpu = 0; cpu < topo.num_cpus; cpu++) {
                pkg = cpus[cpu].physical_package_id;
                node = cpus[cpu].physical_node_id;
@@ -2879,6 +2962,7 @@ void do_sleep(void)
        }
 }
 
+
 void turbostat_loop()
 {
        int retval;
@@ -2892,6 +2976,7 @@ restart:
 
        snapshot_proc_sysfs_files();
        retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (retval < -1) {
                exit(retval);
        } else if (retval == -1) {
@@ -4392,7 +4477,7 @@ void process_cpuid()
        if (!quiet) {
                fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
                        max_level, family, model, stepping, family, model, stepping);
-               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
+               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
                        ecx & (1 << 0) ? "SSE3" : "-",
                        ecx & (1 << 3) ? "MONITOR" : "-",
                        ecx & (1 << 6) ? "SMX" : "-",
@@ -4401,6 +4486,7 @@ void process_cpuid()
                        edx & (1 << 4) ? "TSC" : "-",
                        edx & (1 << 5) ? "MSR" : "-",
                        edx & (1 << 22) ? "ACPI-TM" : "-",
+                       edx & (1 << 28) ? "HT" : "-",
                        edx & (1 << 29) ? "TM" : "-");
        }
 
@@ -4652,7 +4738,6 @@ void process_cpuid()
        return;
 }
 
-
 /*
  * in /dev/cpu/ return success for names that are numbers
  * ie. filter out ".", "..", "microcode".
@@ -4842,6 +4927,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
        struct core_data *c;
        struct pkg_data *p;
 
+
+       /* Workaround for systems where physical_node_id==-1
+        * and logical_node_id==(-1 - topo.num_cpus)
+        */
+       if (node_id < 0)
+               node_id = 0;
+
        t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
        c = GET_CORE(core_base, core_id, node_id, pkg_id);
        p = GET_PKG(pkg_base, pkg_id);
@@ -4946,6 +5038,7 @@ int fork_it(char **argv)
 
        snapshot_proc_sysfs_files();
        status = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (status)
                exit(status);
        /* clear affinity side-effect of get_counters() */
@@ -5009,7 +5102,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.06.01"
+       fprintf(outf, "turbostat version 18.06.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5381,7 +5474,7 @@ void cmdline(int argc, char **argv)
                        break;
                case 'e':
                        /* --enable specified counter */
-                       bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+                       bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
                        break;
                case 'd':
                        debug++;
@@ -5465,7 +5558,6 @@ void cmdline(int argc, char **argv)
 int main(int argc, char **argv)
 {
        outf = stderr;
-
        cmdline(argc, argv);
 
        if (!quiet)
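
The new APIC columns above are filled in once per run from CPUID: the
initial APIC id occupies the full byte EBX[31:24] of leaf 1 (hence the
0xff mask), and on CPUs that implement leaf 0xB the 32-bit x2APIC id is
returned in EDX of sub-leaf 0. A minimal sketch using the compiler's
cpuid.h helpers; like turbostat, it is only meaningful when the process
is pinned to the CPU of interest:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx, max_level;

            __cpuid(0, max_level, ebx, ecx, edx);
            __cpuid(1, eax, ebx, ecx, edx);
            printf("initial apic id: %u\n", (ebx >> 24) & 0xff);

            if (max_level >= 0xb) {
                    /* sub-leaf 0 of leaf 0xB; EDX carries the x2APIC id */
                    __cpuid_count(0xb, 0, eax, ebx, ecx, edx);
                    printf("x2apic id: %u\n", edx);
            }
            return 0;
    }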
index a8fb63edcf8948df54b6aaa2f225def65d5a705f..e2926f72a821471214817f7ddb1c253a93b1ee02 100644 (file)
@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
        pcap->header.length = sizeof(*pcap);
        pcap->highest_capability = 1;
-       pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
-               ACPI_NFIT_CAPABILITY_MEM_FLUSH;
+       pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
        offset += pcap->header.length;
 
        if (t->setup_hotplug) {
index 7a6214e9ae58d4432668394bf4762a0e2cb5c669..a362e3d7abc633fd33db81aa3bd99da27b6edd4b 100644 (file)
@@ -105,7 +105,7 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
 
 BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
 BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
-BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
 
 ifneq ($(BTF_LLC_PROBE),)
 ifneq ($(BTF_PAHOLE_PROBE),)
index 1eefe211a4a88a3dfbac5be585932384061b9edd..b4994a94968bfd9d12965fd630cba7e99458a30a 100644 (file)
@@ -6,4 +6,15 @@ CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
 CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y
index 35669ccd4d23b26c7505e8829bcf3876e3bcb3e1..9df0d2ac45f8453b9529c4ea90fb19dba3f86480 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ "$(id -u)" != "0" ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 SRC_TREE=../../../../
 
 test_run()
index ce2e15e4f9760e205ed8e91ab5260a23172ab2e5..677686198df34d799e67c0eb15ab25f4b68eba4c 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 GREEN='\033[0;92m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color
index 1c77994b5e713dfe8aae357dd083c4713080f62a..270fa8f49573207bc973cce2302b53341a6fec5b 100755 (executable)
 # A UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
 # datagram can be read on NS6 when binding to fb00::6.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 TMP_FILE="/tmp/selftest_lwt_seg6local.txt"
 
 cleanup()
index e78aad0a68bb9963368a5236377144c8e61cb230..be800d0e7a841abfbc60545cf63fe33219db0c35 100755 (executable)
@@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True):
 
 def bpftool_prog_list(expected=None, ns=""):
     _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    # Remove the base progs
+    for p in base_progs:
+        if p in progs:
+            progs.remove(p)
     if expected is not None:
         if len(progs) != expected:
             fail(True, "%d BPF programs loaded, expected %d" %
@@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""):
 
 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+    # Remove the base maps
+    for m in base_maps:
+        if m in maps:
+            maps.remove(m)
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %
@@ -585,8 +593,8 @@ skip(os.getuid() != 0, "test must be run as root")
 # Check tools
 ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")
 
 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)
index 05c8cb71724ae8c1d8d7c3e3453bce9a83092b96..9e78df207919366fbdd048f7645568ac701ad5f5 100644 (file)
@@ -1413,18 +1413,12 @@ out:
 
 int main(int argc, char **argv)
 {
-       struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        int iov_count = 1, length = 1024, rate = 1;
        struct sockmap_options options = {0};
        int opt, longindex, err, cg_fd = 0;
        char *bpf_file = BPF_SOCKMAP_FILENAME;
        int test = PING_PONG;
 
-       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
-               perror("setrlimit(RLIMIT_MEMLOCK)");
-               return 1;
-       }
-
        if (argc < 2)
                return test_suite();
 
index aeb2901f21f4737558efbecec73974b17c610a38..546aee3e9fb457ae166c0fda8bc0c3b484f1a19b 100755 (executable)
@@ -608,28 +608,26 @@ setup_xfrm_tunnel()
 test_xfrm_tunnel()
 {
        config_device
-        #tcpdump -nei veth1 ip &
-       output=$(mktemp)
-       cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
-        setup_xfrm_tunnel
+       > /sys/kernel/debug/tracing/trace
+       setup_xfrm_tunnel
        tc qdisc add dev veth1 clsact
        tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
                sec xfrm_get_state
        ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
        sleep 1
-       grep "reqid 1" $output
+       grep "reqid 1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "spi 0x1" $output
+       grep "spi 0x1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "remote ip 0xac100164" $output
+       grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
        check_err $?
        cleanup
 
        if [ $ret -ne 0 ]; then
-                echo -e ${RED}"FAIL: xfrm tunnel"${NC}
-                return 1
-        fi
-        echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
+               echo -e ${RED}"FAIL: xfrm tunnel"${NC}
+               return 1
+       fi
+       echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
 }
 
 attach_bpf()
@@ -657,6 +655,10 @@ cleanup()
        ip link del ip6geneve11 2> /dev/null
        ip link del erspan11 2> /dev/null
        ip link del ip6erspan11 2> /dev/null
+       ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+       ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+       ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+       ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
 }
 
 cleanup_exit()
@@ -668,7 +670,7 @@ cleanup_exit()
 
 check()
 {
-       ip link help $1 2>&1 | grep -q "^Usage:"
+       ip link help 2>&1 | grep -q "\s$1\s"
        if [ $? -ne 0 ];then
                echo "SKIP $1: iproute2 not support"
        cleanup
index 2ecd27b670d77e29e817d607ac80fd100c683884..41106d9d5cc75570e60d841396541cb3ea8e7439 100644 (file)
@@ -4974,6 +4974,24 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_LWT_XMIT,
        },
+       {
+               "make headroom for LWT_XMIT",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+                       /* split for s390 to succeed */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_2, 42),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
        {
                "invalid access of tc_classid for LWT_IN",
                .insns = {
@@ -11986,6 +12004,46 @@ static struct bpf_test tests[] = {
                .errstr = "BPF_XADD stores into R2 packet",
                .prog_type = BPF_PROG_TYPE_XDP,
        },
+       {
+               "xadd/w check whether src/dst got mangled, 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .retval = 3,
+       },
+       {
+               "xadd/w check whether src/dst got mangled, 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+                       BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+                       BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .retval = 3,
+       },
        {
                "bpf_get_stack return R0 within range",
                .insns = {
@@ -12554,8 +12612,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        }
 
        if (fd_prog >= 0) {
+               __u8 tmp[TEST_DATA_LEN << 2];
+               __u32 size_tmp = sizeof(tmp);
+
                err = bpf_prog_test_run(fd_prog, 1, test->data,
-                                       sizeof(test->data), NULL, NULL,
+                                       sizeof(test->data), tmp, &size_tmp,
                                        &retval, NULL);
                if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                        printf("Unexpected bpf_prog_test_run error\n");
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644 (file)
index 0000000..3b1f45e
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since trace buffer is empty, snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure keep tracing off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure keep tracing on"
+test `cat tracing_on` -eq 1
+
+exit 0
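
The new test drives the ftrace snapshot buffer purely through tracefs
writes; 'echo 1 > snapshot' both allocates the spare buffer and takes a
snapshot, without disturbing the tracing_on state. A C equivalent of
that one step, assuming tracefs is mounted under debugfs as the test
does:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/tracing/snapshot", "w");

            if (!f)
                    return 1;
            fputs("1", f); /* allocate the buffer and take a snapshot */
            return fclose(f) ? 1 : 0;
    }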
index 128e548aa377d600f16fa0b9cdb4fbdf9c540cdc..1a0ac3a29ec5f8c9f0052e47e074f40089c634c5 100644 (file)
@@ -12,3 +12,4 @@ tcp_mmap
 udpgso
 udpgso_bench_rx
 udpgso_bench_tx
+tcp_inq
index 7ba089b33e8b8248ec08d8421a582be66c9f7e87..cd3a2f1545b54c23dab9b534bce9528c57b6c2ec 100644 (file)
@@ -12,3 +12,5 @@ CONFIG_NET_IPVTI=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
old mode 100644 (file)
new mode 100755 (executable)
index 78245d6..0f45633
@@ -740,13 +740,6 @@ ipv6_rt_add()
        run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
        log_test $? 2 "Attempt to add duplicate route - reject route"
 
-       # iproute2 prepend only sets NLM_F_CREATE
-       # - adds a new route; does NOT convert existing route to ECMP
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2"
-       check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024"
-       log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)"
-
        # route append with same prefix adds a new route
        # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND
        add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
@@ -754,27 +747,6 @@ ipv6_rt_add()
        check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
        log_test $? 0 "Append nexthop to existing route - gw"
 
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
-       check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1"
-       log_test $? 0 "Append nexthop to existing route - dev only"
-
-       # multipath route can not have a nexthop that is a reject route
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64"
-       log_test $? 2 "Append nexthop to existing route - reject route"
-
-       # reject route can not be converted to multipath route
-       run_cmd "$IP -6 ro flush 2001:db8:104::/64"
-       run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2"
-       log_test $? 2 "Append nexthop to existing reject route - gw"
-
-       run_cmd "$IP -6 ro flush 2001:db8:104::/64"
-       run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
-       log_test $? 2 "Append nexthop to existing reject route - dev only"
-
        # insert mpath directly
        add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
        check_route6  "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
@@ -819,13 +791,6 @@ ipv6_rt_replace_single()
        check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
        log_test $? 0 "Single path with multipath"
 
-       # single path with reject
-       #
-       add_initial_route6 "nexthop via 2001:db8:101::2"
-       run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
-       check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
-       log_test $? 0 "Single path with reject route"
-
        # single path with single path using MULTIPATH attribute
        #
        add_initial_route6 "via 2001:db8:101::2"
@@ -873,12 +838,6 @@ ipv6_rt_replace_mpath()
        check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
        log_test $? 0 "Multipath with single path via multipath attribute"
 
-       # multipath with reject
-       add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
-       run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
-       check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
-       log_test $? 0 "Multipath with reject route"
-
        # route replace fails - invalid nexthop 1
        add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
        run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
index 792fa4d0285e80e6cd36fdd83e3b5946b0538b3f..850767befa47a5fe7ca4bf4733fa670e55c6bf37 100755 (executable)
@@ -35,9 +35,6 @@ run_udp() {
 
        echo "udp gso"
        run_in_netns ${args} -S
-
-       echo "udp gso zerocopy"
-       run_in_netns ${args} -S -z
 }
 
 run_tcp() {
index 6ccb154cb4aa4f36184811d406ed9f4317647e4f..22f8df1ad7d484418235b6dadd290baca3bf3c6c 100755 (executable)
@@ -7,13 +7,16 @@
 #
 # Released under the terms of the GPL v2.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./common_tests
 
 if [ -e $REBOOT_FLAG  ]; then
     rm $REBOOT_FLAG
 else
     prlog "pstore_crash_test has not been executed yet. we skip further tests."
-    exit 0
+    exit $ksft_skip
 fi
 
 prlog -n "Mounting pstore filesystem ... "
index 6a9f602a8718691b086b08544e41aa0a17667e18..615252331813416675184c19066730624e53a96c 100644 (file)
@@ -137,6 +137,30 @@ unsigned int yield_mod_cnt, nr_abort;
        "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
        "bne 222b\n\t" \
        "333:\n\t"
+
+#elif defined(__mips__)
+
+#define RSEQ_INJECT_INPUT \
+       , [loop_cnt_1]"m"(loop_cnt[1]) \
+       , [loop_cnt_2]"m"(loop_cnt[2]) \
+       , [loop_cnt_3]"m"(loop_cnt[3]) \
+       , [loop_cnt_4]"m"(loop_cnt[4]) \
+       , [loop_cnt_5]"m"(loop_cnt[5]) \
+       , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "$5"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+       "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+       "beqz " INJECT_ASM_REG ", 333f\n\t" \
+       "222:\n\t" \
+       "addiu " INJECT_ASM_REG ", -1\n\t" \
+       "bnez " INJECT_ASM_REG ", 222b\n\t" \
+       "333:\n\t"
+
 #else
 #error unsupported target
 #endif
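
The MIPS block above wires the test's delay-injection hooks into inline
assembly: each instrumented point spins loop_cnt[n] times to widen the
race windows exercised by the rseq fast paths. A plain C sketch of the
same idea (the real tests must do this in asm so the loop sits inside
the restartable sequence):

    static volatile unsigned long loop_cnt[7];

    static inline void inject_delay(int n)
    {
            unsigned long i;

            for (i = loop_cnt[n]; i; i--)
                    __asm__ __volatile__ ("" ::: "memory"); /* keep the loop */
    }

    int main(void)
    {
            loop_cnt[3] = 1000;
            inject_delay(3);
            return 0;
    }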
index 3b055f9aeaab56bcbe91f9bc493ac2d08c527074..3cea19877227a03c4c501bffa6a687cbe32ad126 100644 (file)
@@ -57,6 +57,7 @@ do {                                                                  \
 #define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown,          \
                                abort_label, version, flags,            \
                                start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t"                                        \
                __rseq_str(table_label) ":\n\t"                         \
                ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
                ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
new file mode 100644 (file)
index 0000000..7f48ecf
--- /dev/null
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ *
+ * Based on rseq-arm.h:
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG       0x53053053
+
+#define rseq_smp_mb()  __asm__ __volatile__ ("sync" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p)                                       \
+__extension__ ({                                                       \
+       __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);                       \
+       rseq_smp_mb();                                                  \
+       ____p1;                                                         \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep()     rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v)                                   \
+do {                                                                   \
+       rseq_smp_mb();                                                  \
+       RSEQ_WRITE_ONCE(*p, v);                                         \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#if _MIPS_SZLONG == 64
+# define LONG                  ".dword"
+# define LONG_LA               "dla"
+# define LONG_L                        "ld"
+# define LONG_S                        "sd"
+# define LONG_ADDI             "daddiu"
+# define U32_U64_PAD(x)                x
+#elif _MIPS_SZLONG == 32
+# define LONG                  ".word"
+# define LONG_LA               "la"
+# define LONG_L                        "lw"
+# define LONG_S                        "sw"
+# define LONG_ADDI             "addiu"
+# ifdef __BIG_ENDIAN
+#  define U32_U64_PAD(x)       "0x0, " x
+# else
+#  define U32_U64_PAD(x)       x ", 0x0"
+# endif
+#else
+# error unsupported _MIPS_SZLONG
+#endif
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags,        start_ip, \
+                               post_commit_offset, abort_ip) \
+               ".pushsection __rseq_table, \"aw\"\n\t" \
+               ".balign 32\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+               RSEQ_INJECT_ASM(1) \
+               LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
+               LONG_S  " $4, %[" __rseq_str(rseq_cs) "]\n\t" \
+               __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+               RSEQ_INJECT_ASM(2) \
+               "lw  $4, %[" __rseq_str(current_cpu_id) "]\n\t" \
+               "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, version, flags, \
+                               start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t" \
+               __rseq_str(table_label) ":\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+                             start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, 0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess()   __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
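[Editor's note] A minimal usage sketch for the fast path above, assuming the thread has already called rseq_register_current_thread() from this selftest library; percpu_inc() and its counters array are hypothetical:

static void percpu_inc(intptr_t *counters)	/* one slot per CPU */
{
	for (;;) {
		int cpu = rseq_current_cpu();
		intptr_t old = counters[cpu];

		/* 0: committed; -1: aborted; 1: value changed under us */
		if (!rseq_cmpeqv_storev(&counters[cpu], old, old + 1, cpu))
			return;
	}
}

On abort or compare failure the loop simply re-reads the current CPU and value and retries.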
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+                              off_t voffp, intptr_t *load, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+               LONG_S " $4, %[load]\n\t"
+               LONG_ADDI " $4, %[voffp]\n\t"
+               LONG_L " $4, 0($4)\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expectnot]           "r" (expectnot),
+                 [voffp]               "Ir" (voffp),
+                 [load]                "m" (*load)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+               LONG_L " $4, %[v]\n\t"
+               LONG_ADDI " $4, %[count]\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(4)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [count]               "Ir" (count)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+                                intptr_t *v2, intptr_t newv2,
+                                intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+                                        intptr_t *v2, intptr_t newv2,
+                                        intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+                             intptr_t *v2, intptr_t expect2,
+                             intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* cmp2 input */
+                 [v2]                  "m" (*v2),
+                 [expect2]             "r" (expect2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2, error3
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("1st expected value comparison failed");
+error3:
+       rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+                                void *dst, void *src, size_t len,
+                                intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t" \
+               "222:\n\t" \
+               "lb   $4, 0(%[src])\n\t" \
+               "sb   $4, 0(%[dst])\n\t" \
+               LONG_ADDI " %[src], 1\n\t" \
+               LONG_ADDI " %[dst], 1\n\t" \
+               LONG_ADDI " %[len], -1\n\t" \
+               "bnez %[len], 222b\n\t" \
+               "333:\n\t" \
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+                                        void *dst, void *src, size_t len,
+                                        intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t" \
+               "222:\n\t" \
+               "lb   $4, 0(%[src])\n\t" \
+               "sb   $4, 0(%[dst])\n\t" \
+               LONG_ADDI " %[src], 1\n\t" \
+               LONG_ADDI " %[dst], 1\n\t" \
+               LONG_ADDI " %[len], -1\n\t" \
+               "bnez %[len], 222b\n\t" \
+               "333:\n\t" \
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
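[Editor's note] A hedged sketch of how the trymemcpy variant might be driven, for example to append into a per-CPU buffer; struct percpu_buf and buf_push() are illustrative, not part of the patch:

struct percpu_buf {
	intptr_t offset;
	char data[4096];
};

/* Returns 0 on commit, 1 on compare failure (retry), -1 on abort. */
static int buf_push(struct percpu_buf *buf, const void *item,
		    size_t len, int cpu)
{
	intptr_t off = buf->offset;

	if (off + (intptr_t)len > (intptr_t)sizeof(buf->data))
		return 1;	/* treat a full buffer like a compare failure */
	return rseq_cmpeqv_trymemcpy_storev(&buf->offset, off,
					    &buf->data[off], (void *)item,
					    len, off + len, cpu);
}

The byte copy and the offset publication either both take effect on the expected CPU or neither does.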
index 0a808575cbc443489a5713639f7076eacedbdde4..86ce22417e0d7f89b0af1df374d1fd240306fd8c 100644 (file)
@@ -73,6 +73,8 @@ extern __thread volatile struct rseq __rseq_abi;
 #include <rseq-arm.h>
 #elif defined(__PPC__)
 #include <rseq-ppc.h>
+#elif defined(__mips__)
+#include <rseq-mips.h>
 #else
 #error unsupported target
 #endif
@@ -131,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void)
        return cpu;
 }
 
+static inline void rseq_clear_rseq_cs(void)
+{
+#ifdef __LP64__
+       __rseq_abi.rseq_cs.ptr = 0;
+#else
+       __rseq_abi.rseq_cs.ptr.ptr32 = 0;
+#endif
+}
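[Editor's note] For context, a layout consistent with the two accesses above; the exact definition lives in the rseq uapi (linux/rseq.h), which orders ptr32 and padding by endianness:

union {
	__u64 ptr64;
#ifdef __LP64__
	__u64 ptr;
#else
	struct {
#ifdef __BIG_ENDIAN
		__u32 padding;	/* must stay zero */
		__u32 ptr32;
#else
		__u32 ptr32;
		__u32 padding;	/* must stay zero */
#endif
	} ptr;
#endif
} rseq_cs;

Clearing only ptr32 on 32-bit is enough because the padding word is never set.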
+
 /*
- * rseq_prepare_unload() should be invoked by each thread using rseq_finish*()
- * at least once between their last rseq_finish*() and library unload of the
- * library defining the rseq critical section (struct rseq_cs). This also
- * applies to use of rseq in code generated by JIT: rseq_prepare_unload()
- * should be invoked at least once by each thread using rseq_finish*() before
- * reclaim of the memory holding the struct rseq_cs.
+ * rseq_prepare_unload() should be invoked by each thread executing an rseq
+ * critical section at least once between its last critical section and the
+ * unload of the library defining the rseq critical section
+ * (struct rseq_cs). This also applies to rseq use in JIT-generated code:
+ * rseq_prepare_unload() should be invoked at least once by each thread
+ * executing an rseq critical section before the memory holding the
+ * struct rseq_cs is reclaimed.
  */
 static inline void rseq_prepare_unload(void)
 {
-       __rseq_abi.rseq_cs = 0;
+       rseq_clear_rseq_cs();
 }
 
 #endif  /* RSEQ_H_ */
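[Editor's note] A small sketch of the rule the comment above states, with hypothetical names; each thread that ran critical sections from a dlopen()ed library clears its rseq_cs pointer before that library can go away:

static void thread_detach_from_plugin(void)
{
	/* After this, no stale pointer into the plugin's
	 * __rseq_table can remain in this thread's struct rseq. */
	rseq_prepare_unload();
}

/* Every such thread runs thread_detach_from_plugin(), and only
 * then may the process dlclose() the plugin. */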
old mode 100644 (file)
new mode 100755 (executable)
index 2082eeffd779d586b558f45883c5dc1cc08a865b..a19531dba4dc311d00e33fcb637b91bced262bf2 100644 (file)
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/)
+
+ifneq ($(ARCH),sparc64)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
 SUBDIRS := drivers
 
 TEST_PROGS := run.sh
 
+
 .PHONY: all clean
 
 include ../lib.mk
@@ -18,10 +29,6 @@ all:
                fi \
        done
 
-override define RUN_TESTS
-       @cd $(OUTPUT); ./run.sh
-endef
-
 override define INSTALL_RULE
        mkdir -p $(INSTALL_PATH)
        install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@@ -33,10 +40,6 @@ override define INSTALL_RULE
        done;
 endef
 
-override define EMIT_TESTS
-       echo "./run.sh"
-endef
-
 override define CLEAN
        @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
@@ -44,3 +47,4 @@ override define CLEAN
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
        done
 endef
+endif
index 6264f40bbdbc9dedf9fd41519823fa31771d6cd0..deb0df4155659ec1f4b13a4d74c5128673690226 100644 (file)
@@ -1,4 +1,4 @@
-
+# SPDX-License-Identifier: GPL-2.0
 INCLUDEDIR := -I.
 CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
 
index 24cff498b31aa831b388e638929f29c36db07dbd..fc9f8cde7d4223c3fd564105942d4d648acb8627 100755 (executable)
@@ -2,6 +2,19 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs static keys kernel module tests
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+       echo "static_key: module test_static_key_base is not found [SKIP]"
+       exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+       echo "static_key: module test_static_keys is not found [SKIP]"
+       exit $ksft_skip
+fi
+
 if /sbin/modprobe -q test_static_key_base; then
        if /sbin/modprobe -q test_static_keys; then
                echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644 (file)
index 0000000..1ab7e81
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
index ec232c3cfcaac3b8f52936eabb908a0c316183f8..584eb8ea780a49220782d08e104756199fc19934 100755 (executable)
@@ -14,6 +14,9 @@
 
 # This performs a series of tests against the proc sysctl interface.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 TEST_NAME="sysctl"
 TEST_DRIVER="test_${TEST_NAME}"
 TEST_DIR=$(dirname $0)
@@ -41,7 +44,7 @@ test_modprobe()
                echo "$0: $DIR not present" >&2
                echo "You must have the following enabled in your kernel:" >&2
                cat $TEST_DIR/config >&2
-               exit 1
+               exit $ksft_skip
        fi
 }
 
@@ -98,28 +101,30 @@ test_reqs()
        uid=$(id -u)
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 
        if ! which perl 2> /dev/null > /dev/null; then
                echo "$0: You need perl installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which getconf 2> /dev/null > /dev/null; then
                echo "$0: You need getconf installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which diff 2> /dev/null > /dev/null; then
                echo "$0: You need diff installed"
-               exit 1
+               exit $ksft_skip
        fi
 }
 
 function load_req_mod()
 {
-       trap "test_modprobe" EXIT
-
        if [ ! -d $DIR ]; then
+               if ! modprobe -q -n $TEST_DRIVER; then
+                       echo "$0: module $TEST_DRIVER not found [SKIP]"
+                       exit $ksft_skip
+               fi
                modprobe $TEST_DRIVER
                if [ $? -ne 0 ]; then
                        exit
@@ -765,6 +770,7 @@ function parse_args()
 test_reqs
 allow_user_defaults
 check_production_sysctl_writes_strict
+test_modprobe
 load_req_mod
 
 trap "test_finish" EXIT
index d60506fc77f8bcba61f222db0b0df05a38e2e68b..f9b31a57439b759c1813ca94ac948a998e9dca51 100755 (executable)
@@ -2,6 +2,13 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs copy_to/from_user infrastructure using test_user_copy kernel module
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+       echo "user: module test_user_copy is not found [SKIP]"
+       exit $ksft_skip
+fi
 if /sbin/modprobe -q test_user_copy; then
        /sbin/modprobe -q -r test_user_copy
        echo "user_copy: ok"
index 1097f04e4d80e6cff93bb9e912c4c38894cd5959..bcec71250873108efdeae50eab0874b3924204c4 100644 (file)
@@ -16,6 +16,8 @@
 #include <unistd.h>
 #include <string.h>
 
+#include "../kselftest.h"
+
 #define MAP_SIZE 1048576
 
 struct map_list {
@@ -169,7 +171,7 @@ int main(int argc, char **argv)
                printf("Either the sysctl compact_unevictable_allowed is not\n"
                       "set to 1 or couldn't read the proc file.\n"
                       "Skipping the test\n");
-               return 0;
+               return KSFT_SKIP;
        }
 
        lim.rlim_cur = RLIM_INFINITY;
index 4997b9222cfa5055f9c07f4f1f0a1454bae89d6e..637b6d0ac0d0bf63d88ff5f5782a65453b486a7a 100644 (file)
@@ -9,6 +9,8 @@
 #include <stdbool.h>
 #include "mlock2.h"
 
+#include "../kselftest.h"
+
 struct vm_boundaries {
        unsigned long start;
        unsigned long end;
@@ -303,7 +305,7 @@ static int test_mlock_lock()
        if (mlock2_(map, 2 * page_size, 0)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(0)");
                goto unmap;
@@ -412,7 +414,7 @@ static int test_mlock_onfault()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -425,7 +427,7 @@ static int test_mlock_onfault()
        if (munlock(map, 2 * page_size)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("munlock()");
                goto unmap;
@@ -457,7 +459,7 @@ static int test_lock_onfault_of_present()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock)
        if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock(ONFAULT)\n");
                goto out;
index 22d56467383029b24b52b95e6ee09cc0bb6bf835..88cbe5575f0cf9e0d8f165ecbe27002a3c5ed8a1 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 # please run as root
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 mnt=./huge
 exitcode=0
 
@@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
                echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
                if [ $? -ne 0 ]; then
                        echo "Please run this test as root"
-                       exit 1
+                       exit $ksft_skip
                fi
                while read name size unit; do
                        if [ "$name" = "HugePages_Free:" ]; then
index de2f9ec8a87fb342a7a595a13b009358d9eae000..7b8171e3128a8715a62a10e020c69ea3ca1c5321 100644 (file)
@@ -69,6 +69,8 @@
 #include <setjmp.h>
 #include <stdbool.h>
 
+#include "../kselftest.h"
+
 #ifdef __NR_userfaultfd
 
 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
@@ -1322,7 +1324,7 @@ int main(int argc, char **argv)
 int main(void)
 {
        printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
-       return 0;
+       return KSFT_SKIP;
 }
 
 #endif /* __NR_userfaultfd */
index 246145b84a127c341fd1fdc4fb41bcf6c7d51644..4d9dc3f2fd7048212181c51f03cef4d1650e07c9 100644 (file)
@@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
         */
        for (int i = 0; i < NGREG; i++) {
                greg_t req = requested_regs[i], res = resulting_regs[i];
+
                if (i == REG_TRAPNO || i == REG_IP)
                        continue;       /* don't care */
-               if (i == REG_SP) {
-                       printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
-                              (unsigned long long)res);
 
+               if (i == REG_SP) {
                        /*
-                        * In many circumstances, the high 32 bits of rsp
-                        * are zeroed.  For example, we could be a real
-                        * 32-bit program, or we could hit any of a number
-                        * of poorly-documented IRET or segmented ESP
-                        * oddities.  If this happens, it's okay.
+                        * If we were using a 16-bit stack segment, then
+                        * the kernel is a bit stuck: IRET only restores
+                        * the low 16 bits of ESP/RSP if SS is 16-bit.
+                        * The kernel uses a hack to restore bits 31:16,
+                        * but that hack doesn't help with bits 63:32.
+                        * On Intel CPUs, bits 63:32 end up zeroed, and, on
+                        * AMD CPUs, they leak the high bits of the kernel
+                        * espfix64 stack pointer.  There's very little that
+                        * the kernel can do about it.
+                        *
+                        * Similarly, if we are returning to a 32-bit context,
+                        * the CPU will often lose the high 32 bits of RSP.
                         */
-                       if (res == (req & 0xFFFFFFFF))
-                               continue;  /* OK; not expected to work */
+
+                       if (res == req)
+                               continue;
+
+                       if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+                               printf("[NOTE]\tSP: %llx -> %llx\n",
+                                      (unsigned long long)req,
+                                      (unsigned long long)res);
+                               continue;
+                       }
+
+                       printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+                              (unsigned long long)requested_regs[i],
+                              (unsigned long long)resulting_regs[i]);
+                       nerrs++;
+                       continue;
                }
 
                bool ignore_reg = false;
@@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
 #endif
 
                /* Sanity check on the kernel */
-               if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
+               if (i == REG_CX && req != res) {
                        printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
-                              (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                        continue;
                }
 
-               if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
-                       /*
-                        * SP is particularly interesting here.  The
-                        * usual cause of failures is that we hit the
-                        * nasty IRET case of returning to a 16-bit SS,
-                        * in which case bits 16:31 of the *kernel*
-                        * stack pointer persist in ESP.
-                        */
+               if (req != res && !ignore_reg) {
                        printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
-                              i, (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              i, (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                }
        }
index 754de7da426a80a2ae386042d30a5904b44446e6..232e958ec454756501f2caa8eaf2133067fe10ac 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 TCID="zram.sh"
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./zram_lib.sh
 
 run_zram () {
@@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then
 else
        echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
        echo "$TCID : CONFIG_ZRAM is not set"
-       exit 1
+       exit $ksft_skip
 fi
index f6a9c73e7a442e7988b0820ebc809a342981df91..9e73a4fb9b0aa9b2a2e81368badfbe278876695d 100755 (executable)
@@ -18,6 +18,9 @@ MODULE=0
 dev_makeswap=-1
 dev_mounted=-1
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 trap INT
 
 check_prereqs()
@@ -27,7 +30,7 @@ check_prereqs()
 
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 }
 
index 95dd14648ba51cf9fcc7f86a38e81e8f47aa9f61..0f395dfb7774c3f87fffc337c89afbd8b5f247dd 100644 (file)
 
 /******************** Little Endian Handling ********************************/
 
-#define cpu_to_le16(x)  htole16(x)
-#define cpu_to_le32(x)  htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed. To solve this, we code cpu_to_le16/32 as
+ * constant expressions that are usable in static initializers.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x)  (x)
+#define cpu_to_le32(x)  (x)
+#else
+#define cpu_to_le16(x)  ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x)  \
+       ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >>  8) | \
+       (((x) & 0x0000ff00u) <<  8) | (((x) & 0x000000ffu) << 24))
+#endif
+
 #define le32_to_cpu(x)  le32toh(x)
 #define le16_to_cpu(x)  le16toh(x)
 
-
 /******************** Messages and Errors ***********************************/
 
 static const char argv0[] = "ffs-test";
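[Editor's note] To illustrate why constant expressions are needed (hypothetical fields; the real descriptors appear elsewhere in ffs-test.c): htole16()/htole32() may expand to function calls, which C rejects in static initializers, while the macros above fold at compile time:

static const struct {
	__le16 wMaxPacketSize;
	__le32 dwLength;
} ep_sketch = {
	.wMaxPacketSize = cpu_to_le16(512),	/* constant expression */
	.dwLength       = cpu_to_le32(16),	/* constant expression */
};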
index 9a45f90e2d08974c42c6e6dc242b5cfd35d5e120..369ee308b6686ca4a106581b91f8d382e45c79e8 100644 (file)
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & 0x03);
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~0x02;
 }
 
 static inline struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 {
        memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-       {
-               unsigned int i;
-               for (i = 0; i < nents; i++)
-                       sgl[i].sg_magic = SG_MAGIC;
-       }
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 72143cfaf6ec39404dad5f72a8cf08c5e5fefc7e..ea434ddc849925c6e2577a9ed6acea906ea8eafd 100644 (file)
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
        def_bool y
-       depends on KVM && COMPAT && !S390
+       depends on KVM && COMPAT && !(S390 || ARM64)
 
 config HAVE_KVM_IRQ_BYPASS
        bool
index 8d90de213ce9b89340b7dc11927862f8344829c7..1d90d79706bd5b71d3914ecd808d2bd6c127286c 100644 (file)
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
        phys_addr_t next;
 
        assert_spin_locked(&kvm->mmu_lock);
+       WARN_ON(size & ~PAGE_MASK);
+
        pgd = kvm->arch.pgd + stage2_pgd_index(addr);
        do {
                /*
index ff7dc890941a8447d6e5abeae6dfe6544fac18d7..cdce653e3c47fb31b9eb0ccf73c3bebd830d8496 100644 (file)
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
                kvm_vgic_global_state.vcpu_base = 0;
-       } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-               pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-                       (unsigned long long)resource_size(&info->vcpu),
-                       PAGE_SIZE);
-               kvm_vgic_global_state.vcpu_base = 0;
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
index 90d30fbe95aefb1e1a943d5bf29d7aee763fb9d0..b20b751286fc612214c59c95e787c9fb0fac50b7 100644 (file)
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
 {
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, shutdown);
+       struct kvm *kvm = irqfd->kvm;
        u64 cnt;
 
+       /* Make sure the irqfd has been initialized in the assign path. */
+       synchronize_srcu(&kvm->irq_srcu);
+
        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 
        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);
-       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        if (events & EPOLLIN)
                schedule_work(&irqfd->inject);
 
-       /*
-        * do not drop the file until the irqfd is fully initialized, otherwise
-        * we might race against the EPOLLHUP
-        */
-       fdput(f);
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        if (kvm_arch_has_irq_bypass()) {
                irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        }
 #endif
 
+       srcu_read_unlock(&kvm->irq_srcu, idx);
+
+       /*
+        * do not drop the file until the irqfd is fully initialized, otherwise
+        * we might race against the EPOLLHUP
+        */
+       fdput(f);
        return 0;
 
 fail:
index ada21f47f22b5a902e81572ba94efb16a2a7bccb..8b47507faab5b645295094992c0eaa388765f025 100644 (file)
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 #ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
+#define KVM_COMPAT(c)  .compat_ioctl   = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+                               unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c)  .compat_ioctl   = kvm_no_compat_ioctl
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vcpu_compat_ioctl,
-#endif
        .mmap           = kvm_vcpu_mmap,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vcpu_compat_ioctl),
 };
 
 /*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 
 static const struct file_operations kvm_device_fops = {
        .unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl = kvm_device_ioctl,
-#endif
        .release = kvm_device_release,
+       KVM_COMPAT(kvm_device_ioctl),
 };
 
 struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vm_compat_ioctl,
-#endif
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vm_compat_ioctl),
 };
 
 static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ out:
 
 static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
-       .compat_ioctl   = kvm_dev_ioctl,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_dev_ioctl),
 };
 
 static struct miscdevice kvm_dev = {